index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
999,500 | 09af77947ee6be307b6e10f5dbda5af82f9d3f2e | """
Module with logging utilities
"""
import vlogging
import net.utilities
import net.data
def log_model_analysis(
        logger, image, segmentation_image, model, indices_to_colors_map, void_color, colors_to_ignore):
    """
    Log a side-by-side visual analysis of a model's prediction for one image.

    Logged artifacts: the input image, the ground truth segmentation, the
    predicted segmentation, and both segmentations overlaid on the input image.

    :param logger: logger instance the visual record is written to
    :param image: input image the model predicts on
    :param segmentation_image: ground truth segmentation for the input image
    :param model: net.ml.Model instance
    :param indices_to_colors_map: dictionary mapping category ids to segmentation colors
    :param void_color: color used for pixels without any category assigned
    :param colors_to_ignore: list of colors excluded when constructing overlays
    """
    prediction_cube = model.predict(image)
    prediction_image = net.data.get_segmentation_image(
        prediction_cube, indices_to_colors_map, void_color)

    ground_truth_overlay = net.utilities.get_segmentation_overlaid_image(
        image, segmentation_image, colors_to_ignore)
    prediction_overlay = net.utilities.get_segmentation_overlaid_image(
        image, prediction_image, colors_to_ignore)

    logger.info(vlogging.VisualRecord(
        "Data", [image, segmentation_image, prediction_image,
                 ground_truth_overlay, prediction_overlay]))
|
999,501 | 771e8d156f9a2861a36605a4a8cf32c94e8f0f3a | import settings
import os
import requests
def splitlist(list_, number):
    """Split *list_* into *number* consecutive chunks of near-equal size.

    Chunk boundaries are computed with a floating-point step so items are
    spread as evenly as possible. When the input has fewer items than
    *number*, the result is padded with empty lists so exactly *number*
    lists are always returned.
    """
    step = len(list_) / float(number)
    chunks = []
    cursor = 0.
    while cursor < len(list_):
        chunks.append(list_[int(cursor):int(cursor + step)])
        cursor += step
    # Pad with empty chunks so callers always receive `number` lists.
    while len(chunks) < number:
        chunks.append([])
    return chunks
def create_dir(name):
    """Create directory *name* (including parents) if it does not exist yet.

    Uses a single makedirs call instead of the previous check-then-create
    sequence, so a concurrent creation of the same directory between the
    check and the create can no longer raise.
    """
    try:
        os.makedirs(name)
    except FileExistsError:
        # Already present (possibly created by another process).
        # Re-raise when the existing path is not a directory: that is a
        # genuine error the old isdir() check would have surfaced too.
        if not os.path.isdir(name):
            raise
        return
    settings.logger.log("Created directory '{name}'".format(name=name))
def to_tracker(l):
    """Submit a list of items to the ArchiveTeam newsgrabber tracker.

    The items are joined with newlines and sent in one POST request.
    Returns True when the tracker answered with HTTP 200.
    """
    payload = {'items': '\n'.join(l)}
    response = requests.post(
        'http://tracker.archiveteam.org/newsgrabber/rescue-me',
        data=payload)
    return response.status_code == 200
|
999,502 | def410421f34e6d085b2e230df2cd34b9aaf138e | from collections import defaultdict
for _ in range(int(input())):
num_customers, num_compartments = map(int, input().split())
customer_list = []
for i in range(num_customers):
customer_list.append(list(map(int, input().split())))
customer_list.sort(key=lambda x: x[1])
allowed_count = 0
compartments = defaultdict(int)
for customer in customer_list:
if compartments[customer[2]] <= customer[0]:
compartments[customer[2]] = customer[1]
allowed_count += 1
print(allowed_count)
|
999,503 | 8fc7cea0bac46ba4e5a4c163ca3959a678e512d0 | from sharpie_set import Sharpie_set
from sharpie import Sharpie
green = Sharpie("green", 0.1)
red = Sharpie("red", 0.2)
orange = Sharpie("orange", 0.3)
new_set = Sharpie_set()
green.use()
new_set.add(green)
new_set.add(red)
new_set.add(orange)
print(new_set.count_usable())
|
999,504 | 50ed0f61e909b1db87d45355b8de3a4446c853a5 | import requests
from bs4 import BeautifulSoup
res = requests.get('https://movie.douban.com/top250?start=0&filter=')
soup = BeautifulSoup(res.text,'html.parser')
tag = soup.find('div',class_='article')
li_all = tag.find_all('li')
list_all = []
for key in li_all:
tag_url = key.find('a')['href']
tag_order = key.find('em').text
tag_name = key.find('span',class_='title').text
tag_quote = key.find('span',class_='inq').text
tag_num = key.find('span',class_='rating_num').text
sub_list = [tag_order,tag_name,tag_num,tag_quote,tag_url]
list_all.append(sub_list)
print(list_all) |
999,505 | 294d7e8556e23da77462fece2b0f51c75b5c070d | from django.shortcuts import render, render_to_response, get_object_or_404
from principal.models import Jugador, Pareja, Partido, Pista, Arbitro
from django.contrib.auth.models import User
from django.http import HttpResponse, HttpResponseRedirect
from django.template import RequestContext
def inicio(request):
    """Render the home page."""
    # render() replaces the deprecated render_to_response/context_instance
    # pair (removed in modern Django) with identical output.
    return render(request, "inicio.html")
def portada(request):
    """Render the cover page."""
    # render() replaces the deprecated render_to_response/context_instance pair.
    return render(request, "portada.html")
def index(request):
    """Render the index page."""
    # render() replaces the deprecated render_to_response/context_instance pair.
    return render(request, "index.html")
def lista_jugadores(request):
    """List every player."""
    jugadores = Jugador.objects.all()
    # render() replaces the deprecated render_to_response/context_instance pair.
    return render(request, 'lista_jugadores.html', {'jugadores': jugadores})
def jugador(request, nombre):
    """Show a single player looked up by name."""
    # get_object_or_404 turns a missing player into an HTTP 404 instead of
    # an unhandled Jugador.DoesNotExist server error.
    x = get_object_or_404(Jugador, nombre=nombre)
    return render(request, 'jugador.html', {'y': x})
def lista_arbitros(request):
    """List every referee."""
    arbitros = Arbitro.objects.all()
    # render() replaces the deprecated render_to_response/context_instance pair.
    return render(request, "lista_arbitros.html", {'datos': arbitros})
def lista_partidos(request):
    """List matches ordered by date."""
    partidos = Partido.objects.order_by('fecha')
    # render() replaces the deprecated render_to_response/context_instance pair.
    return render(request, "lista_partidos.html", {'datos': partidos})
def partido(request, idpartido):
    """Show a single match looked up by its id."""
    # get_object_or_404 turns a missing match into an HTTP 404 instead of
    # an unhandled Partido.DoesNotExist server error.
    x = get_object_or_404(Partido, idpartido=idpartido)
    return render(request, 'partido.html', {'y': x})
def lista_pistas(request):
    """List every court."""
    pistas = Pista.objects.all()
    # render() replaces the deprecated render_to_response/context_instance pair.
    return render(request, "lista_pistas.html", {'datos': pistas})
def lista_parejas(request):
    """List every pair."""
    parejas = Pareja.objects.all()
    # render() replaces the deprecated render_to_response/context_instance pair.
    return render(request, "lista_parejas.html", {'datos': parejas})
def ranking(request):
    """Show pairs ranked by points, highest first."""
    # The leading '-' orders descending.
    parejas = Pareja.objects.order_by('-puntos')
    # render() replaces the deprecated render_to_response/context_instance pair.
    return render(request, "ranking.html", {'datos': parejas})
|
999,506 | 1eebd2e07bb61ecfc3b26b75682f2543288ee5b8 | 1:
1.1:
False
1.2:
True
1.3:
False
1.4:
True
1.5:
True
'9/15'
2:
len(s)*2-len(s)**2
'10/10'
3:
3.1:
b
3.2:
a
3.3:
b
'10/15'
4:
1:
g
2:
h
3:
f
4:
i
5:
c
6:
d
'12/18'
5:
c
'0/5'
6:
6.1.
class LIFO(JobQueue):
def depart(self):
try:
return self.job.pop()
except:
print 'depart called with an empty queue'
raise ValueError('EmptyQueue')
change busStop to be a subclass of LIFO
'10/10'
6.2.
No, because so many passengers were already left over by the low-capacity buses.
'0/7'
7:
2
'0/5'
8:
return totPassengers/leftWaiting
'3/5'
9:
1:
C
2:
A
3:
B
'10/10'
|
999,507 | 8eca16b83eddf6d994537c4fb81ef70d263571ae | import json
from difflib import get_close_matches
# imported data
data = json.load(open("data.json"))
# translate is not a good function name
def translate(w):
    """Look up *w* in the loaded dictionary data and return its definition.

    Tries the word as typed (lowercased), then Title Case (proper nouns),
    then UPPER CASE (acronyms). When nothing matches exactly, offers the
    closest known word and, if the user confirms, returns that word's
    definition.
    """
    # The dictionary keys are matched case-insensitively via the fallbacks
    # below, so normalize the raw input first.
    w = w.lower()
    if w in data:
        return data[w]
    # Entries stored in title case (e.g. proper nouns).
    elif w.title() in data:
        return data[w.title()]
    # Entries stored in upper case (acronyms).
    elif w.upper() in data:
        return data[w.upper()]
    # No exact hit: suggest the closest key, if any.
    elif len(get_close_matches(w, data.keys())) > 0:
        suggestion = get_close_matches(w, data.keys())[0]
        yn = input("Did you mean %s instead? Enter Y if Yes, N if No" % suggestion)
        # Accept lower-case answers too; 'Y'/'N' keep working as before.
        if yn.upper() == "Y":
            # Bug fix: return the suggested word's definition rather than
            # the word itself, so this path prints a meaning like the others.
            return data[suggestion]
        elif yn.upper() == "N":
            # Also fixes the "does'nt" typo in the user-facing message.
            return "The word doesn't exist"
        else:
            return "We did not understand your query"
    else:
        return "The word does not exist"
# Prompt the user for a word and print its definition(s).
word = input ("Enter word: ")
output = translate(word)
# A definition may be stored as a list of meanings; print each on its own line.
if type(output) == list:
    for item in output:
        print(item)
else:
    print(output)
999,508 | bcc60d540edd22d75c2189b0b988d276f4984971 | #!/usr/bin/env python3
import ast
import base64
import argparse
from datetime import timedelta
import boto3
from psycopg2 import sql
from botocore.exceptions import ClientError
from use_postgres import UseDatabase
def main():
    """Connects to Aurora Database, calculates the delta between
    most recent GROW anomaly and most recent GROW recorded date.
    Inserts the delta as 'days_since_anomaly' column in
    'grow_anomalies' Aurora table.
    """
    aurora_secret = get_aurora_secret()
    # NOTE(review): the 'engine' field is used as the database name here --
    # confirm the secret really stores the dbname under that key.
    aurora_creds = {
        'host': aurora_secret['host'],
        'port': aurora_secret['port'],
        'dbname': aurora_secret['engine'],
        'user': aurora_secret['username'],
        'password': aurora_secret['password']
    }
    with UseDatabase(aurora_creds) as cursor:
        # Latest anomaly date per grow table (max across the three sensors).
        sql_anomaly = """SELECT grow_table,
                        MAX(GREATEST(soil_date, light_date, air_date))
                        FROM grow_anomalies
                        GROUP BY grow_table;"""
        cursor.execute(sql_anomaly)
        anomaly_dates = cursor.fetchall()
        all_deltas = []
        # For each grow table, compare its newest recorded datetime with the
        # newest anomaly date, yielding a timedelta per table.
        for i in anomaly_dates:
            # Table name interpolated safely via sql.Identifier.
            sql_select = sql.SQL("""SELECT MAX(datetime)
                        FROM {}""").format(sql.Identifier(i[0]))
            cursor.execute(sql_select)
            result_datetime = cursor.fetchone()
            all_deltas.append([i[0], result_datetime[0] - i[1]])
        # Persist the day counts back into grow_anomalies.
        for i in all_deltas:
            sql_update = sql.SQL("""UPDATE public.grow_anomalies
                        SET days_since_anomaly = {}
                        WHERE grow_table = {}""").format(
                sql.Literal(i[1].days),
                sql.Literal(i[0])
            )
            cursor.execute(sql_update)
def get_aurora_secret():
    """Retrieve AWS RDS Aurora credentials from AWS Secrets Manager.

    Returns the parsed secret dict for string secrets, or the decoded
    raw bytes for binary secrets.

    Raises:
        botocore.exceptions.ClientError: propagated unchanged for any
            retrieval failure (decryption, throttling, missing secret, ...).
    """
    secret_name = "grow-data-key"
    region_name = "eu-west-1"

    # Create a Secrets Manager client
    session = boto3.session.Session()
    client = session.client(
        service_name='secretsmanager',
        region_name=region_name
    )
    try:
        get_secret_value_response = client.get_secret_value(
            SecretId=secret_name
        )
    except ClientError:
        # Bug fix: the previous code re-raised only a hard-coded list of
        # error codes and silently fell through (returning None) for any
        # other ClientError. Every retrieval failure is fatal here, so
        # always propagate.
        raise
    if 'SecretString' in get_secret_value_response:
        secret = get_secret_value_response['SecretString']
        # NOTE(review): literal_eval parses a Python-dict-shaped secret;
        # if the secret is strict JSON (true/false/null), json.loads is
        # required instead -- confirm the stored format.
        return ast.literal_eval(secret)
    return base64.b64decode(get_secret_value_response['SecretBinary'])
# Run only when executed as a script, not on import.
if __name__ == '__main__':
    main()
999,509 | 1970cf29aa92b1264925eecefa7c3dd88a4a0d4b | '''
Created on 18.06.2009
@author: Lars Vogel
'''
def add(a,b):
    """Return the sum of a and b."""
    total = a + b
    return total
def addFixedValue(a, y=5):
    """Return *a* plus a fixed offset.

    :param a: value to shift
    :param y: offset added to a; defaults to 5 (the previously hard-coded
        constant), so existing callers behave unchanged
    """
    return y + a
# Smoke-test prints: expected output is 3 and 8.
print( add(1, 2))
print( addFixedValue(3))
999,510 | 66e0dd72c4ce890095fef48e4b7d421a3585f516 | import numpy as np
import scipy.ndimage as ndimage
import matplotlib.pyplot as plt
import cv2
import math
from scipy import misc
# int() truncates toward zero, so this prints 10.
print(int(10.62545))
|
999,511 | ba9fff76e1081767a0c5e22e5ce13dee5c7f5a4b | #!/usr/bin/env python
# Mikhail (myke) Kolodin, 2022
# 2022-12-14 2022-12-14 1.0
# prod3max.py
# даны случайные числа [-100 .. 100],
# найти произведение 3 макс различных чисел
from random import randint
from math import prod
MINVAL = -100 # мин. число
MAXVAL = 100 # макс. число
LEN = 10 # длина массива
SUB = 3 # длина вырезки
TESTS = 10 # сколько тестов
def doit():
    """Run one test: build a random array and print the product of its
    three largest distinct values (fewer if under three distinct values)."""
    sample = [randint(MINVAL, MAXVAL) for _ in range(LEN)]
    print("исходный массив", sample)
    # Distinct values, largest first, truncated to SUB entries.
    top_values = sorted(set(sample), reverse=True)[:SUB]
    product = prod(top_values)
    print("числа:", top_values, ", произведение:", product)
# Run all tests.
for test in range(1, TESTS+1):
    print("\nтест №", test)
    doit()
# примечание:
# если различных чисел менее 3, возьмутся только различные, пусть их 2 или 1
# (меньше 1 быть не может :) )
# ~ тест № 1
# ~ исходный массив [44, 64, 26, 98, 60, 0, -69, 100, -60, -10]
# ~ числа: [100, 98, 64] , произведение: 627200
# ~ тест № 2
# ~ исходный массив [-72, 29, -48, -43, 76, 12, -20, 48, 47, -40]
# ~ числа: [76, 48, 47] , произведение: 171456
# ~ тест № 3
# ~ исходный массив [-24, 24, -14, -14, -49, 71, 74, -53, 42, 1]
# ~ числа: [74, 71, 42] , произведение: 220668
# ~ тест № 4
# ~ исходный массив [-63, -74, -53, -75, 61, -68, -25, 47, -31, 4]
# ~ числа: [61, 47, 4] , произведение: 11468
# ~ тест № 5
# ~ исходный массив [30, -57, -37, -66, 88, -1, 18, 37, 83, -80]
# ~ числа: [88, 83, 37] , произведение: 270248
# ~ тест № 6
# ~ исходный массив [67, 54, -68, -52, -54, -87, -62, -77, -74, 13]
# ~ числа: [67, 54, 13] , произведение: 47034
# ~ тест № 7
# ~ исходный массив [-97, 84, 89, -45, -34, 28, 97, -81, -74, -35]
# ~ числа: [97, 89, 84] , произведение: 725172
# ~ тест № 8
# ~ исходный массив [-4, -5, -42, -62, -64, -94, 83, 36, -58, -82]
# ~ числа: [83, 36, -4] , произведение: -11952
# ~ тест № 9
# ~ исходный массив [73, -12, 67, -74, -94, -93, -51, 58, 27, -32]
# ~ числа: [73, 67, 58] , произведение: 283678
# ~ тест № 10
# ~ исходный массив [27, -86, -97, 52, -17, 52, 46, 78, -51, -88]
# ~ числа: [78, 52, 46] , произведение: 186576
|
999,512 | e1a5e719133c7e1ffe59008df09f66c1512d92a7 | import os
import time
from pprint import pprint
# releaseTime = template_releaseTimeStamp; // template_releaseTime_human
# beneficiary = address('template_beneficiarr_address');
# string constant public name = "template_constract_name";
# Locker configuration entries. Each entry describes one beneficiary:
# 'address' is the beneficiary wallet, 'startdate' begins the one-year
# lock, 'name' labels the generated files, and 'iscurent' (sic) marks
# entries that main() should actually process.
options = [
    { "address": "0x18089Cb45906F19889c44c23A86b96062C245865",
    "startdate": "2018-08-22 0:00:00",
    "name": "Tom",
    "iscurent": 1,
    },
]
def genContract(item):
    """Render the Solidity locker contract for one options entry.

    Fills the template placeholders (release timestamp, human-readable
    release date, beneficiary address, contract name) and writes the
    result next to the template. Mutates *item* in place with derived
    paths and timestamps so genAbiBin() can reuse them.
    """
    # setup paths
    startdate = item["startdate"]
    address = item["address"]
    name = item["name"]
    fileprefix = "smn_{}_{}_{}".format(startdate[:10], address, name)
    dirname = "../../../contracts/locker/smnlocker/"
    template = os.path.join(dirname, "template.sol")
    contract = os.path.join(dirname, fileprefix + ".sol")
    fileprefix = os.path.join(dirname, fileprefix)
    item["template"] = template
    item["contract"] = contract
    item["fileprefix"] = fileprefix
    # timestamp: release is exactly 365 days after the start date,
    # interpreted in the local timezone.
    startTimestamp = time.mktime(time.strptime(startdate, "%Y-%m-%d %H:%M:%S"))
    startTimestamp = int(startTimestamp)
    releaseTimestamp = startTimestamp + 365*24*3600
    releaseDate = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(releaseTimestamp))
    item["startTimestamp"] = startTimestamp
    item["releaseTimestamp"] = releaseTimestamp
    item["releaseDate"] = releaseDate
    pprint(item)
    # gen contract from template
    # NOTE(review): the placeholder names below ("beneficiarr",
    # "constract") look misspelled but must match template.sol exactly --
    # do not "fix" them here without changing the template too.
    templateStr = open(template).read()
    templateStr = templateStr.replace("template_releaseTimeStamp", str(releaseTimestamp))
    templateStr = templateStr.replace("template_releaseTime_human", releaseDate)
    templateStr = templateStr.replace("template_beneficiarr_address", address)
    templateStr = templateStr.replace("template_constract_name", "wtc locker for " + name)
    # save contract
    with open(contract, 'w') as dataout:
        dataout.write(templateStr)
def genAbiBin(item):
    """Compile the generated contract with solc and save its bin/abi files.

    Parses solc's stdout line by line: a '======' header names the current
    contract, 'Binary:'/'ABI' headers announce that the *next* line holds
    the payload, and only the WaltonTokenLocker contract is persisted.
    """
    pprint(item)
    contract = item["contract"]
    binname = item["fileprefix"] + ".bin"
    abiname = item["fileprefix"] + ".abi"
    # compile
    cmd = 'solc {} --bin --abi'.format(item["contract"])
    print(cmd)
    # cnt counts lines since the last section header; the payload is the
    # line where cnt == 1 (the line immediately after 'Binary:' or 'ABI').
    cnt = 0
    contractName = ''
    codeType = ''
    for line in os.popen(cmd).readlines():
        print(line)
        if '======' in line:
            # Header looks like "======= path:ContractName =======".
            contractName = line.split(':')[1].split()[0]
        if 'Binary:' in line:
            cnt = 0
            codeType = 'bin'
        if 'ABI' in line:
            cnt = 0
            codeType = 'abi'
        if cnt == 1:
            #print(contractName, line[:100]),
            if contractName in ["WaltonTokenLocker"] and '====' not in line:
                if codeType == 'bin':
                    # Binary payload is stored 0x-prefixed.
                    code = '0x%s' % line.strip()
                    with open(binname, 'w') as outfile:
                        outfile.write(code)
                else:
                    code = line.strip()
                    with open(abiname, 'w') as outfile:
                        outfile.write(code)
                #string = '%s%s = %s;\n' % (codeType, contractName, code)
                #outfile.write(string)
                #print(string)
        cnt += 1
def main():
    """Process every active options entry: render its contract, then
    compile it into .bin/.abi files."""
    for entry in options:
        # Skip entries not flagged as current.
        if not entry["iscurent"]:
            continue
        genContract(entry)
        genAbiBin(entry)
if __name__ == "__main__":
main()
|
999,513 | 704bc480cd14ea62c15d773b862b0a748abb2ad6 | from django.shortcuts import render, redirect, get_object_or_404
from django.views.decorators.http import require_POST
from django.utils.translation import ugettext_lazy as _, ungettext
from django.contrib import messages
from django.conf import settings
from django.contrib.sitemaps import Sitemap
from django.template import TemplateDoesNotExist
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from haystack.query import SearchQuerySet
from froide.foirequest.models import FoiRequest
from froide.helper.utils import render_400, render_403
from froide.helper.cache import cache_anonymous_page
from .models import (PublicBody,
PublicBodyTag, FoiLaw, Jurisdiction)
from .csv_import import CSVImporter
def index(request, jurisdiction=None, topic=None):
    """List public bodies, optionally filtered by jurisdiction slug, topic
    tag slug, and a free-text query, paginated at 50 per page."""
    if jurisdiction is not None:
        jurisdiction = get_object_or_404(Jurisdiction, slug=jurisdiction)
    if topic is not None:
        topic = get_object_or_404(PublicBodyTag, slug=topic)
    query = request.GET.get('q', '')
    if query:
        # Full-text search via haystack when a query is present.
        publicbodies = SearchQuerySet().models(PublicBody).auto_query(query)
    else:
        publicbodies = PublicBody.objects.get_list()
    # The haystack SearchQuerySet filters on plain names while the ORM
    # queryset filters on model instances -- hence the `if query` switches.
    if topic:
        publicbodies = publicbodies.filter(tags=topic.name if query else topic)
    if jurisdiction:
        publicbodies = publicbodies.filter(
            jurisdiction=jurisdiction.name if query else jurisdiction)
    page = request.GET.get('page')
    paginator = Paginator(publicbodies, 50)
    try:
        publicbodies = paginator.page(page)
    except PageNotAnInteger:
        # Missing/invalid page parameter: show the first page.
        publicbodies = paginator.page(1)
    except EmptyPage:
        # Page number past the end: show the last page.
        publicbodies = paginator.page(paginator.num_pages)
    return render(request, 'publicbody/list.html', {
        'object_list': publicbodies,
        'jurisdictions': Jurisdiction.objects.get_list(),
        'jurisdiction': jurisdiction,
        'topic': topic,
        'topics': PublicBodyTag.objects.get_topic_list(),
        'query': query,
    })
@cache_anonymous_page(15 * 60)
def show_jurisdiction(request, slug):
    """Render a jurisdiction page, preferring a slug-specific template."""
    jurisdiction = get_object_or_404(Jurisdiction, slug=slug)
    context = {
        "object": jurisdiction,
        "pb_count": PublicBody.objects.filter(jurisdiction=jurisdiction).count(),
        "laws": FoiLaw.objects.filter(
            meta=False, jurisdiction=jurisdiction).order_by('priority'),
        "foirequests": FoiRequest.published.filter(jurisdiction=jurisdiction)[:5]
    }
    # Try a per-jurisdiction template first, falling back to the generic one.
    specific_template = 'publicbody/jurisdiction/%s.html' % jurisdiction.slug
    try:
        return render(request, specific_template, context)
    except TemplateDoesNotExist:
        return render(request, 'publicbody/jurisdiction.html', context)
def show_foilaw(request, slug):
    """Display a single freedom-of-information law."""
    foi_law = get_object_or_404(FoiLaw, slug=slug)
    return render(request, 'publicbody/show_foilaw.html', {"object": foi_law})
def show_publicbody(request, slug):
    """Display one public body with its most recent published requests,
    resolution statistics and total request count."""
    public_body = get_object_or_404(PublicBody, slug=slug)
    published = FoiRequest.published
    context = {
        'object': public_body,
        'foirequests': published.filter(
            public_body=public_body).order_by('-last_message')[:10],
        'resolutions': published.get_resolution_count_by_public_body(public_body),
        'foirequest_count': published.filter(public_body=public_body).count()
    }
    return render(request, 'publicbody/show.html', context)
@require_POST
def confirm(request):
    """Confirm a public body (staff only) and report how many queued
    messages were sent as a result."""
    if not request.user.is_authenticated:
        return render_403(request)
    if not request.user.is_staff and not request.user.is_superuser:
        return render_403(request)
    try:
        pb = get_object_or_404(PublicBody, pk=int(request.POST.get('public_body', '')))
    except ValueError:
        # Non-numeric pk in the POST body.
        return render_400(request)
    result = pb.confirm()
    if result is None:
        messages.add_message(request, messages.ERROR,
            _('This request was already confirmed.'))
    else:
        # Bug fix: this is the success path, so report at the SUCCESS
        # level instead of ERROR.
        messages.add_message(request, messages.SUCCESS,
            ungettext('%(count)d message was sent.',
                      '%(count)d messages were sent', result
            ) % {"count": result})
    return redirect('admin:publicbody_publicbody_change', pb.id)
@require_POST
def import_csv(request):
    """Import public bodies from a CSV file at a POSTed URL (staff only)."""
    if not request.user.is_authenticated:
        return render_403(request)
    if not request.user.is_staff and not request.user.is_superuser:
        return render_403(request)
    # Note: the explicit request.method check that used to live here was
    # redundant -- @require_POST already rejects non-POST requests.
    importer = CSVImporter()
    url = request.POST.get('url')
    try:
        if not url:
            raise ValueError(_('You need to provide a url.'))
        importer.import_from_url(url)
    except Exception as e:
        # Surface import errors (bad URL, malformed CSV, ...) to the admin
        # user instead of crashing the request.
        messages.add_message(request, messages.ERROR, str(e))
    else:
        messages.add_message(request, messages.SUCCESS,
            _('Public Bodies were imported.'))
    return redirect('admin:publicbody_publicbody_changelist')
# Sitemaps emit absolute URLs; mirror the scheme of the configured site URL.
SITEMAP_PROTOCOL = 'https' if settings.SITE_URL.startswith('https') else 'http'
class PublicBodySitemap(Sitemap):
    """Sitemap entries for all public bodies."""
    protocol = SITEMAP_PROTOCOL
    changefreq = "monthly"
    priority = 0.6

    def items(self):
        return PublicBody.objects.all()

    def lastmod(self, obj):
        # PublicBody records modification time in `updated_at`.
        return obj.updated_at
class JurisdictionSitemap(Sitemap):
    """Sitemap entries for all jurisdictions."""
    protocol = SITEMAP_PROTOCOL
    changefreq = "yearly"
    priority = 0.8

    def items(self):
        return Jurisdiction.objects.all()
class FoiLawSitemap(Sitemap):
    """Sitemap entries for all freedom-of-information laws."""
    protocol = SITEMAP_PROTOCOL
    changefreq = "yearly"
    priority = 0.3

    def items(self):
        return FoiLaw.objects.all()

    def lastmod(self, obj):
        # FoiLaw records modification time in `updated` (no `_at` suffix).
        return obj.updated
|
999,514 | d5c73ddae8719281cd49393e7a061a7a9ff7140d | import hampath
import sattv
"""
BioComp.changeMer(30)
BioComp.createNodes(7)
BioComp.connectNodes(["0->1,3,6", "1->2,3", "2->1,3", "3->2,4,", "4->1,5", "5->2,6"]) # from research paper
BioComp.report()
"""
"""
SATTruthValue.changeMer(30)
"""
# SATTruthValue.createNodes(["`x1,x2,`x3", "x1,x2,~x3", "`x1,x2,x3"])
# SATTruthValue.createNodes(["`x1,x2,`x3,x4", "x1,x2,~x3,x4", "`x1,x2,x3,x4"])
SATTruthValue.createNodes(["~X3,~X16,~X18", "X5,X12,~X9", "~X13,~X2,X20", "X12,~X9,~X5", "X19,~X4,X6", "X9,X12,~X5", "~X1,X4,X11", "X13,~X2,~X19", "X5,X17,X9", "~X5,~X9,~X12", "X6,X11,X4", "~X15,~X17,X7", "~X6,X19,X13", "~X12,~X9,X5", "X12,X1,X14", "X20,X3,X2", "X10,~X7,~X8", "~X5,X9,~X12", "X18,~X20,X3", "~X10,~X18,~X16", "X1,~X11,~X14", "X8,~X7,~X15", "~X8,X16,~X10"]) # from research paper
SATTruthValue.report()
|
999,515 | 40ba05a61f1d93e2d2d45072bea8c5adbf5dd537 | from app import create_app
import os
import sys
sys.path.append(os.getcwd())
config_name ='development'
app = create_app(config_name)
if __name__ == "__main__":
app.run()
|
999,516 | a9341c04f99a950153f86b4cbb9517e03929e5b5 | import datetime
import random
from django.contrib import messages
from django.shortcuts import render, redirect
def index(request):
    """Render the game page, seeding session state on the first visit.

    Defaults: no gold, empty activity log, and 'unreachable' win/lose
    thresholds (infinity) until the player configures them.
    """
    # setdefault assigns only when the key is missing -- the idiomatic
    # replacement for the four try/except KeyError blocks this had before.
    request.session.setdefault('ninja_gold', 0)
    request.session.setdefault('activities', [])
    request.session.setdefault('min_gold', float('inf'))
    request.session.setdefault('max_moves', float('inf'))
    return render(request, 'index_bonus.html')
def process_money(request, place):
    """Apply a random gold change for visiting *place* and log the outcome.

    Each location has an earning range; the casino can also lose gold.
    After every move the session's win/lose conditions are checked.
    """
    # (min, max) gold change per known location.
    gold_ranges = {
        "farm": (10, 20),
        "cave": (5, 10),
        "house": (2, 5),
        "casino": (-50, 50),
    }
    place_lim = gold_ranges.get(place)
    if place_lim is None:
        # Unknown location in the URL: ignore the move instead of crashing
        # with an IndexError on an empty range, as the old code did.
        return redirect("index_bonus")
    rand_num = random.randint(place_lim[0], place_lim[1])
    request.session['ninja_gold'] += rand_num
    timestamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    if rand_num >= 0:
        request.session['activities']\
            .append(f"Earned {rand_num} golds from the {place}! ({timestamp})")
    else:
        request.session['activities']\
            .append(f"Entered a {place} and lost {abs(rand_num)} golds... Ouch... ({timestamp})")
    # Win: reached the gold goal within the allowed number of moves.
    if request.session['ninja_gold'] >= (request.session['min_gold']) and len(request.session['activities']) \
            <= request.session['max_moves']:
        messages.success(request, "CONGRATS. YOU WON!")
    # Lose: too many moves -- reset the game.
    elif len(request.session['activities']) > request.session['max_moves']:
        messages.error(request, "YOU HAVE EXCEEDED THE MAXIMUM NUMBER OF MOVES")
        return redirect("restart_session_bonus")
    return redirect("index_bonus")
def restart_session(request):
    """Wipe the player's gold and activity history, then show the game page."""
    fresh_state = {'ninja_gold': 0, 'activities': []}
    for key, value in fresh_state.items():
        request.session[key] = value
    return redirect("index_bonus")
|
999,517 | 2f8f2290d65c97d09aaf7053459dd74e6a7aaa51 | # Generated by Django 3.1.13 on 2021-09-13 11:02
import api_img.models
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial, auto-generated migration: creates the Images table.

    big_image only accepts jpg/jpeg files and is size-limited by a custom
    validator; min_image (the thumbnail) is optional.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Images',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('type', models.CharField(max_length=255, verbose_name='Тип изображения')),
                ('big_image', models.ImageField(upload_to='images/api_img/big/', validators=[django.core.validators.FileExtensionValidator(allowed_extensions=['jpg', 'jpeg'], message='Файл должен быть одним из: jpg, jpeg'), api_img.models.validate_file_image_max_size], verbose_name='Большое изображение')),
                ('min_image', models.ImageField(blank=True, null=True, upload_to='images/api_img/min/', verbose_name='Маленькое изображение')),
            ],
        ),
    ]
|
999,518 | 17fa58f664ed99daec39fbe5695765007e0c72da | import os.path
import math
import numpy as np
import conversions as convs
def calcs_all(name, lattice, gbdata, pa_parent, dim, proc, k):
    """Run WIELD grain-boundary energy calculations for every boundary.

    For dim == 1, writes a 1D input file per boundary and runs the external
    `wield` binary on it; outputs are moved into the ebsd_energy tree at
    the end. The 2D branch is currently commented out.

    :param name: run label; also names the output directories
    :param lattice: lattice include file name (only used by the 2D branch)
    :param gbdata: array of boundary rows; columns 0-5 hold the two sets of
        Bunge Euler angles, 15-18 the trace endpoint coordinates --
        TODO confirm the column layout against the data producer
    :param pa_parent: path of the parent project directory
    :param dim: 1 for 1D calculations (anything else is rejected)
    :param proc: process count passed to wield's -n flag
    :param k: row index used by the (disabled) 2D branch
    """
    print('Calculating energy values')
    path = pa_parent+'/wield/bin'
    os.chdir(path)
    os.system('mkdir '+name)
    # Euler angle triples for both grains, plus boundary trace endpoints.
    Eul1 = gbdata[:,(0,1,2)]
    Eul2 = gbdata[:,(3,4,5)]
    xe1 = gbdata[:, 16]
    ye1 = gbdata[:, 15]
    xe2 = gbdata[:, 18]
    ye2 = gbdata[:, 17]
    i = 0
    ### Matrix to transform coordinates into WIELD default frame.
    Base = ([[1,0,0],[0,0,-1],[0,1,0]])
    if dim == 1:
        while i < len(gbdata):
            # Euler Rotation Matrix
            e1 = convs.eultomat(Eul1[i,:])
            e2 = convs.eultomat(Eul2[i,:])
            # In-plane trace angle from the endpoint deltas.
            nl = [0,0]
            nl[0]=xe1[i]-xe2[i]
            nl[1]=ye1[i]-ye2[i]
            if nl[0]==0:
                # Vertical trace: arctan would divide by zero.
                inplane = np.pi/2 + np.pi/2
            else:
                inplane = np.arctan(nl[1]/nl[0]) + np.pi/2
            # Normalize the angle into [0, pi).
            if inplane < 0:
                inplane = np.pi + inplane
            if inplane >= np.pi:
                inplane = inplane - np.pi
            c, s = np.cos(inplane), np.sin(inplane)
            R = np.matrix([[c, -s, 0], [s, c, 0], [0, 0, 1]])
            Rt = convs.transpose(R)
            g1 = np.matmul(Rt, e1)
            # NOTE(review): e1 is used again here -- g2 should very likely
            # come from e2. gg1/gg2 are not used further below, so this
            # suspected copy-paste bug currently has no effect. Confirm.
            g2 = np.matmul(Rt, e1)
            gg1 = np.matmul(Base, g1)
            gg2 = np.matmul(Base, g2)
            #### Create temporary input file for convenience
            infile = str(name)+'.in'
            fout = open(infile, 'w')
            fout.write('$Energy1D\n#$Energy2D\n$a 3.615\n$Sigma1 $a*0.175\n$Sigma2 $Sigma1\n')
            fout.write('$Epsilon 0.5\n$GroundZ1 1 1 1\n$GroundZ2 1 1 1\n$GroundX1 -1 1 0\n$GroundX2 -1 1 0\n$Tolerance 1E-8\n$AlphaX1 $a\n$AlphaY1 $a\n$AlphaZ1 $a\n$Order1 8\n$X1 0. 0. 0. 0. 0. $a/2. -$a/2. $a/2. -$a/2. $a/2. $a/2. -$a/2. -$a/2.\n$Y1 0. $a/2. $a/2. -$a/2. -$a/2. 0. 0. 0. 0. $a/2. -$a/2. $a/2. -$a/2.\n$Z1 0. $a/2. -$a/2. $a/2. -$a/2. $a/2. $a/2. -$a/2. -$a/2. 0. 0. 0. 0.\n$AlphaX2 $AlphaX1\n$AlphaY2 $AlphaY1\n$AlphaZ2 $AlphaZ1\n$Order2 $Order1\n$X2 $X1\n$Y2 $Y1\n$Z2 $Z1\n')
            fout.write('$OutFile ../bin/'+name+'/en_'+str(i)+'.out\n')
            fout.write('$BungeEuler1 '+str(np.degrees(Eul1[i,0]))+' '+str(np.degrees(Eul1[i,1]))+' '+str(np.degrees(Eul1[i,2]))+'\n')
            fout.write('$BungeEuler2 '+str(np.degrees(Eul2[i,0]))+' '+str(np.degrees(Eul2[i,1]))+' '+str(np.degrees(Eul2[i,2]))+'\n')
            fout.write('$TraceAngle '+str(np.degrees(inplane))+'\n')
            #fout.write('$RotAxes1 z x z x\n$Rots1 ($phi2_1) ($Phi_1) ($phi1_1) 90\n')
            #fout.write('$RotAxes2 z x z x\n$Rots2 ($phi2_2) ($Phi_2) ($phi1_2) 90\n')
            ### Line for 1D calculations
            fout.write('$ThetaRotX1 1\n$ThetaRotX2 1\n$ThetaMin -90\n$ThetaMax 90\n$DTheta 5')
            ### Line for 2D calculations
            #fout.write('$ThetaMin 0\n$ThetaMax 360\n$DTheta 10\n$RMin 0\n$RMax 1\n$DR 0.1\n')
            fout.close()
            os.system('./wield '+str(infile)+' -n '+str(proc))
            print (name, 'Boundary', i,'/',len(gbdata))
            i = i +1
    #elif dim ==2:
        #e1 = convs.eultomat(Eul1[k,:])
        #e2 = convs.eultomat(Eul2[k,:])
        #gg1 = np.matmul(Base, e1)
        #gg2 = np.matmul(Base, e2)
        ##### Create temporary input file for convenience
        #infile = 'infile.in'
        #fout = open(infile, 'w')
        #fout.write('$Energy2D\ninclude ../'+lattice+'.in\n')
        #fout.write('$OutFile ../bin/'+name+'/en2D_'+str(i)+'.out\n')
        #fout.write('$AxisX1 '+str(gg1[0,0])+' '+str(gg1[1,0])+' '+str(gg1[2,0])+' \n$AxisY1 '+str(gg1[0,2])+' '+str(gg1[1,2])+' '+str(gg1[2,2])+'\n')
        #fout.write('$AxisX1 '+str(gg2[0,0])+' '+str(gg2[1,0])+' '+str(gg2[2,0])+' \n$AxisY1 '+str(gg2[0,2])+' '+str(gg2[1,2])+' '+str(gg2[2,2])+'\n')
        #fout.write('$ThetaRotX1 1 \n$ThetaRotX2 1\n')
        #fout.write('$ThetaMin 0\n$ThetaMax 360 \n$DTheta 5\n')
        #fout.write('$RMin 0\n$Rmax 1\n$DR 0.01')
        #fout.close()
        #os.system('./main infile.in -n '+str(proc))
        ##print 'Boundaries remaining:', len(gbdata)-i
    else:
        print('Wrong dimension')
    # NOTE(review): the message below is missing a space before 'finished'.
    print('Energy calculations for '+name+'finished.')
    try:
        os.system('mkdir '+pa_parent+'/ebsd_energy/outputs/'+name+'/energy')
    except:
        print ('path already exists')
    os.system('mv -v '+path+'/'+name+'/ '+pa_parent+'/ebsd_energy/outputs/'+name+'/energy')
|
999,519 | 4e5f83c98aeebe4171013058c8474cb6152a5407 | #!/usr/bin/env python
from __future__ import print_function
'''Script that checks for differences at a given run number (or at the last IOV) between two Global Tags
'''
__author__ = 'Marco Musich'
__copyright__ = 'Copyright 2016, CERN CMS'
__credits__ = ['Giacomo Govi', 'Salvatore Di Guida']
__license__ = 'Unknown'
__maintainer__ = 'Marco Musich'
__email__ = 'marco.musich@cern.ch'
__version__ = 1
import datetime,time
import os,sys
import string, re
import subprocess
import calendar
from optparse import OptionParser,OptionGroup
from prettytable import PrettyTable
import CondCore.Utilities.conddblib as conddb
class bcolors:
    """ANSI escape sequences for coloring/formatting terminal output."""
    HEADER = '\033[95m'     # bright magenta
    OKBLUE = '\033[94m'     # bright blue
    OKGREEN = '\033[92m'    # bright green
    WARNING = '\033[93m'    # bright yellow
    FAIL = '\033[91m'       # bright red
    ENDC = '\033[0m'        # reset all attributes
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
#####################################################################
def getCommandOutput(command):
    """Execute *command* in a shell and return its captured stdout.

    Prints a diagnostic when the command exits with a non-zero status.

    Arguments:
    - `command`: Shell command to be invoked by this function.
    """
    pipe = os.popen(command)
    output = pipe.read()
    exit_status = pipe.close()
    if exit_status:
        print('%s failed w/ exit code %d' % (command, exit_status))
    return output
#################
def main():
    """Compare two Global Tags and report the records whose tags differ.

    For every (record, label) pair mapped by the reference and target
    Global Tags the tag names are compared; for each differing pair the
    payload active at the test run number (or the very last IOV when -L
    is given) is looked up.  The result is printed as a PrettyTable and
    also written to diff_<refGT>_vs_<tarGT>.twiki.
    """
    # conddblib ships with CMSSW: refuse to run outside a release area.
    if "CMSSW_RELEASE_BASE" in os.environ:
        print("\n")
        print("==================================================")
        print("This script is powered by conddblib")
        print("served to you by",os.getenv('CMSSW_RELEASE_BASE'))
        print("==================================================\n")
    else:
        print("+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
        print("+ This tool needs CMSSW libraries")
        print("+ Easiest way to get it is via CMSSW is")
        print("+ cmsrel CMSSW_X_Y_Z #get your favorite")
        print("+ cd CMSSW_X_Y_Z/src")
        print("+ cmsenv")
        print("+ cd -")
        print("and then you can proceed")
        print("+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
        sys.exit(1)

    desc="""This is a description of %prog."""
    parser = OptionParser(description=desc,version='%prog version 0.1')
    parser.add_option('-r','--run' ,help='test run number', dest='testRunNumber', action='store', default='251883')
    parser.add_option('-R','--ReferenceGT',help='Reference Global Tag', dest='refGT', action='store', default='GR_H_V58C')
    parser.add_option('-T','--TargetGT' ,help='Target Global Tag' , dest='tarGT', action='store', default='74X_dataRun2_HLTValidation_Queue')
    parser.add_option('-L','--last' ,help='compares the very last IOV' , dest='lastIOV',action='store_true', default=False)
    parser.add_option('-v','--verbose' ,help='returns more info', dest='isVerbose',action='store_true',default=False)
    parser.add_option('-m','--match' ,help='print only matching',dest='stringToMatch',action='store',default='')
    (opts, args) = parser.parse_args()

    ####################################
    # Set up connections with the DB
    ####################################
    con = conddb.connect(url = conddb.make_url("pro"))
    session = con.session()
    IOV = session.get_dbtype(conddb.IOV)
    TAG = session.get_dbtype(conddb.Tag)
    GT = session.get_dbtype(conddb.GlobalTag)
    GTMAP = session.get_dbtype(conddb.GlobalTagMap)
    RUNINFO = session.get_dbtype(conddb.RunInfo)

    ####################################
    # Get the time info for the test run
    ####################################
    bestRun = session.query(RUNINFO.run_number, RUNINFO.start_time, RUNINFO.end_time).filter(RUNINFO.run_number == int(opts.testRunNumber)).first()
    if bestRun is None:
        raise Exception("Run %s can't be matched with an existing run in the database." % opts.testRunNumber)
    print("Run",opts.testRunNumber," |Start time",bestRun[1]," |End time",bestRun[2],".")

    ####################################
    # Get the Global Tag snapshots
    ####################################
    refSnap = session.query(GT.snapshot_time).\
        filter(GT.name == opts.refGT).all()[0][0]
    tarSnap = session.query(GT.snapshot_time).\
        filter(GT.name == opts.tarGT).all()[0][0]
    print("reference GT (",opts.refGT ,") snapshot: ",refSnap," | target GT (",opts.tarGT,") snapshot",tarSnap)

    ####################################
    # Get the Global Tag maps
    ####################################
    GTMap_ref = session.query(GTMAP.record, GTMAP.label, GTMAP.tag_name).\
        filter(GTMAP.global_tag_name == opts.refGT).\
        order_by(GTMAP.record, GTMAP.label).\
        all()
    GTMap_tar = session.query(GTMAP.record, GTMAP.label, GTMAP.tag_name).\
        filter(GTMAP.global_tag_name == opts.tarGT).\
        order_by(GTMAP.record, GTMAP.label).\
        all()

    text_file = open(("diff_%s_vs_%s.twiki") % (opts.refGT,opts.tarGT), "w")

    # (record, label) -> (reference tag, target tag); "" marks a side on
    # which the (record, label) pair is not mapped at all.
    differentTags = {}
    for element in GTMap_ref:
        RefRecord = element[0]
        RefLabel = element[1]
        RefTag = element[2]
        for element2 in GTMap_tar:
            if (RefRecord == element2[0] and RefLabel==element2[1]):
                if RefTag != element2[2]:
                    differentTags[(RefRecord,RefLabel)]=(RefTag,element2[2])

    ####################################
    ## Search for Records,Label not-found in the other list
    ####################################
    temp1 = [item for item in GTMap_ref if (item[0],item[1]) not in list(zip(list(zip(*GTMap_tar))[0],list(zip(*GTMap_tar))[1]))]
    for elem in temp1:
        differentTags[(elem[0],elem[1])]=(elem[2],"")
    temp2 = [item for item in GTMap_tar if (item[0],item[1]) not in list(zip(list(zip(*GTMap_ref))[0],list(zip(*GTMap_ref))[1]))]
    for elem in temp2:
        differentTags[(elem[0],elem[1])]=("",elem[2])

    text_file.write("| *Record* | *"+opts.refGT+"* | *"+opts.tarGT+"* | Remarks | \n")

    t = PrettyTable()
    if(opts.isVerbose):
        t.field_names = ['/','',opts.refGT,opts.tarGT,refSnap,tarSnap]
    else:
        t.field_names = ['/','',opts.refGT,opts.tarGT]
    t.hrules=1
    if(opts.isVerbose):
        t.add_row(['Record','label','Reference Tag','Target Tag','hash1:time1:since1','hash2:time2:since2'])
    else:
        t.add_row(['Record','label','Reference Tag','Target Tag'])

    isDifferent=False

    ####################################
    # Loop on the difference
    ####################################
    for Rcd in sorted(differentTags):
        # empty lists at the beginning
        refTagIOVs=[]
        tarTagIOVs=[]
        if( differentTags[Rcd][0]!=""):
            refTagIOVs = session.query(IOV.since,IOV.payload_hash,IOV.insertion_time).filter(IOV.tag_name == differentTags[Rcd][0]).all()
            refTagInfo = session.query(TAG.synchronization,TAG.time_type).filter(TAG.name == differentTags[Rcd][0]).all()[0]
        if( differentTags[Rcd][1]!=""):
            tarTagIOVs = session.query(IOV.since,IOV.payload_hash,IOV.insertion_time).filter(IOV.tag_name == differentTags[Rcd][1]).all()
            tarTagInfo = session.query(TAG.synchronization,TAG.time_type).filter(TAG.name == differentTags[Rcd][1]).all()[0]
        if(differentTags[Rcd][0]!="" and differentTags[Rcd][1]!=""):
            if(tarTagInfo[1] != refTagInfo[1]):
                print(bcolors.WARNING+" *** Warning *** found mismatched time type for",Rcd,"entry. \n"+differentTags[Rcd][0],"has time type",refTagInfo[1],"while",differentTags[Rcd][1],"has time type",tarTagInfo[1]+". These need to be checked by hand. \n\n"+ bcolors.ENDC)

        if(opts.lastIOV):
            if(sorted(differentTags).index(Rcd)==0):
                print("=== COMPARING ONLY THE LAST IOV ===")

            lastSinceRef=-1
            lastSinceTar=-1

            # NOTE(review): if a tag exists only on one side its IOV list is
            # empty and the hash_last*TagIOV name below is never bound -- the
            # comparison then raises NameError, as in the original code.
            for i in refTagIOVs:
                if (i[0]>lastSinceRef):
                    lastSinceRef = i[0]
                    hash_lastRefTagIOV = i[1]
                    time_lastRefTagIOV = str(i[2])

            for j in tarTagIOVs:
                if (j[0]>lastSinceTar):
                    lastSinceTar = j[0]
                    hash_lastTarTagIOV = j[1]
                    time_lastTarTagIOV = str(j[2])

            if(hash_lastRefTagIOV!=hash_lastTarTagIOV):
                isDifferent=True
                text_file.write("| ="+Rcd[0]+"= ("+Rcd[1]+") | =="+differentTags[Rcd][0]+"== | =="+differentTags[Rcd][1]+"== | | \n")
                text_file.write("|^|"+hash_lastRefTagIOV+" <br> ("+time_lastRefTagIOV+") "+ str(lastSinceRef) +" | "+hash_lastTarTagIOV+" <br> ("+time_lastTarTagIOV+") " + str(lastSinceTar)+" | ^| \n")
                if(opts.isVerbose):
                    t.add_row([Rcd[0],Rcd[1],differentTags[Rcd][0],differentTags[Rcd][1],str(hash_lastRefTagIOV)+"\n"+str(time_lastRefTagIOV)+"\n"+str(lastSinceRef),str(hash_lastTarTagIOV)+"\n"+str(time_lastTarTagIOV)+"\n"+str(lastSinceTar)])
                else:
                    t.add_row([Rcd[0],Rcd[1],differentTags[Rcd][0]+"\n"+str(hash_lastRefTagIOV),differentTags[Rcd][1]+"\n"+str(hash_lastTarTagIOV)])
        else:
            ### reset all defaults
            theGoodRefIOV=-1
            theGoodTarIOV=-1
            sinceRefTagIOV=0
            sinceTarTagIOV=0
            RefIOVtime = datetime.datetime(1970, 1, 1, 0, 0, 0)
            TarIOVtime = datetime.datetime(1970, 1, 1, 0, 0, 0)
            theRefPayload=""
            theTarPayload=""
            theRefTime=""
            theTarTime=""

            ### loop on the reference IOV list
            for refIOV in refTagIOVs:
                ## keep the last payload active for the test run:
                ## - the IOV since must not exceed the run under consideration
                ## - the payload insertion time must not exceed the GT snapshot
                ## - the IOV since is larger than the last saved one, or equal
                ##   to it with a more recent insertion time
                ## Fix: the original expression relied on and/or precedence and
                ## applied the run cut only to the first branch and the snapshot
                ## cut only to the tie-break branch; explicit parentheses now
                ## implement the logic documented above.
                if ((refIOV[0] <= int(opts.testRunNumber)) and (refIOV[2] <= refSnap) and
                    ((refIOV[0] > sinceRefTagIOV) or ((refIOV[0] == sinceRefTagIOV) and (refIOV[2] > RefIOVtime)))):
                    sinceRefTagIOV = refIOV[0]
                    RefIOVtime = refIOV[2]
                    theGoodRefIOV=sinceRefTagIOV
                    theRefPayload=refIOV[1]
                    theRefTime=str(refIOV[2])

            ### loop on the target IOV list
            for tarIOV in tarTagIOVs:
                if ((tarIOV[0] <= int(opts.testRunNumber)) and (tarIOV[2] <= tarSnap) and
                    ((tarIOV[0] > sinceTarTagIOV) or ((tarIOV[0] == sinceTarTagIOV) and (tarIOV[2] >= TarIOVtime)))):
                    sinceTarTagIOV = tarIOV[0]
                    # Fix: this was 'tarIOVtime' (a typo), so TarIOVtime stayed
                    # frozen at the 1970 default and the insertion-time
                    # tie-break for the target tag never worked.
                    TarIOVtime = tarIOV[2]
                    theGoodTarIOV=sinceTarTagIOV
                    theTarPayload=tarIOV[1]
                    theTarTime=str(tarIOV[2])

            #print Rcd[0],theRefPayload,theTarPayload

            if(theRefPayload!=theTarPayload):
                isDifferent=True
                text_file.write("| ="+Rcd[0]+"= ("+Rcd[1]+") | =="+differentTags[Rcd][0]+"== | =="+differentTags[Rcd][1]+"== |\n")
                text_file.write("|^|"+theRefPayload+" ("+theRefTime+") | "+theTarPayload+" ("+theTarTime+") |\n")

                ### determine if it is to be shown (-m comma-separated substrings)
                isMatched=False
                tokens=opts.stringToMatch.split(",")
                decisions = [bool(Rcd[0].find(x)!=-1) for x in tokens]
                for decision in decisions:
                    isMatched = (isMatched or decision)

                if(opts.isVerbose):
                    if (opts.stringToMatch=="" or isMatched):
                        t.add_row([Rcd[0],Rcd[1],differentTags[Rcd][0],differentTags[Rcd][1],str(theRefPayload)+"\n"+str(theRefTime)+"\n"+str(theGoodRefIOV),str(theTarPayload)+"\n"+str(theTarTime)+"\n"+str(theGoodTarIOV)])
                else:
                    if (opts.stringToMatch=="" or isMatched):
                        t.add_row([Rcd[0],Rcd[1],differentTags[Rcd][0]+"\n"+str(theRefPayload),differentTags[Rcd][1]+"\n"+str(theTarPayload)])

    if(not isDifferent):
        if(opts.isVerbose):
            t.add_row(["None","None","None","None","None","None"])
        else:
            # Fix: this row had only three entries while the non-verbose table
            # has four columns, which makes PrettyTable raise.
            t.add_row(["None","None","None","None"])

    print(t)

if __name__ == "__main__":
    main()
|
999,520 | 130101b3e72a1660df9174487c3d11bbc1d9f33a | """
General Programming Solutions.
This module contains answers to the quesions in the General Programming section
of the 100 Data Science questions set.
"""
from collections import defaultdict
from itertools import repeat
import numpy as np
"""
1. Write a function that converts a dictionary of equal length lists into a
list of dictionaries.
{'a': [1, 2, 3], 'b': [3, 2, 1]}
=> [{'a': 1, 'b': 3}, {'a': 2, 'b': 2}, {'a': 3, 'b': 1}]
"""
def dict_to_list(dictionary):
    """
    Convert a dictionary of equal length lists into a list of dictionaries.

    Args:
        dictionary (dict): A dictionary of equal length lists.

    Returns:
        output (list): A list of dictionaries, one per position in the
            value lists, each mapping every key to its value at that position.

    Example:
        >>> dict_to_list({'a': [1, 2, 3], 'b': [3, 2, 1]})
        [{'a': 1, 'b': 3}, {'a': 2, 'b': 2}, {'a': 3, 'b': 1}]
    """
    keys = list(dictionary.keys())
    # Walk the value lists in lockstep; each tuple of aligned values
    # becomes one output dictionary.
    return [dict(zip(keys, values)) for values in zip(*dictionary.values())]
"""
2. Write the inverse function to the previous problem. Convert a list of
dictionaries into a dictionary of equal length lists.
[{'a': 1, 'b': 3}, {'a': 2, 'b': 2}, {'a': 3, 'b': 1}]
=> {'a': [1, 2, 3], 'b': [3, 2, 1]}
"""
def list_to_dict(list_of_dicts):
    """
    Convert a list of dictionaries into a dictionary of equal length lists.

    Args:
        list_of_dicts (list): A list of dictionaries.

    Returns:
        output (dict): A dictionary of equal length lists.

    Example:
        >>> my_list = [{'a': 1, 'b': 3}, {'a': 2, 'b': 2}, {'a': 3, 'b': 1}]
        >>> list_to_dict(my_list)
        {'a': [1, 2, 3], 'b': [3, 2, 1]}
    """
    output = defaultdict(list)
    for dict_ in list_of_dicts:
        for key, value in dict_.items():
            # Bug fix: the original appended into the input dict
            # (dict_[key].append(value)), which raised AttributeError on
            # scalar values and left `output` empty; accumulate here instead.
            output[key].append(value)
    return dict(output)
"""
3. Given a list of numbers representing the coefficients in a polynomial
(largest powers first), write a function that returns a pretty string
representation of the polynomial.
[1, 1, 1] => "x^2 + x + 1"
[2, -1, -2] => "2x^2 - x - 2"
[0, 9, -10] => "9x - 10"
"""
def list_to_poly(polynomial_list):
    """
    Create pretty string representation from list of polynomials.

    Convert a list of numbers representing the coefficients in a polynomial
    (largest powers first) into a pretty representation of the polynomial.
    Zero coefficients are skipped and unit coefficients drop the leading 1.

    Args:
        polynomial_list (list): A list of integers.

    Returns:
        polynomial (string): A pretty string representation of the polynomial.

    Example:
        >>> list_to_poly([1, 1, 1])
        'x^2 + x + 1'
        >>> list_to_poly([2, -1, -2])
        '2x^2 - x - 2'
        >>> list_to_poly([0, 9, -10])
        '9x - 10'
    """
    max_degree = len(polynomial_list) - 1
    terms = []
    for index, coeff in enumerate(polynomial_list):
        if coeff == 0:
            continue
        power = max_degree - index
        # Bug fix: the original always printed the coefficient, producing
        # "1x^2 + 1x + 1" instead of the documented "x^2 + x + 1".
        # Omit a unit coefficient except for the constant term.
        if abs(coeff) == 1 and power != 0:
            coeff_str = '-' if coeff == -1 else ''
        else:
            coeff_str = str(coeff)
        if power == 0:
            var_str = ''
        elif power == 1:
            var_str = 'x'
        else:
            var_str = 'x^{}'.format(power)
        terms.append(coeff_str + var_str)
    # Joining with ' + ' leaves '+ -' in front of negative terms; rewrite
    # those as subtraction for the conventional look.
    polynomial = ' + '.join(terms).replace('+ -', '- ')
    return polynomial
|
999,521 | f8940e4926bd2ac6ba29c70bcf349550a86e664d | ######################################VPC Details#############################################################
import boto3
from prettytable import PrettyTable
# Build one table listing id, Name tag and CIDR of every VPC in each region.
table = PrettyTable(['Region','VPC Id','VPC Name','VPC CIDR'])
REGIONS = [
'us-east-1',
'eu-west-1',
'ap-northeast-1'
]
for region in REGIONS:
    client = boto3.client('ec2',region_name=region)
    response = client.describe_vpcs()
    for vpc in response['Vpcs']:
        vpcId=vpc['VpcId']
        vpcCidr=vpc['CidrBlock']
        vpcName=''
        if 'Tags' in vpc:
            # First tag whose Key is "Name".
            # NOTE(review): next() raises StopIteration if a VPC has tags but
            # no Name tag -- consider next(..., {'Value': ''}) as a default.
            vpcName=next(item for item in vpc['Tags'] if item["Key"] == "Name")['Value']
        table.add_row([region,vpcId, vpcName,vpcCidr])
# Python 2 print statement; this script predates Python 3.
print table
######################################Security Group Details###################################################
import boto3
from prettytable import PrettyTable
# Build one table listing id, name and VPC of every security group per region.
table = PrettyTable(['Region','Group Id','Group Name','VPC'])
REGIONS = [
    'us-east-1',
    # Bug fix: the region name carried a trailing space ('eu-west-1 '),
    # which boto3 rejects as an unknown region.
    'eu-west-1',
    'ap-northeast-1'
]
for region in REGIONS:
    client = boto3.client('ec2', region_name=region)
    response = client.describe_security_groups()
    for SecurityGroup in response['SecurityGroups']:
        table.add_row([region, SecurityGroup['GroupId'], SecurityGroup['GroupName'], SecurityGroup['VpcId']])
# Parenthesized so the statement is valid under both Python 2 and Python 3
# (the sibling sections still use the bare Python 2 print statement).
print(table)
######################################Instance Details#########################################################
import boto3
from prettytable import PrettyTable
# Build one table listing id, AMI, type and state of instances per region.
table = PrettyTable(['Region','InstanceId','ImageId','Instance Type','State'])
REGIONS = [
    'us-east-1',
    'eu-west-1',
    'ap-northeast-1'
]
for region in REGIONS:
    client = boto3.client('ec2', region_name=region)
    Instances = client.describe_instances()['Reservations']
    for instance in Instances:
        # NOTE(review): only the first instance of each reservation is read;
        # a reservation can hold several instances -- consider iterating
        # instance['Instances'] instead.
        state = instance['Instances'][0]['State']['Name']
        instanceId = instance['Instances'][0]['InstanceId']
        imageId = instance['Instances'][0]['ImageId']
        instanceType = instance['Instances'][0]['InstanceType']
        table.add_row([region, instanceId, imageId, instanceType, state])
# Bug fix: the original line was 'print (table' -- an unterminated
# parenthesis and therefore a syntax error. Closed; print(table) is valid
# under both Python 2 and Python 3.
print(table)
############################################################################################
#!/usr/bin/env python
import boto.ec2
# Legacy boto (not boto3): print id and state of every instance in every
# region except the China and GovCloud partitions.
for region in [r for r in boto.ec2.regions() if r.name not in ['cn-north-1', 'us-gov-west-1']]:
    conn = boto.ec2.connect_to_region(region.name)
    reservations = conn.get_all_instances()
    for r in reservations:
        for i in r.instances:
            # Python 2 print statement; this section predates Python 3.
            print region.name, i.id,i.state
|
999,522 | 81b3e914ce3d692097f1a260c344c0c2fa39994b | """
X is a good number if after rotating each digit individually by 180 degrees, we get a valid number
that is different from X. Each digit must be rotated - we cannot choose to leave it alone.
A number is valid if each digit remains a digit after rotation. 0, 1, and 8 rotate to themselves;
2 and 5 rotate to each other (on this case they are rotated in a different direction, in other words
2 or 5 gets mirrored); 6 and 9 rotate to each other, and the rest of the numbers do not rotate to
any other number and become invalid.
Now given a positive number N, how many numbers X from 1 to N are good?
Example:
Input: 10
Output: 4
Explanation:
There are four good numbers in the range [1, 10] : 2, 5, 6, 9.
Note that 1 and 10 are not good numbers, since they remain unchanged after rotating.
Note:
N will be in range [1, 10000].
"""
class Solution:
    def rotatedDigits(self, N: int) -> int:
        """Count the "good" numbers in [1, N].

        A number is good when every digit survives a 180-degree rotation
        (so it contains no 3, 4 or 7) and at least one digit actually
        changes under rotation (it contains a 2, 5, 6 or 9).
        """
        invalid = {'3', '4', '7'}
        changing = {'2', '5', '6', '9'}
        total = 0
        for candidate in range(1, N + 1):
            digits = set(str(candidate))
            # Any non-rotatable digit disqualifies the whole number.
            if digits & invalid:
                continue
            # Only 0/1/8 left means the number rotates to itself: not good.
            if digits & changing:
                total += 1
        return total
999,523 | e7e99aeabb70d34254a671b9bcda392b9c83ed42 | import preprocessing
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import csv
import pickle
from sklearn import svm
from collections import Counter
from sklearn.metrics import accuracy_score
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn import svm
from sklearn.model_selection import cross_val_score, StratifiedKFold
from sklearn import metrics
# Load the stop-word and noise dictionaries used by the preprocessing step
print "loading dictionary ... "
stop_words = [unicode(x.strip(), 'utf-8') for x in open('kamus/stopword.txt','r').read().split('\n')]
noise = [unicode(x.strip(), 'utf-8') for x in open('kamus/noise.txt','r').read().split('\n')]
stop_words.extend(noise)
print "Complate"
print "\n"
print "\n"
# Prepare the training and testing data (semicolon-separated CSV: tweet;label)
print "Preparing data ..."
train_df_raw = pd.read_csv('dataset_final/training90.csv',sep=';',names=['tweets','label'],header=None)
test_df_raw = pd.read_csv('dataset_final/testing10.csv',sep=';',names=['tweets','label'],header=None)
# Drop rows with missing tweet text
train_df_raw = train_df_raw[train_df_raw['tweets'].notnull()]
test_df_raw = test_df_raw[test_df_raw['tweets'].notnull()]
print "Complate"
print "\n"
print "\n"
# Training tweets
X_train=train_df_raw['tweets'].tolist()
# sample preprocessing (disabled)
# for tweet in X_train:
#     tweets=tweet
#     pre=preprocessing.preprocess(tweets)
#     fitur=preprocessing.get_fitur_all(pre)
#     print fitur
# Testing tweets
X_test=test_df_raw['tweets'].tolist()
# print X_train
# print X_test
# Labels: keep 1 as-is, map everything else to 0
y_train=[x if x==1 else 0 for x in train_df_raw['label'].tolist()]
# Without cross validation, manual labels (unseen data)
#y_test=[x if x=='positif' else 'negatif' for x in test_df_raw['label'].tolist()]
print "Pipelining process ..."
# TF-IDF weighting; cleaning and tokenisation are delegated to the
# project's preprocessing module
vectorizer = TfidfVectorizer(max_df=1.0, max_features=2000,
                             min_df=0, preprocessor=preprocessing.preprocess,
                             stop_words=stop_words,tokenizer=preprocessing.get_fitur
                             )
# vectorizer = TfidfVectorizer(max_df=1.0, max_features=10000,
#                              min_df=0, preprocessor=preprocessing.preprocess,
#                              stop_words=stop_words,vocabulary=preprocessing.get_fitur
#                              )
# Feature matrices after weighting
X_train=vectorizer.fit_transform(X_train).toarray()
X_test=vectorizer.transform(X_test).toarray()
# Feature names
feature_names=vectorizer.get_feature_names()
# idf=vectorizer.idf_
# Show the features (debug)
#print feature_names
# Number of features
print len(feature_names)
# Show the tf-idf weighted features (debug)
# print dict(zip(vectorizer.get_feature_names(), idf))
# print len(vectorizer.get_feature_names(),idf)
# Count the samples (debug)
# print len(X_train)
# print len(X_test)
print "Complate"
print "\n"
print "classfication ..."
# Support-vector-machine classification with a linear kernel
clf=svm.SVC(kernel='linear',gamma=1)
clf.fit(X_train,y_train)
# Persist the trained model (disabled)
#filesave='save_train/svmlinear9010.sav'
#pickle.dump(clf,open(filesave,'wb'))
#clf = pickle.load(open(filesave, 'rb'))
print "Complate"
print "\n"
# Evaluate with stratified 5-fold cross validation on the training set
skf=StratifiedKFold(n_splits=5,random_state=0)
scores=cross_val_score(clf,X_train,y_train,cv=skf)
precision_score=cross_val_score(clf,X_train,y_train,cv=skf,scoring='precision')
recall_score=cross_val_score(clf, X_train,y_train, cv=skf, scoring ='recall')
# Report mean scores
print "Result ..."
print "Recall :%0.2f"%recall_score.mean()
print "Precision :%0.2f"%precision_score.mean()
print "Accuracy :%0.2f"%scores.mean()
# Predict the held-out set for the result chart
weighted_prediction=clf.predict(X_test)
#print len(weighted_prediction)
"""
c=Counter(weighted_prediction)
plt.bar(c.keys(),c.values())
"""
# Tally the predicted labels
labels, values = zip(*Counter(weighted_prediction).items())
indexes=np.arange(len(labels))
width=0.9
#print collections.Counter(weighted_prediction)
labels, values = zip(*Counter(weighted_prediction).items())
# NOTE(review): assumes the Counter yields (negative, positive) in that order
# and that both classes occur in the predictions -- verify
SentimenPositif=values[1]
SentimenNegatif=values[0]
#SentimenPositif.append(values[1])
#SentimenNegatif.append(values[0])
ind=np.arange(1)
width=0.8
#fig = plt.figure()
ax = plt.subplot(111)
# Build the bar chart (blue = positive, red = negative)
yvals = SentimenPositif
rects1 = ax.bar(ind, yvals, width, color='blue')
zvals = SentimenNegatif
rects2 = ax.bar(ind+width, zvals, width, color='red')
ax.set_ylabel("Frequency")
ax.set_xticks(ind+width)
ax.set_xticklabels(("Result","a","b"))
ax.legend((rects1[0], rects2[0]), ('Positif', 'Negatif'))
# Annotate each bar with its count
for rect in rects1:
    h = rect.get_height()
    ax.text(rect.get_x()+rect.get_width()/2,0.99*h, '%d'%int(h),ha='center',va='bottom')
for rect in rects2:
    h = rect.get_height()
    ax.text(rect.get_x()+rect.get_width()/2,0.99*h, '%d'%int(h),ha='center',va='bottom')
#plt.axis([0,10, 0,300])
plt.title("Grafik Analisis Sentimen")
plt.show()
"""
plt.bar(indexes, values, width,color=['red', 'blue'])
labels=list(labels)
labels[0]='negatif'
labels[1]='positif'
labels=tuple(labels)
plt.title("Hasil Sentimen Analisis")
plt.xticks(indexes + width * 0.5, labels)
plt.ylabel('Scores')
plt.xlabel('Label')
plt.plot(kind='bar')
plt.show()
"""
#print collections.Counter(weighted_prediction)
"""
print 'Recall:', recall_score(y_test, weighted_prediction,
average='weighted')
print 'Precision:', precision_score(y_test, weighted_prediction,
average='weighted')
"""
|
999,524 | dcc52ce7c0ba1a5e68eaa70ba7ad0d5a46f71de9 | from flask import Flask
from apis import api_route, TwitApi, UsersApi, UsersTwitsApi
from variables import app_secret_key
from flask_restful import Api
from models import db, Users
from passwordhelper import PasswordHelper
from twits_blueprint import twits_blueprint
from login_blueprint import login_blueprint
# Application bootstrap: create the Flask app, wire blueprints and REST API.
app = Flask(__name__)
app.secret_key = app_secret_key
# NOTE(review): DB credentials are hard-coded in the connection string --
# consider moving them to environment/configuration before deployment.
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://mytwits_user:mytwits_password@localhost/mytwits'
app.register_blueprint(twits_blueprint)
app.register_blueprint(login_blueprint)
ph = PasswordHelper()
db.init_app(app)
api = Api(app)
# REST endpoints: twit CRUD, user lookup, and a user's twit collection.
api.add_resource(TwitApi, api_route('twits/<int:twit_id>'), api_route('twits'))
api.add_resource(UsersApi, api_route('users/<int:user_id>'))
api.add_resource(UsersTwitsApi, api_route('users/<int:user_id>/twits'))
if __name__ == '__main__':
    app.run(debug=True, host='0.0.0.0', port=4000)
|
999,525 | 042715071788887b327b75904b9a490130f5c89a | import statistics
# Read N pairs (A_i, B_i): A holds the lower bounds, B the upper bounds.
N = int(input())
AB = [map(int, input().split()) for _ in range(N)]
A, B = [list(i) for i in zip(*AB)]
# The two medians were computed identically in both branches of the parity
# check below; hoist them out of the duplicated code.
low_med = statistics.median(A)
high_med = statistics.median(B)
if N % 2 == 1:
    # Odd N: both medians are integers, so every integer between them
    # (inclusive) is attainable.
    print(high_med - low_med + 1)
else:
    # Even N: medians are half-integers; count the attainable half-steps.
    print(int(2 * (high_med - low_med) + 1))
999,526 | 3ba19e2c8ad777e1b79e6b2bced1f6339a5fdd15 | print("What is your name?")
n = input()
print(f"hello, {n}")
|
999,527 | 60f84dc802e6e8ec2946ec6a433fbb34955202f6 | from django.db import models
from django.contrib.auth import get_user_model
from .restaurant import Restaurant
#from django.db.models.signals import post_save
#from django.dispatch import receiver
User = get_user_model()
class Review(models.Model):
    """A user's rating and text review of a restaurant.

    Each (author, restaurant) pair may have at most one review, enforced
    by a database-level unique constraint.
    """

    class Meta:
        constraints = [
            models.UniqueConstraint(
                fields=['author', 'restaurant'], name='unique-author-restaurant'),
        ]

    # Review author; deleting the user cascades to their reviews.
    author = models.ForeignKey(
        verbose_name='author',
        null=False,
        default=0,
        to=User,
        on_delete=models.CASCADE,
        related_name='reviews'
    )

    # Reviewed restaurant; deleting it cascades to its reviews.
    restaurant = models.ForeignKey(
        verbose_name='restaurant',
        null=False,
        default=0,
        to=Restaurant,
        on_delete=models.CASCADE,
        related_name='reviews'
    )

    # Stamped once, when the row is first created.
    created = models.DateTimeField(
        verbose_name='creation date-time',
        auto_now_add=True
    )

    modified = models.DateTimeField(
        verbose_name='modification date-time',
        # Bug fix: auto_now_add only stamps the field at creation, so this
        # "modification" timestamp never changed; auto_now refreshes it on
        # every save(), which is what the field name and verbose_name promise.
        auto_now=True
    )

    # One-decimal rating; max_digits=2 limits values to 0.0-9.9.
    rating = models.DecimalField(
        verbose_name='rating',
        max_digits=2,
        decimal_places=1,
        null=False
    )

    # Free-form review text.
    content = models.TextField(
        verbose_name='content',
        null=False
    )
# LISTENERS #
# @receiver(post_save, sender = Review)
# def update_restaurant_secondary_columns(sender, **kwargs):
# '''Automatically creates blank user profile when a new user is registered
# '''
# if kwargs['created']:
# review = kwargs['instance']
# #print('from update_restaurant_secondary_columns:', review.author, review.restaurant, review.rating)
|
999,528 | 4e95d2b524e842348391cd905a2d7c91fc1cf328 | #!/usr/bin/env python
#---------------
#RMS 2016
#---------------
#Program to quickly view and filter seismograms, found in the directory structure provided by obspyDMT
#User loop of the Seismoviewer program
import os
import sys
import glob
import argparse
from SeismoViewer import SeismoView
import time
parser = argparse.ArgumentParser()
parser.add_argument('-path',action='store',dest='inputpath',help='The full file path to the data you want to prepare for a tomography project')
parser.add_argument('-phase',action='store',dest='phase',help='The seismic phase you are interested in: This determines which SAC files are accessed and what the final output is. Choose from S or P')
parser.add_argument('-cleanup',action='store_true',dest='cleanup',help='If set, output files from the program will be deleted when it ends')
parser.add_argument('-fulltrace',action='store_true',dest='fulltrace',help='If set, the program will plot the entire trace, otherwise it will slice around the arrival of interest')
results = parser.parse_args()
#may want to run the viewer in various modes - possibly to control the length of the traces that are displayed etc
def userloop(path,ph,fulltrace=None):
    '''Interactive command loop for a single event.

    path      -- directory containing the event's SAC data
    ph        -- seismic phase of interest, used when slicing traces
    fulltrace -- if truthy, keep whole traces instead of slicing around the arrival

    Commands: q (quit program), n (next event), p (plot), flag <comment>
    (record event in the flagged-events file), ID <name> (jump to event),
    bp <low> <high> (bandpass filter), map (station-event map).

    Returns None to advance to the next event, or an event ID string when
    the user jumped to a specific event; exits the process on "q".
    '''
    #Get the eventID
    eventID = path.split('/')[-2]
    print eventID
    #Load the data into a Seimoview object
    SV = SeismoView(path)
    SV.multiload()
    #Don't necessarily need to use this option, but there for now
    #print fulltrace
    if fulltrace:
        SV.preservetraces()
    else:
        SV.slicetraces(phase=ph)
    #define filter bands (set by the 'bp' command; also guards b1/b2 use below)
    filterbands = None
    #Now the user decides what to do with it
    while 1:
        usercommand = raw_input('SV>: ')
        #exit command
        if usercommand == 'q':
            sys.exit(1)
        elif usercommand == 'n':
            return None
        #plot command
        elif usercommand == 'p':
            # b1/b2 exist whenever filterbands is truthy (set together in 'bp')
            if filterbands:
                basename = '%s_%s_%s_data' %(eventID,b1,b2)
            else:
                basename = '%s_data' %eventID
            #Save the data components to be plotted
            SV.arraytracesave(basename)
            #Run the plotting command - the prompt will be freed again once the user quits the plot
            #This one is for full seismogram plotting
            if fulltrace:
                os.system('./plot_quakes_full.py %s %s' %(basename+'.npy',basename+'.dat'))
            else:
                os.system('./plot_quakes_sliced.py %s %s' %(basename+'.npy',basename+'.dat'))
        #flag command - creates or appends to a file containing flagged events
        elif usercommand[:4] == 'flag':
            outfilename = 'Flagged_events_'+path.split('/')[-3]+'.dat'
            now = time.strftime("%c")
            #Append flag time, event name and user comments to the file
            if os.path.isfile(outfilename):
                outfile = open(outfilename,'a')
                outfile.write(now)
                outfile.write('\n')
                outfile.write('%s %s\n' %(eventID,usercommand))
                outfile.close()
            else:
                outfile = open(outfilename,'w')
                outfile.write(now)
                outfile.write('\n')
                outfile.write('%s %s\n' %(eventID,usercommand))
                outfile.close()
        # go to another event name
        elif usercommand[:2] == 'ID':
            IDname = usercommand.split()[1]
            print 'Going to event %s' %IDname
            return IDname
        #filter command: 'bp <low> <high>' in Hz
        elif usercommand[:2] == 'bp':
            try:
                command = usercommand.split()
                b1 = float(command[1])
                b2 = float(command[2])
                if b2 <= b1:
                    print 'filter band b2 cannot be larger than b1'
                else:
                    filterbands=True
                    print 'filtering'
                    SV.filterslicestream(b1,b2)
            except:
                # malformed/missing arguments fall through to this message
                print 'filter command %s not recognized' %usercommand
        elif usercommand == 'map':
            #Makes a station-event map for this configuration, with the distances
            print '---------------------------------'
            print 'Making station-event map'
            print '---------------------------------'
            SV.map_event_stations()
        else:
            print 'Command not recognized'
def main():
    """Parse arguments, walk the event directories and run the viewer loop."""
    cwd = os.getcwd()
    path = results.inputpath
    phase = results.phase
    clean = results.cleanup
    fulltrace = results.fulltrace
    print fulltrace
    # NOTE(review): the -phase help text offers S or P, but only P passes this
    # check; the script also continues after the warning -- confirm intent.
    if (phase != 'P'):
        print 'input phase %s not recognized' %phase
    if not os.path.isdir(path):
        print 'Input directory %s not found' %path
        sys.exit(1)
    # Event folders are date-named (e.g. 20150101_...), as left by obspyDMT
    events = glob.glob('%s/20*' %path)
    if len(events) == 0:
        print 'Cound not find any event directories in folder %s' %path
        sys.exit(1)
    print '\n------------------------\n'
    print 'Found %g events' %len(events)
    print '\n------------------------\n'
    # newevent carries an 'ID <name>' jump request across loop iterations
    newevent = None
    for event in events:
        if os.path.isdir('%s/BH_VEL' %event):
            print 'Found BH_VEL'
            if newevent:
                IDname = userloop('%s/BH_VEL' %newevent,phase,fulltrace=fulltrace)
                newevent = None
            else:
                IDname = userloop('%s/BH_VEL' %event,phase,fulltrace=fulltrace)
            if IDname:
                neweventparths = event.split('/')
                neweventparths[-1] = IDname
                newevent = '/'.join(neweventparths)
            if clean:
                print 'Cleanup -- removing .npy and .dat files'
                os.system('rm -rf *_data.npy *_data.dat')
        elif os.path.isdir('%s/BH_RAW' %event):
            print 'Found BH_RAW'
            if newevent:
                IDname = userloop('%s/BH_RAW' %newevent,phase,fulltrace=fulltrace)
                newevent = None
            else:
                IDname = userloop('%s/BH_RAW' %event,phase,fulltrace=fulltrace)
            if IDname:
                neweventparths = event.split('/')
                neweventparths[-1] = IDname
                newevent = '/'.join(neweventparths)
            if clean:
                print 'Cleanup -- removing .npy and .dat files'
                os.system('rm -rf *_data.npy *_data.dat')
        else:
            print 'No viable data directory found in event %s' %event

if __name__ == '__main__':
    main()
|
999,529 | 58d3efb0bcc689aaa05211514a980ba2bf3cf990 | class AddDigits(object):
def addDigits(self, num):
"""
Problem stste : Given a non-negative integer num, repeatedly add all its digits until the result has only one digit.
( For example:Given num = 38, the process is like: 3 + 8 = 11, 1 + 1 = 2. Since 2 has only one digit, return it. )
type num: int
return type: int
Idea : Without recursive approach, we scan the input value from highest bit and plus each digit frequently.
When the sum grater than 10, we should minus 9 ( also means minus 10 and plus 1 ), than proceed to scan next digit
"""
i_rtVal = 0
#Value check, direct return if the input value less than 10 ( only 1 digit )
if num<10:
i_rtVal = num
else:
#Transfer the input value to string
str_num = str(num)
#Go scan for each digit from highest one
for each_id in range(len(str_num)):
#When digit scanned, add the digit to return value
i_rtVal = i_rtVal + ord(str_num[each_id])-ord('0')
#If the return value grater than 9, minus 10 and plus 1
if i_rtVal>9:
i_rtVal = i_rtVal-10+1
else:
pass
return i_rtVal |
999,530 | d5e91acea1314e4a34bacc762e78ca527b7e408d | # coding: utf-8
import os.path, sys
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
from database import Database
from article import Article
from helper import Helper
import csv
import pymongo
from pymongo import MongoClient
import math
import threading
from probabilistic_model import ProbabilisticModel
from model2 import Model_2
import multiprocessing
def article_learning(index_article):
    """Run single-article learning with fixed confidence/support thresholds.

    NOTE(review): builds its own ProbabilisticModel (c = 1, no shared
    bigram state); appears to be a standalone/worker entry point, e.g. for
    multiprocessing -- confirm against callers.
    """
    mbcon = 0.03
    mtcon = 0.03
    mbsup = 100
    mtsup = 100
    probabilistic_model = ProbabilisticModel(mbcon, mtcon, mbsup, mtsup, 1)
    probabilistic_model.article_learning_01(index_article)
if __name__ == '__main__':
    # rule 0 and 1
    # Number of wiki dump files in each folder (AH is the last, partial one)
    array_data_file = {
        'AA': 100,
        'AB': 100,
        'AC': 100,
        'AD': 100,
        'AE': 100,
        'AF': 100,
        'AG': 100,
        'AH': 82
    }
    # Confidence / support thresholds passed to the probabilistic model
    mbcon = 0.0015
    mtcon = 0.0015
    mbsup = 50
    mtsup = 50
    c = 0.0077
    module_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + \
        '/word_recognition'
    # Pre-computed bigram statistics produced by the word_recognition module
    bigram_path = module_path + '/result_data/bigram.pkl'
    bigram_hash = Helper.load_obj(bigram_path)
    occurrences_data_path = module_path + '/result_data/occurrences.pkl'
    statistic_bigram = Helper.load_obj(occurrences_data_path)
    # Accumulators threaded through successive model instances
    word_array = set()
    lmc_more_2 = {}
    # Pass 1 (rules 0 and 1): scan every document of every wiki dump file
    for folder, max_file in array_data_file.iteritems():
        print '=======================***%s***=======================' % folder
        for index_file in range(0, max_file):
            # Dump files use zero-padded two-digit names (wiki_00 ...)
            if index_file < 10:
                file_name = '0' + str(index_file)
            else:
                file_name = str(index_file)
            print 'Start handle data in file %s in folder %s' % (file_name, folder)
            wiki_data_path = '/viwiki_data/' + folder + '/wiki_' + file_name
            wiki_data_path = module_path + wiki_data_path
            doc_array = Helper.load_wiki_data(wiki_data_path)
            position_file = '%s_%s' % (folder, file_name)
            for index, doc in enumerate(doc_array):
                position_file = '%i_%s_%s' % (index, file_name, folder)
                probabilistic_model = ProbabilisticModel(mbcon, mtcon, mbsup, mtsup, 0.0077,
                    bigram_hash, statistic_bigram, word_array, lmc_more_2)
                probabilistic_model.article_learning_01(position_file, doc)
                # Carry the grown accumulators into the next model instance
                word_array = probabilistic_model.word_array
                lmc_more_2 = probabilistic_model.lmc_more_2
        print 'Has %i word detect' % len(word_array)
        print 'Has %i lmc need handle' % len(lmc_more_2)
        # Checkpoint intermediate results after each folder
        two_syllables_path = module_path + '/result_data/two_syllables_word.pkl'
        Helper.save_obj(word_array, two_syllables_path)
        lmc_more_2_path = module_path + '/result_data/lmc_more_2.pkl'
        Helper.save_obj(lmc_more_2, lmc_more_2_path)
    # Pass 2 (rules 2 and 3): reload the checkpoints and refine the word set
    two_syllables_path = module_path + '/result_data/two_syllables_word.pkl'
    word_array = Helper.load_obj(two_syllables_path)
    lmc_more_2_path = module_path + '/result_data/lmc_more_2.pkl'
    lmc_more_2 = Helper.load_obj(lmc_more_2_path)
    probabilistic_model = ProbabilisticModel(mbcon, mtcon, mbsup, mtsup, 0.0077,
        bigram_hash, statistic_bigram, word_array, lmc_more_2)
    probabilistic_model.rule_2_and_3()
    word_array = probabilistic_model.word_array
    lmc_more_2 = probabilistic_model.lmc_more_2
    print 'Has %i word detect' % len(word_array)
    # Persist the final two-syllable word list
    two_syllables_path = module_path + '/result_data/two_syllables_word.pkl'
    Helper.save_obj(word_array, two_syllables_path)
|
999,531 | ab77e28339b1311c05de1242767cff1b326ff7d5 | import unittest
from tensor import Tensor
class TestTensorMethods(unittest.TestCase):
    """Unit tests for the project-local ``Tensor`` class.

    A ``Tensor`` is constructed from a flat list of numbers plus a shape and
    exposes the data as a tree of child Tensors (``root.children``), with the
    leaf data available via ``.lst`` and a ``print_tree()`` renderer.
    """

    def test_Tensor_creation(self):
        """A rank-1 tensor keeps its data flat and has no children."""
        root = Tensor([1], [1])
        self.assertIsNone(root.children)
        self.assertListEqual([1], root.lst)

    def test_children_Tensor(self):
        """Shape [2, 3] splits six values into two children of three each."""
        root = Tensor([1, 2, 3, 4, 5, 6], [2, 3])
        self.assertIsNotNone(root.children)
        self.assertListEqual(root.children[0].lst, [1, 2, 3])
        self.assertListEqual(root.children[1].lst, [4, 5, 6])

    def test_tree_three_levels(self):
        """Shape [4, 2, 3] produces a two-level tree of children."""
        root = Tensor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
                       17, 18, 19, 20, 21, 22, 23, 24], [4, 2, 3])
        self.assertIsNotNone(root.children)
        self.assertListEqual(root.children[0].children[0].lst, [1, 2, 3])
        self.assertListEqual(root.children[3].children[1].lst, [22, 23, 24])

    def test_print_tree_no_children(self):
        root = Tensor([1, 2, 3], [3])
        actual_result = root.print_tree()
        expected_result = "[1, 2, 3]"
        self.assertEqual(expected_result, actual_result)

    def test_print_tree_one_set_of_children(self):
        # Extra trailing data ([-1, 3, 2, 1]) beyond shape 5*2=10 is dropped.
        root = Tensor([0, 1, 2, 3, 4, 5, 0.1, 0.2, -3, -2, -1, 3, 2, 1], [5, 2])
        actual_result = root.print_tree()
        expected_result = "[[0, 1], [2, 3], [4, 5], [0.1, 0.2], [-3, -2]]"
        self.assertEqual(expected_result, actual_result)

    def test_print_tree_two_sets_of_children(self):
        # Missing trailing data (shape 2*3*2=12, only 9 values) is zero-padded.
        root = Tensor([0, 1, 2, 3, 4, 5, 0.1, 0.2, -3], [2, 3, 2])
        actual_result = root.print_tree()
        expected_result = "[[[0, 1], [2, 3], [4, 5]], [[0.1, 0.2], [-3, 0], [0, 0]]]"
        self.assertEqual(expected_result, actual_result)

    def test_empty_shape(self):
        """An empty shape yields an empty tensor regardless of the data."""
        root = Tensor([0, 1, 2, 3, 4, 5, 0.1, 0.2, -3], [])
        actual_result = root.print_tree()
        expected_result = "[]"
        self.assertEqual(expected_result, actual_result)

    def test_shape_error_negative_value(self):
        with self.assertRaises(Exception) as context:
            Tensor([1, 2, 3], [-1, 2, 3])
        # BUGFIX: `"..." in context.exception` raises TypeError on Python 3
        # (an Exception is not a container) — test the message string instead.
        self.assertIn("Shape is not of the correct format.",
                      str(context.exception))

    def test_shape_error_not_integer(self):
        with self.assertRaises(Exception) as context:
            Tensor([1, 2, 3], [-1, 2.3, 3])
        self.assertIn("Shape is not of the correct format.",
                      str(context.exception))

    def test_data_with_nonnumeric(self):
        # BUGFIX: the original passed three positional args
        # (Tensor("Hello", "World!", [1, 2])), so the assertRaises was
        # satisfied by a TypeError instead of the data-validation error.
        # Pass non-numeric *data* with a matching shape instead.
        with self.assertRaises(Exception) as context:
            Tensor(["Hello", "World!"], [1, 2])
        self.assertIn("Data has a non-numeric value.", str(context.exception))
# Copyright (c) 2014 Universidade Federal Fluminense (UFF)
# Copyright (c) 2014 Polytechnic Institute of New York University.
# This file is part of noWorkflow.
# Please, consult the license terms in the LICENSE file.
from __future__ import (absolute_import, print_function,
division, unicode_literals)
import os
from ..persistence import persistence
from ..utils import print_msg
from .command import Command
class List(Command):
    """'now list': print a summary of every trial in the provenance store."""

    def add_arguments(self):
        # Single option: where to look for the provenance database.
        self.add_argument(
            '--dir', type=str,
            help='set project path where is the database. Default to '
                 'current directory')

    def execute(self, args):
        """Connect to an existing store and print one entry per trial."""
        persistence.connect_existing(args.dir or os.getcwd())
        print_msg('trials available in the provenance store:', True)
        for trial in persistence.load('trial'):
            summary = ' Trial {id}: {script} {arguments}'.format(**trial)
            # Align the detail lines under the text following "Trial <id>: ".
            pad = ' ' * (summary.index(': ') + 2)
            print(summary)
            print('{indent}with code hash {code_hash}'.format(
                indent=pad, **trial))
            print('{indent}ran from {start} to {finish}'.format(
                indent=pad, **trial))
# RMG-Py species record: the hydroperoxy-alcohol radical C=CC=CC(O)O[O].
# FIX: the original opening line was fused with dataset-boundary metadata
# ("999,533 | 60efda... | species("), making the file unparsable; restored
# to a plain `species(` call. All numeric data is unchanged.
species(
    label = 'S(227)(226)',
    structure = SMILES('C=CC=CC(O)O[O]'),
    E0 = (-78.6532,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([492.5,1135,1000,2950,3100,1380,975,1025,1650,1380,1390,370,380,2900,435,2995,3010,3025,975,987.5,1000,1300,1337.5,1375,400,450,500,1630,1655,1680,3615,1277.5,1000,180,180],'cm^-1')),
        HinderedRotor(inertia=(0.60723,'amu*angstrom^2'), symmetry=1, barrier=(13.9614,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.606212,'amu*angstrom^2'), symmetry=1, barrier=(13.938,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.606969,'amu*angstrom^2'), symmetry=1, barrier=(13.9554,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.606708,'amu*angstrom^2'), symmetry=1, barrier=(13.9494,'kJ/mol'), semiclassical=False),
    ],
    spinMultiplicity = 2,
    opticalIsomers = 1,
    molecularWeight = (115.107,'amu'),
    collisionModel = TransportData(shapeIndex=2, epsilon=(4426.56,'J/mol'), sigma=(7.01777,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with Tc=691.42 K, Pc=29.06 bar (from Joback method)"""),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.475664,0.0774889,-8.0698e-05,4.44867e-08,-9.89095e-12,-9332.79,29.8388], Tmin=(100,'K'), Tmax=(1084.84,'K')), NASAPolynomial(coeffs=[13.9574,0.0277788,-1.19634e-05,2.24672e-09,-1.56684e-13,-12257.9,-36.3003], Tmin=(1084.84,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-78.6532,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(340.893,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-CsH) + group(O2s-OsH) + group(Cs-CsOsOsH) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + radical(ROOJ)"""),
)
# --- Auto-generated RMG-Py species records (stable species and small radicals).
# NOTE(review): machine-generated thermochemistry data — do not edit by hand.
# species: molecular oxygen (triplet ground state)
species(
    label = 'O2(2)(2)',
    structure = SMILES('[O][O]'),
    E0 = (-8.62178,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([1483.7],'cm^-1')),
    ],
    spinMultiplicity = 3,
    opticalIsomers = 1,
    molecularWeight = (31.9988,'amu'),
    collisionModel = TransportData(shapeIndex=1, epsilon=(887.157,'J/mol'), sigma=(3.467,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""PrimaryTransportLibrary"""),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.53764,-0.00122828,5.36759e-06,-4.93128e-09,1.45955e-12,-1037.99,4.6718], Tmin=(100,'K'), Tmax=(1087.71,'K')), NASAPolynomial(coeffs=[3.16427,0.00169454,-8.00335e-07,1.5903e-10,-1.14891e-14,-1048.45,6.08303], Tmin=(1087.71,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-8.62178,'kJ/mol'), Cp0=(29.1007,'J/(mol*K)'), CpInf=(37.4151,'J/(mol*K)'), label="""O2""", comment="""Thermo library: BurkeH2O2"""),
)
# species: resonance-stabilized C5H7O radical (doublet)
species(
    label = 'C5H7O(224)(223)',
    structure = SMILES('[CH2]C=CC=CO'),
    E0 = (-32.8413,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([3615,1277.5,1000,2995,3005,3015,3025,975,983.333,991.667,1000,1300,1325,1350,1375,400,433.333,466.667,500,1630,1646.67,1663.33,1680,3000,3100,440,815,1455,1000,180],'cm^-1')),
        HinderedRotor(inertia=(1.31979,'amu*angstrom^2'), symmetry=1, barrier=(30.3447,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(1.32385,'amu*angstrom^2'), symmetry=1, barrier=(30.4378,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(1.31983,'amu*angstrom^2'), symmetry=1, barrier=(30.3455,'kJ/mol'), semiclassical=False),
    ],
    spinMultiplicity = 2,
    opticalIsomers = 1,
    molecularWeight = (83.1085,'amu'),
    collisionModel = TransportData(shapeIndex=2, epsilon=(3794.25,'J/mol'), sigma=(6.17562,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with Tc=592.65 K, Pc=36.55 bar (from Joback method)"""),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.25554,0.0396093,3.48643e-05,-9.07575e-08,4.31029e-11,-3831.66,21.0706], Tmin=(100,'K'), Tmax=(908.805,'K')), NASAPolynomial(coeffs=[21.7542,0.00426328,2.6289e-06,-6.6847e-10,4.32897e-14,-9823.72,-88.3316], Tmin=(908.805,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-32.8413,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(295.164,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-(Cds-Cd)H) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsOsH) + radical(C=CC=CCJ)"""),
)
# species: hydroperoxyl radical HO2
species(
    label = 'HO2(8)(9)',
    structure = SMILES('[O]O'),
    E0 = (2.67648,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([1112.81,1388.53,3298.45],'cm^-1')),
    ],
    spinMultiplicity = 2,
    opticalIsomers = 1,
    molecularWeight = (33.0067,'amu'),
    collisionModel = TransportData(shapeIndex=2, epsilon=(892.977,'J/mol'), sigma=(3.458,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=1.0, comment="""GRI-Mech"""),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[4.02956,-0.00263985,1.5223e-05,-1.71671e-08,6.26738e-12,322.677,4.84428], Tmin=(100,'K'), Tmax=(923.913,'K')), NASAPolynomial(coeffs=[4.15133,0.00191146,-4.11274e-07,6.34957e-11,-4.86385e-15,83.4208,3.09341], Tmin=(923.913,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(2.67648,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(58.2013,'J/(mol*K)'), label="""HO2""", comment="""Thermo library: BurkeH2O2"""),
)
# species: 2,4-pentadienal (closed-shell C5H6O)
species(
    label = 'C5H6O(217)(216)',
    structure = SMILES('C=CC=CC=O'),
    E0 = (-29.5668,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([2950,3100,1380,975,1025,1650,2995,3010,3025,975,987.5,1000,1300,1337.5,1375,400,450,500,1630,1655,1680,2782.5,750,1395,475,1775,1000,180],'cm^-1')),
        HinderedRotor(inertia=(0.9508,'amu*angstrom^2'), symmetry=1, barrier=(21.8608,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.95303,'amu*angstrom^2'), symmetry=1, barrier=(21.912,'kJ/mol'), semiclassical=False),
    ],
    spinMultiplicity = 1,
    opticalIsomers = 1,
    molecularWeight = (82.1005,'amu'),
    collisionModel = TransportData(shapeIndex=2, epsilon=(3519.98,'J/mol'), sigma=(5.63814,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with Tc=549.81 K, Pc=44.56 bar (from Joback method)"""),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.58677,0.0449815,-2.33922e-05,-1.17435e-10,2.4605e-12,-3462.46,19.7432], Tmin=(100,'K'), Tmax=(1159.43,'K')), NASAPolynomial(coeffs=[12.6828,0.020301,-9.05764e-06,1.75766e-09,-1.25367e-13,-6949.62,-39.3724], Tmin=(1159.43,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-29.5668,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(274.378,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + group(Cd-Cd(CO)H) + group(Cds-CdsHH) + group(Cds-O2d(Cds-Cds)H)"""),
)
# species: hydroxyl radical OH
species(
    label = 'OH(5)(5)',
    structure = SMILES('[OH]'),
    E0 = (28.372,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([3287.46],'cm^-1')),
    ],
    spinMultiplicity = 2,
    opticalIsomers = 1,
    molecularWeight = (17.0073,'amu'),
    collisionModel = TransportData(shapeIndex=1, epsilon=(665.16,'J/mol'), sigma=(2.75,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.4858,0.00133397,-4.70043e-06,5.64379e-09,-2.06318e-12,3411.96,1.99788], Tmin=(100,'K'), Tmax=(1005.25,'K')), NASAPolynomial(coeffs=[2.88225,0.00103869,-2.35652e-07,1.40229e-11,6.34581e-16,3669.56,5.59053], Tmin=(1005.25,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(28.372,'kJ/mol'), Cp0=(29.1007,'J/(mol*K)'), CpInf=(37.4151,'J/(mol*K)'), label="""OH""", comment="""Thermo library: BurkeH2O2"""),
)
# --- Auto-generated RMG-Py species records (radical intermediates).
# species: peroxy analogue of the parent radical (triplet biradical record)
species(
    label = 'C=CC=C[CH]O[O](2605)',
    structure = SMILES('[CH2]C=CC=CO[O]'),
    E0 = (243.658,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([492.5,1135,1000,2995,3005,3015,3025,975,983.333,991.667,1000,1300,1325,1350,1375,400,433.333,466.667,500,1630,1646.67,1663.33,1680,3000,3100,440,815,1455,1000,180],'cm^-1')),
        HinderedRotor(inertia=(0.920477,'amu*angstrom^2'), symmetry=1, barrier=(21.1636,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.92045,'amu*angstrom^2'), symmetry=1, barrier=(21.163,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.920435,'amu*angstrom^2'), symmetry=1, barrier=(21.1626,'kJ/mol'), semiclassical=False),
    ],
    spinMultiplicity = 3,
    opticalIsomers = 1,
    molecularWeight = (98.0999,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.19261,0.0517996,-2.08373e-05,-1.61991e-08,1.20077e-11,29415.5,26.0962], Tmin=(100,'K'), Tmax=(937.741,'K')), NASAPolynomial(coeffs=[15.1311,0.0171574,-5.11526e-06,8.4127e-10,-5.79576e-14,25710.4,-46.0702], Tmin=(937.741,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(243.658,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(295.164,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-O2s(Cds-Cd)) + group(O2s-OsH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsOsH) + radical(ROOJ) + radical(C=CC=CCJ)"""),
)
# species: hydrogen atom
species(
    label = 'H(3)(3)',
    structure = SMILES('[H]'),
    E0 = (211.792,'kJ/mol'),
    spinMultiplicity = 1,
    opticalIsomers = 1,
    molecularWeight = (1.00794,'amu'),
    collisionModel = TransportData(shapeIndex=0, epsilon=(1205.6,'J/mol'), sigma=(2.05,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,9.24385e-15,-1.3678e-17,6.66185e-21,-1.00107e-24,25472.7,-0.459566], Tmin=(100,'K'), Tmax=(3459.6,'K')), NASAPolynomial(coeffs=[2.5,9.20456e-12,-3.58608e-15,6.15199e-19,-3.92042e-23,25472.7,-0.459566], Tmin=(3459.6,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(211.792,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""H""", comment="""Thermo library: BurkeH2O2"""),
)
# species: alkoxy-peroxy biradical from H loss on the OH group
species(
    label = 'C=CC=CC([O])O[O](7130)',
    structure = SMILES('C=CC=CC([O])O[O]'),
    E0 = (147.052,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([492.5,1135,1000,2950,3100,1380,975,1025,1650,1380,1390,370,380,2900,435,2995,3010,3025,975,987.5,1000,1300,1337.5,1375,400,450,500,1630,1655,1680,238.978,239.718,239.956],'cm^-1')),
        HinderedRotor(inertia=(0.267201,'amu*angstrom^2'), symmetry=1, barrier=(10.852,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.265643,'amu*angstrom^2'), symmetry=1, barrier=(10.8528,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.26736,'amu*angstrom^2'), symmetry=1, barrier=(10.852,'kJ/mol'), semiclassical=False),
    ],
    spinMultiplicity = 3,
    opticalIsomers = 1,
    molecularWeight = (114.099,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.887179,0.0739964,-8.9668e-05,6.44041e-08,-1.94919e-11,17793.4,29.2337], Tmin=(100,'K'), Tmax=(793.237,'K')), NASAPolynomial(coeffs=[8.51287,0.0355434,-1.69548e-05,3.2939e-09,-2.324e-13,16583.6,-5.78956], Tmin=(793.237,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(147.052,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(320.107,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-CsH) + group(O2s-OsH) + group(Cs-CsOsOsH) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + radical(CCOJ) + radical(ROOJ)"""),
)
# species: 1,3-butadien-1-yl radical (DFT_QCI thermo library)
species(
    label = 'CH2CHCHCH(913)',
    structure = SMILES('[CH]=CC=C'),
    E0 = (346.45,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([2950,3100,1380,975,1025,1650,2995,3025,975,1000,1300,1375,400,500,1630,1680,3120,650,792.5,1650],'cm^-1')),
        HinderedRotor(inertia=(1.31937,'amu*angstrom^2'), symmetry=1, barrier=(30.3349,'kJ/mol'), semiclassical=False),
    ],
    spinMultiplicity = 2,
    opticalIsomers = 1,
    molecularWeight = (53.0825,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.64255,0.0163337,3.86225e-05,-6.71377e-08,2.83603e-11,41729.6,13.282], Tmin=(100,'K'), Tmax=(937.724,'K')), NASAPolynomial(coeffs=[12.9705,0.00669127,-1.0007e-06,1.67602e-10,-1.71436e-14,38279.7,-43.9476], Tmin=(937.724,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(346.45,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(203.705,'J/(mol*K)'), label="""CH2CHCHCH""", comment="""Thermo library: DFT_QCI_thermo"""),
)
# species: small hydroxy-peroxy biradical fragment
species(
    label = '[O]O[CH]O(7358)',
    structure = SMILES('[O]O[CH]O'),
    E0 = (9.28844,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([3615,1277.5,1000,3025,407.5,1350,352.5,492.5,1135,1000],'cm^-1')),
        HinderedRotor(inertia=(0.156407,'amu*angstrom^2'), symmetry=1, barrier=(3.59612,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.156804,'amu*angstrom^2'), symmetry=1, barrier=(3.60523,'kJ/mol'), semiclassical=False),
    ],
    spinMultiplicity = 3,
    opticalIsomers = 1,
    molecularWeight = (62.0248,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.58857,0.0452524,-0.00011197,1.22556e-07,-4.65581e-11,1154.51,14.2974], Tmin=(100,'K'), Tmax=(891.504,'K')), NASAPolynomial(coeffs=[-0.0459,0.0212294,-1.12418e-05,2.13324e-09,-1.41938e-13,3048.61,34.6933], Tmin=(891.504,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(9.28844,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(170.447,'J/(mol*K)'), comment="""Thermo library: DFT_QCI_thermo + radical(ROOJ) + radical(OCJO)"""),
)
# --- Auto-generated RMG-Py species records (H-abstraction / beta-scission products).
# species: enol-peroxy biradical (H moved from C to the terminal carbon)
species(
    label = 'C=CC=C[C](O)O[O](7359)',
    structure = SMILES('[CH2]C=CC=C(O)O[O]'),
    E0 = (86.5216,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([492.5,1135,1000,3000,3100,440,815,1455,1000,2995,3010,3025,975,987.5,1000,1300,1337.5,1375,400,450,500,1630,1655,1680,350,440,435,1725,3615,1277.5,1000,180],'cm^-1')),
        HinderedRotor(inertia=(0.599637,'amu*angstrom^2'), symmetry=1, barrier=(13.7868,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.600111,'amu*angstrom^2'), symmetry=1, barrier=(13.7977,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(1.13495,'amu*angstrom^2'), symmetry=1, barrier=(26.0948,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.599616,'amu*angstrom^2'), symmetry=1, barrier=(13.7864,'kJ/mol'), semiclassical=False),
    ],
    spinMultiplicity = 3,
    opticalIsomers = 1,
    molecularWeight = (114.099,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.249628,0.0781669,-8.65039e-05,4.94876e-08,-1.10922e-11,10544.9,29.2669], Tmin=(100,'K'), Tmax=(1096.33,'K')), NASAPolynomial(coeffs=[16.1613,0.0201118,-7.07173e-06,1.18503e-09,-7.74178e-14,7056.06,-48.961], Tmin=(1096.33,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(86.5216,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(315.95,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-O2s(Cds-Cd)) + group(O2s-(Cds-Cd)H) + group(O2s-OsH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsCsCs) + radical(ROOJ) + radical(C=CC=CCJ)"""),
)
# species: vinylic radical isomer (radical(Cds_S))
species(
    label = 'C=CC=[C]C(O)O[O](7360)',
    structure = SMILES('C=CC=[C]C(O)O[O]'),
    E0 = (159.189,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([3615,1277.5,1000,1685,370,2995,3025,975,1000,1300,1375,400,500,1630,1680,2950,3100,1380,975,1025,1650,1380,1390,370,380,2900,435,492.5,1135,1000,180,180],'cm^-1')),
        HinderedRotor(inertia=(0.569127,'amu*angstrom^2'), symmetry=1, barrier=(13.0854,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.569458,'amu*angstrom^2'), symmetry=1, barrier=(13.093,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.569151,'amu*angstrom^2'), symmetry=1, barrier=(13.0859,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.569176,'amu*angstrom^2'), symmetry=1, barrier=(13.0865,'kJ/mol'), semiclassical=False),
    ],
    spinMultiplicity = 3,
    opticalIsomers = 1,
    molecularWeight = (114.099,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.459432,0.0817952,-0.000104066,7.1487e-08,-1.97592e-11,19270,30.3777], Tmin=(100,'K'), Tmax=(881.376,'K')), NASAPolynomial(coeffs=[12.2814,0.0281428,-1.27554e-05,2.42014e-09,-1.68517e-13,17186.1,-25.1635], Tmin=(881.376,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(159.189,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(315.95,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-CsH) + group(O2s-OsH) + group(Cs-CsOsOsH) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + radical(Cds_S) + radical(ROOJ)"""),
)
# species: vinyl radical (Klippenstein/Glarborg thermo)
species(
    label = 'C2H3(28)(29)',
    structure = SMILES('[CH]=C'),
    E0 = (286.361,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([2950,3100,1380,975,1025,1650,677.08,1086.68,3788.01],'cm^-1')),
    ],
    spinMultiplicity = 2,
    opticalIsomers = 1,
    molecularWeight = (27.0452,'amu'),
    collisionModel = TransportData(shapeIndex=2, epsilon=(1737.73,'J/mol'), sigma=(4.1,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=1.0, comment="""GRI-Mech"""),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.36378,0.000265766,2.79621e-05,-3.72987e-08,1.5159e-11,34475,7.9151], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[4.15027,0.00754021,-2.62998e-06,4.15974e-10,-2.45408e-14,33856.6,1.72812], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), E0=(286.361,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(108.088,'J/(mol*K)'), label="""C2H3""", comment="""Thermo library: Klippenstein_Glarborg2016"""),
)
# species: C3 fragment of the beta-scission channel
species(
    label = '[CH]=CC(O)O[O](7361)',
    structure = SMILES('[CH]=CC(O)O[O]'),
    E0 = (116.671,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([3615,1277.5,1000,3120,650,792.5,1650,1380,1390,370,380,2900,435,3010,987.5,1337.5,450,1655,492.5,1135,1000],'cm^-1')),
        HinderedRotor(inertia=(0.459796,'amu*angstrom^2'), symmetry=1, barrier=(10.5716,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.459704,'amu*angstrom^2'), symmetry=1, barrier=(10.5695,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.459674,'amu*angstrom^2'), symmetry=1, barrier=(10.5688,'kJ/mol'), semiclassical=False),
    ],
    spinMultiplicity = 3,
    opticalIsomers = 1,
    molecularWeight = (88.0621,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.80913,0.0543703,-6.66677e-05,3.28693e-08,1.44191e-12,14105.2,22.8668], Tmin=(100,'K'), Tmax=(576,'K')), NASAPolynomial(coeffs=[8.244,0.0211563,-1.00494e-05,1.91807e-09,-1.32744e-13,13173.6,-6.27994], Tmin=(576,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(116.671,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(220.334,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-CsH) + group(O2s-OsH) + group(Cs-CsOsOsH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + radical(Cds_P) + radical(ROOJ)"""),
)
# species: allylic/vinylic isomer (radical(C=CJC=C))
species(
    label = 'C=C[C]=CC(O)O[O](7362)',
    structure = SMILES('C=C[C]=CC(O)O[O]'),
    E0 = (120.342,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([3615,1277.5,1000,1685,370,2995,3025,975,1000,1300,1375,400,500,1630,1680,2950,3100,1380,975,1025,1650,1380,1390,370,380,2900,435,492.5,1135,1000,180,180],'cm^-1')),
        HinderedRotor(inertia=(0.711232,'amu*angstrom^2'), symmetry=1, barrier=(16.3526,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.712078,'amu*angstrom^2'), symmetry=1, barrier=(16.3721,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.711266,'amu*angstrom^2'), symmetry=1, barrier=(16.3534,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.711587,'amu*angstrom^2'), symmetry=1, barrier=(16.3608,'kJ/mol'), semiclassical=False),
    ],
    spinMultiplicity = 3,
    opticalIsomers = 1,
    molecularWeight = (114.099,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.289161,0.0852885,-0.000114123,8.30173e-08,-2.41438e-11,14604.1,30.1067], Tmin=(100,'K'), Tmax=(841.914,'K')), NASAPolynomial(coeffs=[12.3456,0.028009,-1.20742e-05,2.2126e-09,-1.50139e-13,12573.9,-25.9845], Tmin=(841.914,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(120.342,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(315.95,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-CsH) + group(O2s-OsH) + group(Cs-CsOsOsH) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + radical(C=CJC=C) + radical(ROOJ)"""),
)
# species: allene-containing resonance form of the same C5H7O3 stoichiometry
species(
    label = 'C=[C]C=CC(O)O[O](7363)',
    structure = SMILES('C=C=C[CH]C(O)O[O]'),
    E0 = (104.425,'kJ/mol'),
    modes = [
        HarmonicOscillator(frequencies=([3025,407.5,1350,352.5,3615,1277.5,1000,3010,987.5,1337.5,450,1655,2950,3100,1380,975,1025,1650,1380,1390,370,380,2900,435,492.5,1135,1000,540,610,2055,180,180],'cm^-1')),
        HinderedRotor(inertia=(0.880655,'amu*angstrom^2'), symmetry=1, barrier=(20.248,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.880537,'amu*angstrom^2'), symmetry=1, barrier=(20.2453,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(0.880741,'amu*angstrom^2'), symmetry=1, barrier=(20.25,'kJ/mol'), semiclassical=False),
        HinderedRotor(inertia=(1.6583,'amu*angstrom^2'), symmetry=1, barrier=(38.1275,'kJ/mol'), semiclassical=False),
    ],
    spinMultiplicity = 3,
    opticalIsomers = 1,
    molecularWeight = (114.099,'amu'),
    energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
    thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.418222,0.0824927,-9.66286e-05,6.02512e-08,-1.5212e-11,12685.3,28.3901], Tmin=(100,'K'), Tmax=(957.724,'K')), NASAPolynomial(coeffs=[13.0544,0.0297163,-1.39687e-05,2.71149e-09,-1.91973e-13,10264.9,-32.0263], Tmin=(957.724,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(104.425,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(315.95,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-CsH) + group(O2s-OsH) + group(Cs-CsOsOsH) + group(Cs-(Cds-Cds)CsHH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + group(Cdd-CdsCds) + radical(C=CCJCO) + radical(ROOJ)"""),
)
species(
label = '[CH]=CC=CC(O)O[O](7364)',
structure = SMILES('[CH]=CC=CC(O)O[O]'),
E0 = (168.443,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3615,1277.5,1000,3120,650,792.5,1650,1380,1390,370,380,2900,435,2995,3010,3025,975,987.5,1000,1300,1337.5,1375,400,450,500,1630,1655,1680,492.5,1135,1000,180],'cm^-1')),
HinderedRotor(inertia=(0.642974,'amu*angstrom^2'), symmetry=1, barrier=(14.7832,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.642923,'amu*angstrom^2'), symmetry=1, barrier=(14.7821,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.642757,'amu*angstrom^2'), symmetry=1, barrier=(14.7782,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.64296,'amu*angstrom^2'), symmetry=1, barrier=(14.7829,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (114.099,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.414976,0.0808008,-9.63884e-05,6.03653e-08,-1.5088e-11,20386.5,30.464], Tmin=(100,'K'), Tmax=(974.617,'K')), NASAPolynomial(coeffs=[13.8935,0.0254812,-1.12461e-05,2.1242e-09,-1.48232e-13,17759.3,-34.2155], Tmin=(974.617,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(168.443,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(315.95,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-CsH) + group(O2s-OsH) + group(Cs-CsOsOsH) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + radical(Cds_P) + radical(ROOJ)"""),
)
species(
label = 'CC=CC=C(O)O[O](7365)',
structure = SMILES('CC=CC=C(O)O[O]'),
E0 = (-31.5342,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([350,440,435,1725,492.5,1135,1000,2750,2800,2850,1350,1500,750,1050,1375,1000,2995,3010,3025,975,987.5,1000,1300,1337.5,1375,400,450,500,1630,1655,1680,3615,1277.5,1000,180],'cm^-1')),
HinderedRotor(inertia=(0.594335,'amu*angstrom^2'), symmetry=1, barrier=(13.6649,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.595335,'amu*angstrom^2'), symmetry=1, barrier=(13.6879,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.594405,'amu*angstrom^2'), symmetry=1, barrier=(13.6665,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.594252,'amu*angstrom^2'), symmetry=1, barrier=(13.663,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (115.107,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.182477,0.084482,-0.000100396,6.29376e-08,-1.56534e-11,-3655.61,27.9562], Tmin=(100,'K'), Tmax=(983.323,'K')), NASAPolynomial(coeffs=[14.7002,0.0254279,-1.03149e-05,1.86629e-09,-1.27029e-13,-6510.79,-41.8394], Tmin=(983.323,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-31.5342,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(340.893,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-O2s(Cds-Cd)) + group(O2s-(Cds-Cd)H) + group(O2s-OsH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsCsCs) + radical(ROOJ)"""),
)
species(
label = '[O]OC(O)C1C=CC1(7366)',
structure = SMILES('[O]OC(O)C1C=CC1'),
E0 = (-23.8334,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (115.107,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.864061,0.0575622,-2.31389e-05,-1.22537e-08,8.90783e-12,-2743.64,29.2466], Tmin=(100,'K'), Tmax=(1022.21,'K')), NASAPolynomial(coeffs=[15.4596,0.0243787,-9.55986e-06,1.79125e-09,-1.27901e-13,-6977.82,-47.6042], Tmin=(1022.21,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-23.8334,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(345.051,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-CsH) + group(O2s-OsH) + group(Cs-(Cds-Cds)CsCsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsOsOsH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + ring(Cyclobutene) + radical(ROOJ)"""),
)
species(
label = 'C=C[CH]C1OOC1O(7350)',
structure = SMILES('C=C[CH]C1OOC1O'),
E0 = (-55.2269,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (115.107,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.499151,0.0708451,-5.71247e-05,2.30532e-08,-3.74359e-12,-6511.35,24.1797], Tmin=(100,'K'), Tmax=(1452.32,'K')), NASAPolynomial(coeffs=[16.4836,0.0268203,-1.16546e-05,2.18071e-09,-1.50634e-13,-11154.3,-58.9009], Tmin=(1452.32,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-55.2269,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(345.051,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-OsCs) + group(O2s-CsH) + group(Cs-CsCsOsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsOsOsH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + ring(12dioxetane) + radical(C=CCJCO)"""),
)
species(
label = '[CH2]C1C=CC(O)OO1(7351)',
structure = SMILES('[CH2]C1C=CC(O)OO1'),
E0 = (-66.8177,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (115.107,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.940765,0.0473136,2.28984e-05,-6.99474e-08,3.13536e-11,-7908.21,25.92], Tmin=(100,'K'), Tmax=(973.244,'K')), NASAPolynomial(coeffs=[19.9696,0.0176509,-6.20437e-06,1.23907e-09,-9.73149e-14,-13911.3,-77.1789], Tmin=(973.244,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-66.8177,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(349.208,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-OsCs) + group(O2s-CsH) + group(Cs-(Cds-Cds)CsOsH) + group(Cs-CsOsOsH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + ring(36dihydro12dioxin) + radical(CJCOOH)"""),
)
species(
label = 'C=CC=C[C](O)OO(7352)',
structure = SMILES('[CH2]C=CC=C(O)OO'),
E0 = (-65.4831,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3615,1310,387.5,850,1000,3000,3100,440,815,1455,1000,2995,3010,3025,975,987.5,1000,1300,1337.5,1375,400,450,500,1630,1655,1680,350,440,435,1725,3615,1277.5,1000,180],'cm^-1')),
HinderedRotor(inertia=(0.802397,'amu*angstrom^2'), symmetry=1, barrier=(18.4487,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(2.98572,'amu*angstrom^2'), symmetry=1, barrier=(68.6475,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.802445,'amu*angstrom^2'), symmetry=1, barrier=(18.4498,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.974965,'amu*angstrom^2'), symmetry=1, barrier=(22.4164,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.802195,'amu*angstrom^2'), symmetry=1, barrier=(18.444,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (115.107,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[-0.1195,0.0829667,-8.5835e-05,4.50809e-08,-9.25714e-12,-7720.98,29.8344], Tmin=(100,'K'), Tmax=(1194.49,'K')), NASAPolynomial(coeffs=[18.5577,0.0204223,-7.29391e-06,1.24576e-09,-8.2715e-14,-12182.9,-63.5917], Tmin=(1194.49,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-65.4831,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(336.736,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-O2s(Cds-Cd)) + group(O2s-(Cds-Cd)H) + group(O2s-OsH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsCsCs) + radical(C=CC=CCJ)"""),
)
species(
label = 'C=CC=CC([O])OO(7353)',
structure = SMILES('C=CC=CC([O])OO'),
E0 = (-4.95273,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3615,1310,387.5,850,1000,2950,3100,1380,975,1025,1650,1380,1390,370,380,2900,435,2995,3010,3025,975,987.5,1000,1300,1337.5,1375,400,450,500,1630,1655,1680,281.056,281.058,281.061],'cm^-1')),
HinderedRotor(inertia=(0.208929,'amu*angstrom^2'), symmetry=1, barrier=(11.7113,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.208917,'amu*angstrom^2'), symmetry=1, barrier=(11.7113,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.390435,'amu*angstrom^2'), symmetry=1, barrier=(21.8859,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.608441,'amu*angstrom^2'), symmetry=1, barrier=(34.106,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (115.107,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.825757,0.0751621,-7.62569e-05,4.33477e-08,-1.04449e-11,-485.824,28.698], Tmin=(100,'K'), Tmax=(976.692,'K')), NASAPolynomial(coeffs=[10.1013,0.0371744,-1.79153e-05,3.52489e-09,-2.51554e-13,-2297.68,-15.8322], Tmin=(976.692,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-4.95273,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(340.893,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-CsH) + group(O2s-OsH) + group(Cs-CsOsOsH) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + radical(CCOJ)"""),
)
species(
label = 'C=CC=[C]C(O)OO(7354)',
structure = SMILES('C=CC=[C]C(O)OO'),
E0 = (7.18391,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (115.107,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.289035,0.0842883,-9.55147e-05,5.71196e-08,-1.37804e-11,995.492,30.2301], Tmin=(100,'K'), Tmax=(1002.14,'K')), NASAPolynomial(coeffs=[14.0008,0.0295585,-1.35954e-05,2.62336e-09,-1.85416e-13,-1752.73,-35.9505], Tmin=(1002.14,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(7.18391,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(336.736,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-CsH) + group(O2s-OsH) + group(Cs-CsOsOsH) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + radical(Cds_S)"""),
)
species(
label = 'C=C[C]=CC(O)OO(7355)',
structure = SMILES('C=C[C]=CC(O)OO'),
E0 = (-31.6624,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2950,3100,1380,975,1025,1650,3615,1277.5,1000,1685,370,2995,3025,975,1000,1300,1375,400,500,1630,1680,3615,1310,387.5,850,1000,1380,1390,370,380,2900,435,180,180],'cm^-1')),
HinderedRotor(inertia=(0.833653,'amu*angstrom^2'), symmetry=1, barrier=(19.1673,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.833514,'amu*angstrom^2'), symmetry=1, barrier=(19.1641,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(2.28261,'amu*angstrom^2'), symmetry=1, barrier=(52.4818,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.833667,'amu*angstrom^2'), symmetry=1, barrier=(19.1676,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.833644,'amu*angstrom^2'), symmetry=1, barrier=(19.1671,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (115.107,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.148032,0.0873692,-0.000103822,6.59637e-08,-1.68434e-11,-3671.62,29.8587], Tmin=(100,'K'), Tmax=(952.08,'K')), NASAPolynomial(coeffs=[13.8466,0.0298173,-1.31498e-05,2.47313e-09,-1.71897e-13,-6280.05,-35.5563], Tmin=(952.08,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-31.6624,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(336.736,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-CsH) + group(O2s-OsH) + group(Cs-CsOsOsH) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + radical(C=CJC=C)"""),
)
species(
label = 'C=[C]C=CC(O)OO(7356)',
structure = SMILES('C=C=C[CH]C(O)OO'),
E0 = (-47.5799,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3025,407.5,1350,352.5,2950,3100,1380,975,1025,1650,3615,1277.5,1000,3010,987.5,1337.5,450,1655,3615,1310,387.5,850,1000,1380,1390,370,380,2900,435,540,610,2055,180,180],'cm^-1')),
HinderedRotor(inertia=(1.21998,'amu*angstrom^2'), symmetry=1, barrier=(28.0497,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(1.22004,'amu*angstrom^2'), symmetry=1, barrier=(28.051,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(1.21993,'amu*angstrom^2'), symmetry=1, barrier=(28.0486,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(1.22001,'amu*angstrom^2'), symmetry=1, barrier=(28.0503,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(1.21994,'amu*angstrom^2'), symmetry=1, barrier=(28.0489,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (115.107,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.188145,0.0857514,-9.10099e-05,4.99889e-08,-1.10947e-11,-5586.8,28.452], Tmin=(100,'K'), Tmax=(1082.36,'K')), NASAPolynomial(coeffs=[15.1394,0.030498,-1.44378e-05,2.82619e-09,-2.01471e-13,-8823.4,-44.863], Tmin=(1082.36,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-47.5799,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(336.736,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-CsH) + group(O2s-OsH) + group(Cs-CsOsOsH) + group(Cs-(Cds-Cds)CsHH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + group(Cdd-CdsCds) + radical(C=CCJCO)"""),
)
species(
label = '[CH]=CC=CC(O)OO(7357)',
structure = SMILES('[CH]=CC=CC(O)OO'),
E0 = (16.4383,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3615,1310,387.5,850,1000,3120,650,792.5,1650,1380,1390,370,380,2900,435,2995,3010,3025,975,987.5,1000,1300,1337.5,1375,400,450,500,1630,1655,1680,3615,1277.5,1000,180],'cm^-1')),
HinderedRotor(inertia=(0.822094,'amu*angstrom^2'), symmetry=1, barrier=(18.9016,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.822887,'amu*angstrom^2'), symmetry=1, barrier=(18.9198,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.822338,'amu*angstrom^2'), symmetry=1, barrier=(18.9072,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.822125,'amu*angstrom^2'), symmetry=1, barrier=(18.9023,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.822649,'amu*angstrom^2'), symmetry=1, barrier=(18.9143,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (115.107,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.156604,0.0843671,-9.17345e-05,5.12235e-08,-1.14038e-11,2115.7,30.6292], Tmin=(100,'K'), Tmax=(1088.84,'K')), NASAPolynomial(coeffs=[15.9618,0.0263037,-1.17441e-05,2.24669e-09,-1.58439e-13,-1326.1,-46.9665], Tmin=(1088.84,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(16.4383,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(336.736,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-CsH) + group(O2s-OsH) + group(Cs-CsOsOsH) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + radical(Cds_P)"""),
)
species(
label = 'C=CC1[CH]C(O)OO1(7323)',
structure = SMILES('C=CC1[CH]C(O)OO1'),
E0 = (-56.994,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (115.107,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.03075,0.047502,2.30603e-05,-7.57959e-08,3.67907e-11,-6731.23,29.2158], Tmin=(100,'K'), Tmax=(906.734,'K')), NASAPolynomial(coeffs=[19.0555,0.0152985,-1.93271e-06,1.24922e-10,-7.80083e-15,-11944.9,-66.7032], Tmin=(906.734,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-56.994,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(349.208,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-OsCs) + group(O2s-CsH) + group(Cs-(Cds-Cds)CsOsH) + group(Cs-CsCsHH) + group(Cs-CsOsOsH) + group(Cds-CdsCsH) + group(Cds-CdsHH) + ring(12dioxolane) + radical(CCJCOOH)"""),
)
species(
label = 'OC1[CH]C=CCOO1(7305)',
structure = SMILES('OC1C=C[CH]COO1'),
E0 = (-134.081,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (115.107,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.89044,0.0160801,0.000182109,-2.88263e-07,1.23453e-10,-15965.4,25.0312], Tmin=(100,'K'), Tmax=(911.314,'K')), NASAPolynomial(coeffs=[40.3276,-0.0204779,1.75379e-05,-3.46134e-09,2.21254e-13,-28823.2,-192.676], Tmin=(911.314,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-134.081,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(353.365,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-OsCs) + group(O2s-CsH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsOsOsH) + group(Cs-CsOsHH) + group(Cds-CdsCsH) + group(Cds-CdsCsH) + ring(Cycloheptane) + radical(C=CCJCO)"""),
)
species(
label = 'C=CC=C=CO(6203)',
structure = SMILES('C=CC=C=CO'),
E0 = (25.7073,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3615,1277.5,1000,2995,3010,3025,975,987.5,1000,1300,1337.5,1375,400,450,500,1630,1655,1680,540,610,2055,2950,3100,1380,975,1025,1650,180],'cm^-1')),
HinderedRotor(inertia=(1.24386,'amu*angstrom^2'), symmetry=1, barrier=(28.5988,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(1.24416,'amu*angstrom^2'), symmetry=1, barrier=(28.6057,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (82.1005,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.02364,0.048058,2.85443e-06,-5.66469e-08,3.08127e-11,3215.32,19.3983], Tmin=(100,'K'), Tmax=(909.916,'K')), NASAPolynomial(coeffs=[22.142,0.00160986,2.95307e-06,-6.91045e-10,4.50216e-14,-2548.21,-91.0442], Tmin=(909.916,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(25.7073,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(274.378,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-(Cds-Cd)H) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsOsH) + group(Cds-CdsHH) + group(Cdd-CdsCds)"""),
)
species(
label = 'O(4)(4)',
structure = SMILES('[O]'),
E0 = (243.005,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (15.9994,'amu'),
collisionModel = TransportData(shapeIndex=0, epsilon=(665.16,'J/mol'), sigma=(2.75,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,9.24385e-15,-1.3678e-17,6.66185e-21,-1.00107e-24,29226.7,5.11107], Tmin=(100,'K'), Tmax=(3459.6,'K')), NASAPolynomial(coeffs=[2.5,9.20456e-12,-3.58608e-15,6.15199e-19,-3.92042e-23,29226.7,5.11107], Tmin=(3459.6,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(243.005,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""O""", comment="""Thermo library: BurkeH2O2"""),
)
species(
label = 'C=CC=CC([O])O(7367)',
structure = SMILES('C=CC=CC([O])O'),
E0 = (-76.4577,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3615,1277.5,1000,2950,3100,1380,975,1025,1650,1380,1390,370,380,2900,435,2995,3010,3025,975,987.5,1000,1300,1337.5,1375,400,450,500,1630,1655,1680,180,180,4000],'cm^-1')),
HinderedRotor(inertia=(0.830092,'amu*angstrom^2'), symmetry=1, barrier=(19.0855,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.831697,'amu*angstrom^2'), symmetry=1, barrier=(19.1224,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.831731,'amu*angstrom^2'), symmetry=1, barrier=(19.1231,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (99.1079,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.37943,0.0598699,-5.1679e-05,2.45689e-08,-4.95755e-12,-9103.3,25.9347], Tmin=(100,'K'), Tmax=(1147.37,'K')), NASAPolynomial(coeffs=[9.64425,0.0310565,-1.40099e-05,2.68154e-09,-1.8848e-13,-10999.8,-15.0744], Tmin=(1147.37,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-76.4577,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(320.107,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsH) + group(O2s-CsH) + group(Cs-CsOsOsH) + group(Cds-CdsCsH) + group(Cds-Cds(Cds-Cds)H) + group(Cds-Cds(Cds-Cds)H) + group(Cds-CdsHH) + radical(CCOJ)"""),
)
species(
label = 'Ne',
structure = SMILES('[Ne]'),
E0 = (-6.19738,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (20.1797,'amu'),
collisionModel = TransportData(shapeIndex=0, epsilon=(1235.53,'J/mol'), sigma=(3.758e-10,'m'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with fixed Lennard Jones Parameters. This is the fallback method! Try improving transport databases!"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,3.35532], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,3.35532], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), E0=(-6.19738,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""Ne""", comment="""Thermo library: primaryThermoLibrary"""),
)
species(
label = 'N2',
structure = SMILES('N#N'),
E0 = (-8.69489,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (28.0135,'amu'),
collisionModel = TransportData(shapeIndex=1, epsilon=(810.913,'J/mol'), sigma=(3.621,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(1.76,'angstroms^3'), rotrelaxcollnum=4.0, comment="""PrimaryTransportLibrary"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.61263,-0.00100893,2.49898e-06,-1.43376e-09,2.58636e-13,-1051.1,2.6527], Tmin=(100,'K'), Tmax=(1817.04,'K')), NASAPolynomial(coeffs=[2.9759,0.00164141,-7.19722e-07,1.25378e-10,-7.91526e-15,-1025.84,5.53757], Tmin=(1817.04,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-8.69489,'kJ/mol'), Cp0=(29.1007,'J/(mol*K)'), CpInf=(37.4151,'J/(mol*K)'), label="""N2""", comment="""Thermo library: BurkeH2O2"""),
)
species(
label = 'Ar(8)',
structure = SMILES('[Ar]'),
E0 = (-6.19426,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (39.348,'amu'),
collisionModel = TransportData(shapeIndex=0, epsilon=(1134.93,'J/mol'), sigma=(3.33,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,9.24385e-15,-1.3678e-17,6.66185e-21,-1.00107e-24,-745,4.3663], Tmin=(100,'K'), Tmax=(3459.6,'K')), NASAPolynomial(coeffs=[2.5,9.20456e-12,-3.58608e-15,6.15199e-19,-3.92042e-23,-745,4.3663], Tmin=(3459.6,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-6.19426,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""Ar""", comment="""Thermo library: BurkeH2O2"""),
)
transitionState(
label = 'TS1',
E0 = (-38.501,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS2',
E0 = (273.511,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS3',
E0 = (358.844,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS4',
E0 = (355.739,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS5',
E0 = (298.833,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS6',
E0 = (370.981,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS7',
E0 = (403.032,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS8',
E0 = (336.331,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS9',
E0 = (320.413,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS10',
E0 = (380.235,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS11',
E0 = (132.18,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS12',
E0 = (43.7598,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS13',
E0 = (43.3898,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS14',
E0 = (-62.1961,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS15',
E0 = (109.627,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS16',
E0 = (100.689,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS17',
E0 = (66.9498,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS18',
E0 = (1.37779,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS19',
E0 = (71.9725,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS20',
E0 = (167.959,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS21',
E0 = (10.0679,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS22',
E0 = (0.273836,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS23',
E0 = (44.5658,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS24',
E0 = (100.004,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS25',
E0 = (166.547,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
reaction(
label = 'reaction1',
reactants = ['O2(2)(2)', 'C5H7O(224)(223)'],
products = ['S(227)(226)'],
transitionState = 'TS1',
kinetics = Arrhenius(A=(3.19343e+07,'m^3/(mol*s)'), n=0.0113737, Ea=(2.96199,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [C_rad/H/OneDe;Y_rad] for rate rule [C_rad/H/OneDeO;O2_birad]
Euclidian distance = 1.41421356237
Multiplied by reaction path degeneracy 2.0
family: R_Recombination"""),
)
reaction(
label = 'reaction10',
reactants = ['OH(5)(5)', 'C=CC=C[CH]O[O](2605)'],
products = ['S(227)(226)'],
transitionState = 'TS2',
kinetics = Arrhenius(A=(9.81233e+06,'m^3/(mol*s)'), n=0.00568686, Ea=(1.481,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [Y_rad;C_rad/H/OneDe] + [O_rad;C_sec_rad] for rate rule [O_pri_rad;C_rad/H/OneDeO]
Euclidian distance = 2.2360679775
family: R_Recombination"""),
)
reaction(
label = 'reaction11',
reactants = ['H(3)(3)', 'C=CC=CC([O])O[O](7130)'],
products = ['S(227)(226)'],
transitionState = 'TS3',
kinetics = Arrhenius(A=(5.21063e+06,'m^3/(mol*s)'), n=0.156446, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [Y_rad;O_rad/NonDe] + [H_rad;O_sec_rad] for rate rule [H_rad;O_rad/NonDe]
Euclidian distance = 1.0
family: R_Recombination
Ea raised from -0.7 to 0 kJ/mol."""),
)
reaction(
label = 'reaction12',
reactants = ['CH2CHCHCH(913)', '[O]O[CH]O(7358)'],
products = ['S(227)(226)'],
transitionState = 'TS4',
kinetics = Arrhenius(A=(4.18e+06,'m^3/(mol*s)'), n=-0.085, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [C_rad/H/NonDeO;Y_rad] for rate rule [C_rad/H/O2;Cd_pri_rad]
Euclidian distance = 2.2360679775
family: R_Recombination
Ea raised from -2.4 to 0 kJ/mol."""),
)
reaction(
label = 'reaction13',
reactants = ['H(3)(3)', 'C=CC=C[C](O)O[O](7359)'],
products = ['S(227)(226)'],
transitionState = 'TS5',
kinetics = Arrhenius(A=(2.92e+13,'cm^3/(mol*s)'), n=0.18, Ea=(0.518816,'kJ/mol'), T0=(1,'K'), Tmin=(200,'K'), Tmax=(2000,'K'), comment="""Estimated using template [C_rad/OneDe;H_rad] for rate rule [C_rad/OneDeO;H_rad]
Euclidian distance = 1.0
family: R_Recombination"""),
)
reaction(
label = 'reaction14',
reactants = ['H(3)(3)', 'C=CC=[C]C(O)O[O](7360)'],
products = ['S(227)(226)'],
transitionState = 'TS6',
kinetics = Arrhenius(A=(1e+13,'cm^3/(mol*s)'), n=0, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 40 used for Cd_rad/NonDe;H_rad
Exact match found for rate rule [Cd_rad/NonDe;H_rad]
Euclidian distance = 0
family: R_Recombination"""),
)
reaction(
label = 'reaction15',
reactants = ['C2H3(28)(29)', '[CH]=CC(O)O[O](7361)'],
products = ['S(227)(226)'],
transitionState = 'TS7',
kinetics = Arrhenius(A=(7.23e+13,'cm^3/(mol*s)','+|-',1.2e+13), n=0, Ea=(0,'kJ/mol'), T0=(1,'K'), Tmin=(298,'K'), comment="""From training reaction 89 used for Cd_pri_rad;Cd_pri_rad
Exact match found for rate rule [Cd_pri_rad;Cd_pri_rad]
Euclidian distance = 0
family: R_Recombination"""),
)
reaction(
label = 'reaction16',
reactants = ['H(3)(3)', 'C=C[C]=CC(O)O[O](7362)'],
products = ['S(227)(226)'],
transitionState = 'TS8',
kinetics = Arrhenius(A=(6.117e+14,'cm^3/(mol*s)'), n=-0.152, Ea=(4.19655,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 49 used for Cd_rad/Cd;H_rad
Exact match found for rate rule [Cd_rad/Cd;H_rad]
Euclidian distance = 0
family: R_Recombination"""),
)
reaction(
label = 'reaction17',
reactants = ['H(3)(3)', 'C=[C]C=CC(O)O[O](7363)'],
products = ['S(227)(226)'],
transitionState = 'TS9',
kinetics = Arrhenius(A=(6.117e+14,'cm^3/(mol*s)'), n=-0.152, Ea=(4.19655,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 49 used for Cd_rad/Cd;H_rad
Exact match found for rate rule [Cd_rad/Cd;H_rad]
Euclidian distance = 0
family: R_Recombination"""),
)
reaction(
label = 'reaction18',
reactants = ['H(3)(3)', '[CH]=CC=CC(O)O[O](7364)'],
products = ['S(227)(226)'],
transitionState = 'TS10',
kinetics = Arrhenius(A=(1.21e+14,'cm^3/(mol*s)','+|-',4.82e+13), n=0, Ea=(0,'kJ/mol'), T0=(1,'K'), Tmin=(298,'K'), comment="""From training reaction 60 used for H_rad;Cd_pri_rad
Exact match found for rate rule [Cd_pri_rad;H_rad]
Euclidian distance = 0
family: R_Recombination"""),
)
reaction(
label = 'reaction21',
reactants = ['CC=CC=C(O)O[O](7365)'],
products = ['S(227)(226)'],
transitionState = 'TS11',
kinetics = Arrhenius(A=(1.02873e+09,'s^-1'), n=1.23767, Ea=(163.714,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [1_3_pentadiene;CH_end;unsaturated_end] for rate rule [1_3_pentadiene;CH3_1;unsaturated_end]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 3.0
family: Intra_ene_reaction"""),
)
reaction(
label = 'reaction24',
reactants = ['S(227)(226)'],
products = ['[O]OC(O)C1C=CC1(7366)'],
transitionState = 'TS12',
kinetics = Arrhenius(A=(4.99998e+11,'s^-1'), n=0.0559095, Ea=(122.413,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [1,3-butadiene_backbone;C=C_1;C=C_2] for rate rule [1,3-butadiene_backbone;CdH(C)_1;CdH2_2]
Euclidian distance = 1.41421356237
family: Intra_2+2_cycloaddition_Cd"""),
)
reaction(
label = 'reaction2',
reactants = ['S(227)(226)'],
products = ['C=C[CH]C1OOC1O(7350)'],
transitionState = 'TS13',
kinetics = Arrhenius(A=(2.724e+10,'s^-1','*|/',3), n=0.478, Ea=(122.043,'kJ/mol'), T0=(1,'K'), Tmin=(600,'K'), Tmax=(2000,'K'), comment="""Estimated using template [R5_SS_D;doublebond_intra;radadd_intra_O] for rate rule [R5_SS_D;doublebond_intra_HCd_pri;radadd_intra_O]
Euclidian distance = 3.0
family: Intra_R_Add_Exocyclic"""),
)
reaction(
label = 'reaction3',
reactants = ['S(227)(226)'],
products = ['[CH2]C1C=CC(O)OO1(7351)'],
transitionState = 'TS14',
kinetics = Arrhenius(A=(17945.7,'s^-1'), n=1.45333, Ea=(16.4571,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R7_SSSR;doublebond_intra_2H_pri;radadd_intra] for rate rule [R7_SSSM_D;doublebond_intra_2H_pri;radadd_intra_O]
Euclidian distance = 2.2360679775
family: Intra_R_Add_Exocyclic"""),
)
reaction(
label = 'reaction4',
reactants = ['C=CC=C[C](O)OO(7352)'],
products = ['S(227)(226)'],
transitionState = 'TS15',
kinetics = Arrhenius(A=(4.47099e+07,'s^-1'), n=1.47622, Ea=(175.11,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R3H_SS;C_rad_out_noH;XH_out] for rate rule [R3H_SS_O;C_rad_out_OneDe/O;O_H_out]
Euclidian distance = 2.44948974278
family: intra_H_migration"""),
)
reaction(
label = 'reaction5',
reactants = ['C=CC=CC([O])OO(7353)'],
products = ['S(227)(226)'],
transitionState = 'TS16',
kinetics = Arrhenius(A=(94.0113,'s^-1'), n=2.81534, Ea=(105.641,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [R4H_SSS;Y_rad_out;O_H_out] + [R4H_SSS;O_rad_out;XH_out] for rate rule [R4H_SSS;O_rad_out;O_H_out]
Euclidian distance = 1.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction6',
reactants = ['S(227)(226)'],
products = ['C=CC=[C]C(O)OO(7354)'],
transitionState = 'TS17',
kinetics = Arrhenius(A=(274,'s^-1'), n=3.09, Ea=(145.603,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""From training reaction 337 used for R4H_SSS_OCs;O_rad_out;Cd_H_out_doubleC
Exact match found for rate rule [R4H_SSS_OCs;O_rad_out;Cd_H_out_doubleC]
Euclidian distance = 0
family: intra_H_migration"""),
)
reaction(
label = 'reaction7',
reactants = ['C=C[C]=CC(O)OO(7355)'],
products = ['S(227)(226)'],
transitionState = 'TS18',
kinetics = Arrhenius(A=(136000,'s^-1'), n=1.9199, Ea=(33.0402,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R5H;Cd_rad_out_single;XH_out] for rate rule [R5H_DSSS;Cd_rad_out_singleDe_Cd;O_H_out]
Euclidian distance = 3.74165738677
family: intra_H_migration"""),
)
reaction(
label = 'reaction8',
reactants = ['C=[C]C=CC(O)OO(7356)'],
products = ['S(227)(226)'],
transitionState = 'TS19',
kinetics = Arrhenius(A=(1.62236e+06,'s^-1'), n=1.59783, Ea=(119.552,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [RnH;Cd_rad_out_Cd;XH_out] + [R6H_SMSSR;Y_rad_out;XH_out] for rate rule [R6H_SMSSR;Cd_rad_out_Cd;O_H_out]
Euclidian distance = 3.16227766017
family: intra_H_migration"""),
)
reaction(
label = 'reaction9',
reactants = ['[CH]=CC=CC(O)OO(7357)'],
products = ['S(227)(226)'],
transitionState = 'TS20',
kinetics = Arrhenius(A=(1.86943e+06,'s^-1'), n=1.85754, Ea=(151.521,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [RnH;Cd_rad_out_singleH;XH_out] for rate rule [R7H;Cd_rad_out_singleH;O_H_out]
Euclidian distance = 2.2360679775
family: intra_H_migration"""),
)
reaction(
label = 'reaction19',
reactants = ['S(227)(226)'],
products = ['C=CC1[CH]C(O)OO1(7323)'],
transitionState = 'TS21',
kinetics = Arrhenius(A=(1.81589e+09,'s^-1'), n=0.403324, Ea=(88.7211,'kJ/mol'), T0=(1,'K'), comment="""Estimated using average of templates [R5_linear;doublebond_intra_pri_HCd;radadd_intra] + [R5_SS_D;doublebond_intra_pri;radadd_intra] for rate rule [R5_SS_D;doublebond_intra_pri_HCd;radadd_intra_O]
Euclidian distance = 2.2360679775
family: Intra_R_Add_Endocyclic"""),
)
reaction(
label = 'reaction20',
reactants = ['S(227)(226)'],
products = ['OC1[CH]C=CCOO1(7305)'],
transitionState = 'TS22',
kinetics = Arrhenius(A=(2.76476e+07,'s^-1'), n=0.815689, Ea=(78.927,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R7_linear;doublebond_intra_pri_2H;radadd_intra] for rate rule [R7_linear;doublebond_intra_pri_2H;radadd_intra_O]
Euclidian distance = 1.0
family: Intra_R_Add_Endocyclic"""),
)
reaction(
label = 'reaction22',
reactants = ['S(227)(226)'],
products = ['HO2(8)(9)', 'C5H6O(217)(216)'],
transitionState = 'TS23',
kinetics = Arrhenius(A=(6.813e+10,'s^-1','*|/',10), n=0.493, Ea=(123.219,'kJ/mol'), T0=(1,'K'), Tmin=(600,'K'), Tmax=(2000,'K'), comment="""From training reaction 13 used for R2OO_O_HNd
Exact match found for rate rule [R2OO_O_HNd]
Euclidian distance = 0
family: HO2_Elimination_from_PeroxyRadical"""),
)
reaction(
label = 'reaction23',
reactants = ['S(227)(226)'],
products = ['HO2(8)(9)', 'C=CC=C=CO(6203)'],
transitionState = 'TS24',
kinetics = Arrhenius(A=(3.63e+09,'s^-1'), n=1.11, Ea=(178.657,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""Estimated using an average for rate rule [R2OO_0H]
Euclidian distance = 0
family: HO2_Elimination_from_PeroxyRadical"""),
)
reaction(
label = 'reaction25',
reactants = ['O(4)(4)', 'C=CC=CC([O])O(7367)'],
products = ['S(227)(226)'],
transitionState = 'TS25',
kinetics = Arrhenius(A=(1355.7,'m^3/(mol*s)'), n=1.40819, Ea=(0,'kJ/mol'), T0=(1,'K'), Tmin=(303.03,'K'), Tmax=(2000,'K'), comment="""From training reaction 3 used for O_rad/NonDe;O_birad
Exact match found for rate rule [O_rad/NonDe;O_birad]
Euclidian distance = 0
family: Birad_R_Recombination
Ea raised from -12.0 to 0 kJ/mol."""),
)
network(
label = '704',
isomers = [
'S(227)(226)',
],
reactants = [
('O2(2)(2)', 'C5H7O(224)(223)'),
('HO2(8)(9)', 'C5H6O(217)(216)'),
],
bathGas = {
'Ne': 0.333333,
'N2': 0.333333,
'Ar(8)': 0.333333,
},
)
pressureDependence(
label = '704',
Tmin = (300,'K'),
Tmax = (2000,'K'),
Tcount = 8,
Tlist = ([302.47,323.145,369.86,455.987,609.649,885.262,1353.64,1896.74],'K'),
Pmin = (0.01,'bar'),
Pmax = (100,'bar'),
Pcount = 5,
Plist = ([0.0125282,0.0667467,1,14.982,79.8202],'bar'),
maximumGrainSize = (0.5,'kcal/mol'),
minimumGrainCount = 250,
method = 'modified strong collision',
interpolationModel = ('Chebyshev', 6, 4),
activeKRotor = True,
activeJRotor = True,
rmgmode = True,
)
|
999,534 | 7ca2d983cec914d3945f3098f33038474efb211b | import dataclasses
import datetime
import hashlib
import io
import json
import logging
import lzma
import os
import random
import tarfile
import tempfile
import typing
import glci.model
logger = logging.getLogger(__name__)
dc = dataclasses.dataclass
@dc
class OCIConfig:
pass
@dc
class OCIContainerCfg:
    """Container runtime configuration embedded twice (as `config` and
    `container_config`) in the per-image config JSON of a docker-V1 image."""
    Hostname: str
    Image: str  # sha256-hash
    Domainname: str = ''
    User: str = ''
    AttachStdin: bool = False
    AttachStdout: bool = False
    Tty: bool = False
    OpenStdin: bool = False
    StdinOnce: bool = False
    # default PATH mirrors what docker writes for minimal images
    Env: typing.Tuple[str] = (
        'PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin',
    )
    Cmd: typing.Tuple[str] = ('/bin/sh',)
    ArgsEscaped: bool = False
    Volumes: typing.Optional[typing.Tuple[str]] = None
    WorkingDir: str = ''
    Entrypoint: str = None
    OnBuild: str = None
    Labels: typing.Dict[str, str] = dataclasses.field(
        default_factory=dict,
    )
@dc
class OCI_Fs:
    """`rootfs` section of the image config: ordered layer digests."""
    diff_ids: typing.Tuple[str]  # tuple of layer-sha256-digests
    type: str = 'layers'
@dc
class OCIManifestEntry:
    """Per-image config JSON (the file named `<sha256>.json` in the image tar)."""
    config: OCIContainerCfg
    container_config: OCIContainerCfg
    created: str  # iso8601-ts
    container: str  # container-hash
    # container_config: OCIContainerCfg
    rootfs: OCI_Fs
    architecture: str='amd64'
    docker_version: str='18.09.7'
    history: typing.Tuple[dict] = ()
    os: str = 'linux'
@dc
class OCIManifest:
    """One entry of the top-level `manifest.json` list in a docker-save-format tar."""
    __filename__ = 'manifest.json'
    Config: str  # relpath to config.json
    Layers: typing.Tuple[str]  # relpaths to <layer>/layer.tar
    RepoTags: typing.Tuple[str]  # repo-tags (e.g. eu.gcr.io/foo/bar:latest)
@dc
class LayerContainerCfg:
    """Minimal `container_config` stanza written into each layer's `json` file."""
    Cmd: typing.List[str]
@dc
class LayerCfg:
    """Legacy per-layer metadata (`<layer-id>/json` inside the image tar)."""
    container_config: LayerContainerCfg
    created: str  # isoformat ts
    id: str  # digest
def buf_leng(fileobj):
    """Return the size in bytes of a seekable file object, rewound to offset 0."""
    # seek() returns the new absolute offset, i.e. the size when seeking to EOF
    size = fileobj.seek(0, io.SEEK_END)
    fileobj.seek(0, io.SEEK_SET)
    return size
def image_from_rootfs(
    rootfs_tar_xz: str,
    image_reference: str,
):
    '''
    creates an OCI-compliant container image, based on the legacy V1 spec used
    by docker with exactly one layer, containing the filesystem contained in
    `rootfs_tar_xz`, which is expected to be an xzip-compressed tarfile.

    the resulting file is created as a temporary file, and its path is returned
    to the caller. The caller is responsible for unlinking the file.
    '''
    # decompress the rootfs to a temporary file, calculating its sha256 digest
    # on the fly (the digest is reused as hostname / image-id below)
    rootfs_tar = tempfile.NamedTemporaryFile()
    rootfs_sha256_hash = hashlib.sha256()
    with lzma.open(rootfs_tar_xz) as f:
        while chunk := f.read(4096):  # was 4069 - chunk-size typo
            rootfs_sha256_hash.update(chunk)
            rootfs_tar.write(chunk)
    rootfs_sha256_digest = rootfs_sha256_hash.hexdigest()
    rootfs_tar.flush()

    now_ts = datetime.datetime.now().isoformat() + 'Z'

    # pseudo-random id; only used as the layer directory name inside the tar
    container_id = hashlib.sha256(f'{random.randint(0, 2 ** 32)}'.encode('utf-8')).hexdigest()
    logger.info(f'{container_id=}')

    # create manifest entry (name it as the hash)
    manifest_entry = OCIManifestEntry(
        created=now_ts,
        container=rootfs_sha256_digest, # deviates from what docker does
        config=OCIContainerCfg(
            Hostname=rootfs_sha256_digest,
            Image=f'sha256:{rootfs_sha256_digest}',
            # use defaults from dataclass definition
        ),
        container_config=OCIContainerCfg(
            Hostname=rootfs_sha256_digest,
            Image=f'sha256:{rootfs_sha256_digest}',
            # use defaults from dataclass definition
        ),
        rootfs=OCI_Fs(
            diff_ids=[f'sha256:{rootfs_sha256_digest}'],
            type='layers',
        ),
        architecture='amd64',
        os='linux',
    )

    manifest_buf = io.BytesIO(json.dumps(dataclasses.asdict(manifest_entry)).encode('utf-8'))
    manifest_buf_leng = buf_leng(manifest_buf)
    manifest_sha256_hash = hashlib.sha256(manifest_buf.read())
    manifest_buf.seek(0)
    manifest_entry_fname = f'{manifest_sha256_hash.hexdigest()}.json'

    # mkstemp keeps the file on disk; the previous NamedTemporaryFile().name
    # pattern deleted the file on GC and re-created it (a delete/reuse race)
    image_tar_fd, image_tar_fname = tempfile.mkstemp()
    os.close(image_tar_fd)
    image_tar = tarfile.open(image_tar_fname, 'w')
    try:
        manifest_info = tarfile.TarInfo(name=manifest_entry_fname)
        manifest_info.size = manifest_buf_leng
        image_tar.addfile(tarinfo=manifest_info, fileobj=manifest_buf)

        # add img-dir
        img_directory_info = tarfile.TarInfo(name=container_id)
        img_directory_info.type = tarfile.DIRTYPE
        img_directory_info.mode = 0o755  # was 0x755: hex literal, not octal perms

        image_tar.addfile(tarinfo=img_directory_info)

        version_info = tarfile.TarInfo(name=f'{container_id}/VERSION')
        version_buf = io.BytesIO(b'1.0')
        version_info.size = buf_leng(version_buf)
        image_tar.addfile(tarinfo=version_info, fileobj=version_buf)

        layer_json_info = tarfile.TarInfo(name=f'{container_id}/json')
        layer_info_buf = io.BytesIO(
            json.dumps(
                dataclasses.asdict(
                    LayerCfg(
                        id=container_id,
                        created=now_ts,
                        container_config=LayerContainerCfg(
                            Cmd=[''],
                        ),
                    )
                )
            ).encode('utf-8')
        )
        layer_json_info.size = buf_leng(layer_info_buf)
        image_tar.addfile(tarinfo=layer_json_info, fileobj=layer_info_buf)

        layer_tar_fname = f'{container_id}/layer.tar'
        image_tar.add(name=rootfs_tar.name, arcname=layer_tar_fname)

        # add manifest.json
        manifest = OCIManifest(
            Config=manifest_entry_fname,
            Layers=[layer_tar_fname],
            RepoTags=[image_reference],
        )
        manifest_buf = io.BytesIO(
            json.dumps([dataclasses.asdict(manifest)]).encode('utf-8'),
        )
        manifest_info = tarfile.TarInfo(name='manifest.json')
        manifest_info.size = buf_leng(manifest_buf)
        image_tar.addfile(tarinfo=manifest_info, fileobj=manifest_buf)

        # add repositories
        repo, tag = image_reference.split(':')
        repositories_dict = {
            repo: {
                tag: container_id,
            },
        }
        repositories_buf = io.BytesIO(json.dumps(repositories_dict).encode('utf-8'))
        repositories_info = tarfile.TarInfo(name='repositories')
        repositories_info.size = buf_leng(repositories_buf)
        image_tar.addfile(tarinfo=repositories_info, fileobj=repositories_buf)
    finally:
        # close() writes the end-of-archive blocks; the previous
        # `image_tar.fileobj.flush()` left a formally truncated tar.
        # closing rootfs_tar also unlinks the decompressed temp copy.
        image_tar.close()
        rootfs_tar.close()

    return image_tar_fname
def publish_image(
    release: glci.model.OnlineReleaseManifest,
    publish_cfg: glci.model.OciPublishCfg,
    s3_client,
    publish_oci_image_func: callable,
):
    '''
    Build an OCI image from the release's rootfs (downloaded from S3) and
    publish it via `publish_oci_image_func`.

    Returns a copy of `release` with `published_image_metadata` set.
    '''
    image_tag = f'{release.version}-{release.canonical_release_manifest_key_suffix()}'. \
        replace('_', '-')
    image_name = f'{publish_cfg.image_prefix}:{image_tag}'

    rootfs_key = release.path_by_suffix('rootfs.tar.xz').s3_key
    rootfs_bucket_name = release.path_by_suffix('rootfs.tar.xz').s3_bucket_name

    with tempfile.TemporaryFile() as tfh:
        s3_client.download_fileobj(
            Bucket=rootfs_bucket_name,
            Key=rootfs_key,
            Fileobj=tfh,
        )
        tfh.seek(0)
        logger.info(f'retrieved raw image fs from {rootfs_bucket_name=}')

        oci_image_file = image_from_rootfs(
            rootfs_tar_xz=tfh,
            image_reference=image_name,
        )

    logger.info(f'created serialised {oci_image_file=}')

    try:
        # the serialised image is a binary tar: open in 'rb' (text mode would
        # corrupt the stream). `with` also avoids the previous NameError risk
        # of referencing an unbound file object in `finally` if open() raised.
        with open(oci_image_file, 'rb') as oci_image_fileobj:
            publish_oci_image_func(
                image_reference=image_name,
                image_file_obj=oci_image_fileobj,
            )
        logger.info('publishing succeeded')
    finally:
        os.unlink(oci_image_file)

    published_image_reference = glci.model.OciPublishedImage(
        image_reference=image_name,
    )
    return dataclasses.replace(release, published_image_metadata=published_image_reference)
|
999,535 | 458a8a17b6f812d2ab8f93d7d485baaaa8fffaad | # The authentication credentials for using twilio
# Don't bother copying these, it's a trial account :)
# SECURITY(review): credentials are hard-coded in source control. Everything
# below must be considered leaked - rotate these keys/tokens and load them
# from environment variables or a secrets manager instead.
account_sid = "AC72bf334a5108bf91e562ca51d8154e95" # Change with your twilio account id
auth_token = "891df13e81ca64091805aab9b0f86ecf" # Change with your twilio auth token
twilio_number = "+12058756630" # Change with your twilio number
# AWS temporary session credentials (access key + secret + session token)
aws_access_key_id="ASIASWREH62U5467WSMR"
aws_secret_access_key="eTdCtZ2i5CFG+bvuHb5XaGIbNJ2x4E6mEBYNpamE"
aws_session_token="FwoGZXIvYXdzEAAaDMWwv9nW57mOJYBnHSK/AW7KVNuJytdgcCHswfJ+PfujDRAiOTaFjvtOQvVkH9zLsZ9Z8ktWDLvWI6WQiLc3U7gEwzVXtpYdW7zf/3XepJ9na+00iqaO56BqLu0o31sAzvqXjSyZbcKXMg0ccV0DVlSlq4fa9aLmwKke81XOx9YxuGlSqxfQ7bUB53nsvuRLsb2U6fgXaxgvOTzPRH8aJhogd45mqu6tHPt+9Uib1dvg8fLTxUvcyTkVD4761IJAfm5qJ43SEJSltUHaJlD/KLmI0voFMi2XlL1jjVoLH4gm03o7oGQy+TM4Xe0G5qYmpscHyp/x5vTr6ysTQFXKgVyrvmk="
region_name="us-east-1"
999,536 | 73ebe42e10ccef331e9efb9e89306fa0adbdad1b | import dash_html_components as html
from utils import Header, make_dash_table, plotgraph1, plotgraph2
import pandas as pd
import pathlib
def create_layout(app, report_list):
    """Assemble the page: a header followed by one row per entry of
    *report_list*; each entry is a sequence of (title, content, css_class)
    cells, where css_class "product" selects an H5 heading, anything else
    an H6 with the "subtitle padded" style."""
    rows = []
    for row_entries in report_list:
        cells = []
        for cell in row_entries:
            title, content, css_class = cell[0], cell[1], cell[2]
            if css_class == "product":
                heading = html.H5(title)
            else:
                heading = html.H6(title, className="subtitle padded")
            cells.append(html.Div([heading, content], className=css_class))
        rows.append(html.Div(cells, className="row"))

    return html.Div(
        [
            html.Div([Header(app)]),
            # page
            html.Div(rows, className="sub_page"),
        ],
        className="page",
    )
# for range(report_list):
# r0_e0 = report_list[0][0]
# r1_e0 = report_list[1][0]
# r1_e1 = report_list[1][1]
# r2_e0 = report_list[2][0]
# r3_e0 = report_list[3][0]
# r3_e1 = report_list[3][1]
# # Page layouts
# # Row 3
# row3 = html.Div(
# [
# html.Div(
# [
# html.H5(r0_e0[0]),
# r0_e0[1],
# ],
# className="product",
# )
# ],
# className="row",
# )
# # Row 4
# row4 = html.Div(
# [
# html.Div(
# [
# html.H6( r1_e0[0], className="subtitle padded"),
# r1_e0[1],
# ],
# className="six columns",
# ),
# html.Div(
# [
# html.H6(r1_e1[0], className="subtitle padded",),
# r1_e1[1],
# ],
# className="six columns",
# ),
# ],
# className="row",
# # style={"margin-bottom": "35px"},
# )
# # Row 5
# row5 = html.Div(
# [
# html.Div(
# [ html.H6(r2_e0[0], className="subtitle padded",),
# r2_e0[1],
# ],
# className="twelve columns",
# ),
# ],
# className="row ",
# )
# row6 = html.Div(
# [
# html.Div(
# [ html.H6( r3_e0[0], className="subtitle padded",),
# r3_e0[1],
# ],
# className="six columns",
# ),
# html.Div(
# [ html.H6( r3_e0[0], className="subtitle padded",),
# r3_e0[1],
# ],
# className="six columns",
# ),
# ],
# className="row ",
# )
# return html.Div(
# [
# html.Div([Header(app)]),
# # page 1
# html.Div( [ row3, row4, row5, row6],
# className="sub_page", ),
# ],
# className="page",
# )
|
999,537 | 107386fec296434d7329b20e5131890cc210b9c6 | import cv2
import time
import numpy as np
# "Invisibility cloak" effect: red-coloured regions of the live webcam frame
# are replaced with a pre-captured background, and the result is written to
# output.avi.
# To save the output in a video form (in format of output.avi)
fourcc=cv2.VideoWriter_fourcc(*'XVID')
outputFile=cv2.VideoWriter("output.avi",fourcc,20.0,(640,480))
# to start the webcam
capture=cv2.VideoCapture(0)
# let the camera warm up for 2 seconds before grabbing frames
time.sleep(2)
# capture the background: keep the last of 60 warm-up frames
# NOTE(review): `ret` is never checked here - if the camera yields no frame,
# bg stays 0 / invalid; confirm intended.
bg=0
for i in range(60):
    ret,bg=capture.read()
# mirror the background horizontally to match the mirrored live frames
bg=np.flip(bg,axis=1)
# process frames until the camera is closed
while(capture.isOpened()):
    ret,image=capture.read()
    if not ret:
        break
    image=np.flip(image,axis=1)
    # convert BGR to HSV (hue/saturation/value) for robust colour thresholding
    hsv=cv2.cvtColor(image,cv2.COLOR_BGR2HSV)
    # red wraps around the hue axis, so combine two hue ranges (~0-10 and ~170-180)
    lowerRed=np.array([0,120,50])
    upperRed=np.array([10,255,255])
    mask1=cv2.inRange(hsv,lowerRed,upperRed)
    lowerRed=np.array([170,120,70])
    upperRed=np.array([180,255,255])
    mask2=cv2.inRange(hsv,lowerRed,upperRed)
    mask1=mask1+mask2
    # morphological open removes speckle noise; dilate grows the red region
    mask1=cv2.morphologyEx(mask1,cv2.MORPH_OPEN,np.ones((3,3),np.uint8))
    mask1=cv2.morphologyEx(mask1,cv2.MORPH_DILATE,np.ones((3,3),np.uint8))
    # mask2 selects everything that is NOT red
    mask2=cv2.bitwise_not(mask1)
    # keep the non-red parts of the live frame
    res1=cv2.bitwise_and(image,image,mask=mask2)
    # fill the red parts with the stored background
    res2=cv2.bitwise_and(bg,bg,mask=mask1)
    # composite the two halves into the final frame
    finalOutput=cv2.addWeighted(res1,1,res2,1,0)
    outputFile.write(finalOutput)
    cv2.waitKey(1)
capture.release()
cv2.destroyAllWindows()
999,538 | 31560b8de6af6beebb6efe89f31625e90ef6e125 | """Support for the Foursquare (Swarm) API."""
from http import HTTPStatus
import logging
import requests
import voluptuous as vol
from homeassistant.components.http import HomeAssistantView
from homeassistant.const import CONF_ACCESS_TOKEN
from homeassistant.core import HomeAssistant, ServiceCall
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.typing import ConfigType
_LOGGER = logging.getLogger(__name__)

CONF_PUSH_SECRET = "push_secret"

DOMAIN = "foursquare"

# events fired on the Home Assistant bus
EVENT_CHECKIN = "foursquare.checkin"
EVENT_PUSH = "foursquare.push"

SERVICE_CHECKIN = "checkin"

# Parameters accepted by the `checkin` service; only the venue id is required.
CHECKIN_SERVICE_SCHEMA = vol.Schema(
    {
        vol.Optional("alt"): cv.string,
        vol.Optional("altAcc"): cv.string,
        vol.Optional("broadcast"): cv.string,
        vol.Optional("eventId"): cv.string,
        vol.Optional("ll"): cv.string,
        vol.Optional("llAcc"): cv.string,
        vol.Optional("mentions"): cv.string,
        vol.Optional("shout"): cv.string,
        vol.Required("venueId"): cv.string,
    }
)

# Component configuration: OAuth access token plus the shared push secret.
CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: vol.Schema(
            {
                vol.Required(CONF_ACCESS_TOKEN): cv.string,
                vol.Required(CONF_PUSH_SECRET): cv.string,
            }
        )
    },
    extra=vol.ALLOW_EXTRA,
)
def setup(hass: HomeAssistant, config: ConfigType) -> bool:
    """Set up the Foursquare component."""
    config = config[DOMAIN]

    def checkin_user(call: ServiceCall) -> None:
        """Check a user in on Swarm."""
        url = f"https://api.foursquare.com/v2/checkins/add?oauth_token={config[CONF_ACCESS_TOKEN]}&v=20160802&m=swarm"
        response = requests.post(url, data=call.data, timeout=10)

        if response.status_code not in (HTTPStatus.OK, HTTPStatus.CREATED):
            # Not inside an exception handler, so use error() - the previous
            # _LOGGER.exception() logged a bogus "NoneType: None" traceback.
            _LOGGER.error(
                "Error checking in user. Response %d: %s:",
                response.status_code,
                response.reason,
            )

        # fired regardless of status so automations can inspect the raw reply
        hass.bus.fire(EVENT_CHECKIN, {"text": response.text})

    # Register our service with Home Assistant, using the module constant
    # instead of repeating the "checkin" literal.
    hass.services.register(
        DOMAIN, SERVICE_CHECKIN, checkin_user, schema=CHECKIN_SERVICE_SCHEMA
    )

    hass.http.register_view(FoursquarePushReceiver(config[CONF_PUSH_SECRET]))

    return True
class FoursquarePushReceiver(HomeAssistantView):
    """Handle pushes from the Foursquare API."""

    # Foursquare cannot authenticate with HA; the shared push secret is the
    # only access control for this endpoint.
    requires_auth = False
    url = "/api/foursquare"
    name = "foursquare"

    def __init__(self, push_secret):
        """Initialize the OAuth callback view."""
        self.push_secret = push_secret

    async def post(self, request):
        """Accept the POST from Foursquare and re-fire it on the event bus."""
        try:
            data = await request.json()
        except ValueError:
            return self.json_message("Invalid JSON", HTTPStatus.BAD_REQUEST)

        # the secret is removed from the payload before it is fired as an event
        secret = data.pop("secret", None)
        _LOGGER.debug("Received Foursquare push: %s", data)
        if self.push_secret != secret:
            _LOGGER.error(
                "Received Foursquare push with invalid push secret: %s", secret
            )
            return self.json_message("Incorrect secret", HTTPStatus.BAD_REQUEST)
        # NOTE(review): no response object is returned on the success path -
        # confirm whether a json_message acknowledgement is expected here.
        request.app["hass"].bus.async_fire(EVENT_PUSH, data)
|
999,539 | 8ab820e224020b2aa715cec7fc76333a3ab0a9f7 | import numpy as np
from matplotlib import pyplot as plt
import time
import cv2
import os
from Video_Tools import load_video
from Video_Tools import split_vid_into_rgb_channels
from Video_Tools import get_video_dimensions
from Video_Tools import devide_frame_into_roi_means
def normalization(colour_channel_values):
    """Zero-center a colour-channel signal by subtracting its arithmetic mean."""
    return colour_channel_values - np.mean(colour_channel_values)
# def overlap_add(input_signal, window_size=32, window_count=8):
#
# L = len(input_signal)
# overlap_signal = np.zeros((L), dtype='float64')
#
# offsets = np.linspace(0, L - window_size, window_count)
#
# hanning_window = np.hanning(window_size)
#
# for n in offsets:
# int_n = int(n)
# hann_window_signal = hanning_window * input_signal[int_n:int_n + window_size]
# overlap_signal[int_n/2:int_n/2 + window_size] += hann_window_signal
#
# return overlap_signal
# Calculate Pulse Signal and BPM value for every ROI
def chrom_based_pulse_signal_estimation(fps, red_temp_array, green_temp_array, blue_temp_array):
    """
    Estimate the pulse rate of one ROI via a chrominance-based (CHROM) method.

    Returns (roi_bpm, heart_rates, fft1, hann_window_signal, S):
    the dominant heart rate in the 55-180 BPM band, the BPM axis of the
    spectrum, the band-passed magnitude spectrum, the Hann-windowed pulse
    signal, and the raw pulse signal S.
    """
    # zero-center each colour channel
    r_norm = normalization(red_temp_array)
    g_norm = normalization(green_temp_array)
    b_norm = normalization(blue_temp_array)

    # chrominance signals X and Y
    chrom_x = 3 * r_norm - 2 * g_norm
    chrom_y = 1.5 * r_norm + g_norm - 1.5 * b_norm

    # alpha rescales Y so both chrominance components contribute equally
    alpha = np.std(chrom_x) / np.std(chrom_y)
    S = chrom_x - alpha * chrom_y

    # Hann window suppresses spectral leakage before the FFT
    hann_window_signal = np.hanning(len(S)) * S

    # zero-padded 512-point FFT; keep the non-negative-frequency half
    spectrum = np.fft.fft(hann_window_signal, 512)
    half = int(len(spectrum) / 2 + 1)
    fft1 = np.abs(spectrum[:half])

    frequencies = np.linspace(0, fps / 2, half, endpoint=True)
    heart_rates = frequencies * 60

    # band-pass: zero everything outside the plausible 55-180 BPM range
    bound_low = (np.abs(heart_rates - 55)).argmin()
    bound_high = (np.abs(heart_rates - 180)).argmin()
    fft1[:bound_low] = 0
    fft1[bound_high:] = 0

    roi_bpm = heart_rates[np.argmax(fft1)]
    return roi_bpm, heart_rates, fft1, hann_window_signal, S
if __name__ == '__main__':
    start_time = time.time()

    dir_path = os.path.join('..', 'assets', 'Vid_Original', 'Kuenstliches_Licht')
    file = '00130.MTS'
    file_path = os.path.join(dir_path, file)

    # grid resolution: each frame is divided into w_div x h_div ROIs
    w_div = 16
    h_div = 8
    bpm_values = np.zeros((h_div, w_div), dtype='float64')

    print(file_path)
    vid_data, fps = load_video(file_path)
    # drop lead-in/lead-out; keep frames 50..299
    vid_data = vid_data[50:300]
    frame_count, width, height = get_video_dimensions(vid_data)
    print('Cutted length: ' + str(frame_count))
    # w_steps = width/w_div
    # h_steps = height/h_div
    # per-frame mean RGB value of every ROI
    roi_mean_frames = np.zeros((frame_count, w_div, h_div, 3), dtype='float64')
    #
    for j, frame in enumerate(vid_data):
        #
        # Spatial Averaging
        roi_means_2DArray, frame_devided = devide_frame_into_roi_means(frame, w_div, h_div)
        #
        # Create time series array of the roi means
        roi_mean_frames[j] = roi_means_2DArray

    red_vid_frames, green_vid_frames, blue_vid_frames = split_vid_into_rgb_channels(roi_mean_frames)
    #
    # figure for displaying the pulse result matrix
    fig = plt.figure(figsize=(17, 9))
    fig.suptitle(file, fontsize=14, fontweight='bold')
    sub1 = fig.add_subplot(121)
    sub2 = fig.add_subplot(122)
    #
    #
    '''BPM Estimation for every ROI'''
    for x in range(0, w_div):
        for y in range(0, h_div):
            bpm, heart_rates, fft, hann_S, S = chrom_based_pulse_signal_estimation(fps, red_vid_frames[:, x, y], green_vid_frames[:, x, y], blue_vid_frames[:, x, y])
            bpm_values[y, x] = bpm
            # annotate each BPM-matrix cell with its rounded value
            sub2.text(x, y, round(bpm, 1), color=(0.745, 0.467, 0.294), fontsize=8, va='center', ha='center')

    sub1.set_title('h-Signal')
    sub1.imshow(frame_devided)
    sub2.set_title('BPM Matrix')
    sub2.matshow(bpm_values, cmap=plt.cm.gray)
    # plt.matshow(bpm_values, cmap=plt.cm.gray)
    plt.tight_layout()
    # fig.savefig(file_path[:-4] + '.jpg')
    plt.show()
    print("--- %s seconds ---" % (time.time() - start_time))
|
999,540 | 280d453e48661fcfa99846a682dbaa2838037900 | import os
from PIL import Image
import numpy as np
from torch.utils.data import Dataset
class CustomDataset(Dataset):
    """Dataset that shuffles (X, y) once at construction time, keeping each
    sample paired with its label, and optionally applies a transform per item."""

    def __init__(self, X, y, transform=None):
        # one random permutation, applied identically to samples and labels
        order = np.random.choice(len(X), len(X), replace=False)
        self.samples = np.array(X.copy(), dtype='object')[order]
        self.labels = np.array(y.copy())[order]
        self.transform = transform

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, idx):
        sample, label = self.samples[idx], self.labels[idx]
        if self.transform:
            sample = self.transform(sample)
        return sample, label
def make_dataset(folder, labels):
    """Load every image under folder/<label>/, resized to 128x128.

    Returns (X, y): the list of PIL images and a numpy array of int labels."""
    X, y = [], []
    for label in labels:
        cur_dir = str(folder) + '/' + str(label)
        for fname in os.listdir(cur_dir):
            picture = Image.open(str(cur_dir) + "/{}".format(fname)).resize((128, 128))
            X.append(picture)
            y.append(int(label))
    return X, np.array(y)
999,541 | effe3755f8696f71055f79ae95136c6063b2506f | API_SERVER = 'localhost' |
999,542 | 57c1a50ba10d933d849f9af214e9d740cdbda406 | import math
from tools.ConfigFactory import ConfigObject
from tools.Exceptions import ParseException
def autoconvDecorator(f):
    """Decorator for binary operators: coerce the right-hand operand to a
    LogNum (via self.factory) before delegating to the wrapped method."""
    def wrapped(self, other):
        operand = other if isinstance(other, LogNum) else self.factory(other)
        return f(self, operand)
    return wrapped
NegativeInfinity = float("-inf")
class LogNum(ConfigObject):
    """
    A non-negative number stored in log-space: `self.value` holds ln(x) and
    zero is represented by -inf. Arithmetic and comparison operators accept
    plain numbers, which autoconvDecorator converts via factory().
    """

    # NOTE: the former class attribute `value = float("-inf")` was removed -
    # a class variable sharing a name with a slot makes class creation fail
    # with "ValueError: 'value' in __slots__ conflicts with class variable".
    __slots__ = ('value',)

    def __init__(self, value=float(0), log=True):
        """With log=True (default) `value` is a linear-space number whose log
        is taken; non-positive or unparseable input maps to -inf (i.e. zero).
        With log=False `value` is already a log-space number."""
        if isinstance(value, LogNum):
            self.value = value.value
        elif log:
            try:
                self.value = math.log(float(value))
            except (ValueError, TypeError):
                self.value = float("-inf")
        else:
            self.value = float(value)

    def toJSON(self):
        """Serialize via ConfigObject, adding the raw log-space value."""
        ret = ConfigObject.toJSON(self)
        ret['val'] = self.value
        return ret

    def load(self, dictionary):
        """Restore the log-space value from a dict produced by toJSON()."""
        ConfigObject.load(self, dictionary)
        if 'val' not in dictionary:
            raise ParseException("Value ('val') not found in state")
        self.value = float(dictionary['val'])

    def factory(self, other):
        """Conversion hook used by autoconvDecorator."""
        return LogNum(other)

    @autoconvDecorator
    def __add__(self, other):
        if self.value == NegativeInfinity:
            return LogNum(other)
        if other.value == NegativeInfinity:
            return LogNum(self)
        # log-sum-exp anchored at the larger operand for numerical stability
        if self.value > other.value:
            return LogNum(self.value + math.log(1 + math.exp(other.value - self.value)), False)
        else:
            return LogNum(other.value + math.log(1 + math.exp(self.value - other.value)), False)

    @autoconvDecorator
    def __radd__(self, other):
        return self + LogNum(other)

    @autoconvDecorator
    def __sub__(self, other):
        # computed in linear space; a negative result collapses to -inf (zero)
        return LogNum(math.exp(self.value) - math.exp(other.value))

    @autoconvDecorator
    def __mul__(self, other):
        return LogNum(self.value + other.value, False)

    @autoconvDecorator
    def __div__(self, other):
        return LogNum(self.value - other.value, False)

    # Python 3 dispatches `/` to __truediv__; the py2-only __div__ was never
    # reached under py3, so alias the (already decorated) implementation.
    __truediv__ = __div__

    @autoconvDecorator
    def __pow__(self, other):
        # ln(x**y) == y * ln(x); the exponent is used in linear space
        return LogNum(self.value*math.exp(other.value), False)

    @autoconvDecorator
    def __lt__(self, other):
        return self.value < other.value

    @autoconvDecorator
    def __le__(self, other):
        return self.value <= other.value

    @autoconvDecorator
    def __eq__(self, other):
        # the decorator already converted `other`; the old redundant
        # isinstance/re-conversion was removed
        return self.value == other.value

    @autoconvDecorator
    def __ne__(self, other):
        return self.value != other.value

    @autoconvDecorator
    def __gt__(self, other):
        return self.value > other.value

    @autoconvDecorator
    def __ge__(self, other):
        return self.value >= other.value

    def __float__(self, exp=True):
        """Return the linear-space value (or the raw log value if exp=False)."""
        if exp:
            return float(math.exp(self.value))
        return float(self.value)

    def __str__(self, exp=True):
        if exp:
            return str(math.exp(self.value))
        return str(self.value)

    def __repr__(self):
        return 'LogNum:' + str(self.value)

    # defining __eq__ suppresses the inherited __hash__, so restore it
    def __hash__(self):
        return hash(self.value)
if __name__ == "__main__":
    def main():
        """Smoke test: add two LogNums and convert one back to linear space."""
        first = LogNum(2, True)
        second = LogNum(3, True)
        print(first + second)
        print(float(first))
    main()
|
999,543 | 1d513ded4c4402573070111f95061f0367c693be | vars = {
'chromium_git': 'https://chromium.googlesource.com'
}
deps = {
'src/buildtools':
(Var("chromium_git")) + '/chromium/buildtools.git@0f8e6e4b126ee88137930a0ae4776c4741808740',
'src/tools/gyp':
(Var("chromium_git")) + '/external/gyp.git@54b7dfc03f746b6a539ac38f2fb0815d10b54734',
'src/testing/gmock':
(Var("chromium_git")) + '/external/googlemock.git@0421b6f358139f02e102c9c332ce19a33faf75be',
'src/testing/gtest':
(Var("chromium_git")) + '/external/github.com/google/googletest.git@6f8a66431cb592dad629028a50b3dd418a408c87',
'src/third_party/icu':
(Var("chromium_git")) + '/chromium/deps/icu.git@8f91ea3a7e0413df3312204058da856058a8099b'
}
deps_os = {
'win': {
}
}
hooks = [
{
'action': [
'download_from_google_storage',
'--no_resume',
'--platform=win32',
'--no_auth',
'--bucket',
'chromium-gn',
'-s',
'src/buildtools/win/gn.exe.sha1'
],
'pattern':
'.',
'name':
'gn_win'
}
]
include_rules = [
'+base',
'+build'
]
skip_child_includes = [
]
|
999,544 | eb1ab740f6c520317a1c097e49f499b09cf18da6 | import math
import numpy as np
# Greedy solver for a colour-ordering puzzle (Code Jam-style "unicorns"):
# arrange R/O/Y/G/B/V items so adjacent colours are compatible, writing
# "Case #t: <arrangement>" or "Case #t: IMPOSSIBLE" per test case.
fout = open('C:\\Users\\Soheil\\Documents\\Visual Studio 2017\\Projects\\B\\B\\Boutput-small.out', 'w')
#fout = open('C:\\Users\\Soheil\\Documents\\Visual Studio 2017\\Projects\\B\\B\\Boutput-large.out', 'w')
T=int(input())
for t in range(1,T+1):
    print(t)
    # N items total, then counts of Red, Orange, Yellow, Green, Blue, Violet
    N, R, O, Y, G, B,V=map(int, input().split())
    unicorns=[]
    for i in range(0,R):
        unicorns.append('R')
    for i in range(0,B):
        unicorns.append('B')
    for i in range(0,Y):
        unicorns.append('Y')
    for i in range(0,O):
        unicorns.append('O')
    for i in range(0,G):
        unicorns.append('G')
    for i in range(0,V):
        unicorns.append('V')
    #print(unicorns)
    # start the arrangement with the first colour added above
    unicornList=[]
    unicornList.append(unicorns[0])
    impossibleflag=False
    # lastElement: colours allowed to precede the first element (the sequence
    # is circular, so the last item must be compatible with the first)
    lastElement=[]
    if(unicornList[0]=='R'):
        R=R-1
        lastElement=['Y','B','G']
    elif(unicornList[0]=='B'):
        B=B-1
        lastElement=['R','Y','O']
    elif(unicornList[0]=='Y'):
        Y=Y-1
        lastElement=['B','R','V']
    else:
        # secondary colours (O/G/V) cannot start the greedy construction here
        impossibleflag=True
    # greedily append a compatible colour after the current last element
    # NOTE(review): only the 'R' branch breaks out of the loop when stuck;
    # the other branches set impossibleflag without break - confirm intended.
    for i in range(1,N):
        currentColor=unicornList[len(unicornList)-1]
        if(currentColor=='R'):
            if(G>0):
                G=G-1
                unicornList.append('G')
            elif(Y>B and Y>0):
                Y=Y-1
                unicornList.append('Y')
            elif(B>0):
                B=B-1
                unicornList.append('B')
            else:
                impossibleflag=True
                break;
        if(currentColor=='B'):
            if(O>0):
                O=O-1
                unicornList.append('O')
            elif(Y>R and Y>0):
                Y=Y-1
                unicornList.append('Y')
            elif(R>0):
                R=R-1
                unicornList.append('R')
            else:
                impossibleflag=True
        if(currentColor=='Y'):
            if(V>0):
                V=V-1
                unicornList.append('V')
            elif(B>R and B>0):
                B=B-1
                unicornList.append('B')
            elif(R>0):
                R=R-1
                unicornList.append('R')
            else:
                impossibleflag=True
        if(currentColor=='O'):
            # orange may only be followed by blue
            if(B>0):
                B=B-1
                unicornList.append('B')
            else:
                impossibleflag=True
        if(currentColor=='G'):
            # green may only be followed by red
            if(R>0):
                R=R-1
                unicornList.append('R')
            else:
                impossibleflag=True
        if(currentColor=='V'):
            # violet may only be followed by yellow
            if(Y>0):
                Y=Y-1
                unicornList.append('Y')
            else:
                impossibleflag=True
    # valid only if the final element may wrap around to the first one
    if(any(unicornList[len(unicornList)-1]==x for x in lastElement) and impossibleflag==False):
        fout.write("Case #{}: {}\n".format(t,''.join(unicornList)))
        #print(t,''.join(unicornList))
    else:
        #print(t,"IMPOSSIBLE")
        fout.write("Case #{}: {}\n".format(t,"IMPOSSIBLE"))
fout.close()
999,545 | ee860798758ddec08ce4776763af6edc2b9c0bbf | #!/usr/bin/env python
#
# -----------------------------------------------------------------------------
# Copyright (c) 2016 Indiana University
# Copyright (c) 2016 The Regents of the University of California
#
# This file is part of AEGeAn (http://github.com/BrendelGroup/AEGeAn) and is
# licensed under the ISC license: see LICENSE.
# -----------------------------------------------------------------------------
from __future__ import print_function
import argparse
import datetime
import glob
import os
import shutil
import subprocess
import sys
import LocusPocus
class GenomeDBCache(object):
    """Maintain a dated local cache of a genome DB's data files, refreshing
    any file whose sha1 differs from the newest cached copy."""

    def __init__(self, db, cachedir='cache', tempdir='temp'):
        self.db = db
        self.cachedir = cachedir
        self.tempdir = tempdir
        self.quiet = False

    @property
    def cacheroot(self):
        """Cache directory dedicated to this genome (keyed by its label)."""
        return self.cachedir + '/' + self.db.label

    @property
    def newprefix(self):
        """Prefix for files cached today: <cacheroot>/<YYYY-MM-DD>-<accession>."""
        date = datetime.datetime.now().strftime('%Y-%m-%d')
        return '{c}/{d}-{a}'.format(c=self.cacheroot, d=date, a=self.db.acc)

    @property
    def oldprefix(self):
        """Glob pattern matching any previously cached file for this accession."""
        return '{c}/*-{a}'.format(c=self.cacheroot, a=self.db.acc)

    def printlog(self, message):
        """Print a status message to stderr unless quiet mode is enabled."""
        if self.quiet:
            return
        print(message, file=sys.stderr)

    def copy2cache(self, existingfile, newfile):
        """Copy a file into the cache, creating parent directories as needed."""
        newdir = os.path.dirname(newfile)
        # os.makedirs replaces subprocess.call(['mkdir', '-p', ...]):
        # portable (works on Windows) and avoids spawning a process
        os.makedirs(newdir, exist_ok=True)
        shutil.copy2(existingfile, newfile)

    def file_test(self, cachefile, testfile, newfile):
        """If test file is different from cache, copy to new file."""
        assert os.path.isfile(testfile)
        testsha1 = self.db.file_sha1(testfile)
        assert os.path.isfile(cachefile)
        cachesha1 = self.db.file_sha1(cachefile)
        if testsha1 == cachesha1:
            message = (
                'Testfile "{tf}" and cachefile "{cf}" match ({sha}); cache is '
                'up-to-date!'.format(tf=testfile, cf=cachefile, sha=testsha1)
            )
            self.printlog(message)
        else:
            # (fixed "do not match match" typo in the log message)
            message = (
                'Testfile "{tf}" (sha1={ts}) and cachefile "{cf}" (sha1={cs}) '
                'do not match; cache is outdated; copying test file to '
                '"{nf}"!'.format(tf=testfile, ts=testsha1, cf=cachefile,
                                 cs=cachesha1, nf=newfile)
            )
            self.printlog(message)
            self.copy2cache(testfile, newfile)

    def run_tests(self):
        """Compare each downloaded data file against the newest cached copy,
        refreshing the cache (or seeding it) where they differ."""
        suffixes = ['genomic.fna.gz', 'genomic.gff.gz', 'protein.faa.gz']
        cachefilepatterns = [self.oldprefix + '_' + suf for suf in suffixes]
        newfiles = [self.newprefix + '_' + suf for suf in suffixes]
        tempfiles = [self.db.gdnapath, self.db.gff3path, self.db.protpath]
        for cfp, nf, tf in zip(cachefilepatterns, newfiles, tempfiles):
            cachefiles = sorted(glob.glob(cfp))
            if len(cachefiles) == 0:
                message = (
                    'No cachefile for comparison with {tf}; creating new cache'
                    ' and copying to "{nf}"'.format(tf=tf, nf=nf)
                )
                self.printlog(message)
                self.copy2cache(tf, nf)
            else:
                # dated filenames sort chronologically; take the newest
                cachefile = cachefiles[-1]
                self.file_test(cachefile, tf, nf)
def get_parser():
    """Return the command-line argument parser for the cache-monitor script."""
    desc = 'Script to monitor RefSeq genomes and keep a local cache'
    parser = argparse.ArgumentParser(description=desc)
    version_string = 'LocusPocus v%s' % LocusPocus.__version__
    parser.add_argument('-v', '--version', action='version',
                        version=version_string)
    # directory options share the same metavar
    for flags, options in (
        (('-w', '--work'),
         dict(default='DELETEME', metavar='DIR',
              help='temporary working directory')),
        (('-c', '--cache'),
         dict(default='cache', metavar='DIR',
              help='cache directory; default is "cache/"')),
    ):
        parser.add_argument(*flags, **options)
    parser.add_argument('-q', '--quiet', action='store_true',
                        help='do not print debugging output')
    return parser
def main(args):
    """Download every RefSeq-sourced genome in the registry and refresh its
    local cache, then remove the temporary work directory."""
    registry = LocusPocus.registry.Registry()
    for label, config in registry.list_genomes():
        db = registry.genome(label, workdir=args.work)
        # only genomes sourced from RefSeq are cached
        if 'source' not in db.config or db.config['source'] != 'refseq':
            continue
        # genomes flagged as known failures are skipped entirely
        if 'known_failing' in db.config:
            continue
        db.download()
        cache = GenomeDBCache(db, tempdir=args.work, cachedir=args.cache)
        cache.run_tests()
    # NOTE(review): work dir is removed once after processing all genomes -
    # confirm placement against upstream (source indentation was lost)
    shutil.rmtree(args.work)
if __name__ == '__main__':
main(get_parser().parse_args())
|
999,546 | 3c22e16f3419c422323f3582399b22baef2bfc40 | # Lesson: Python basics
# Write some basic Python

# a. arithmetic on a chosen number, then conversion to int for display
number = 6
result = 0
result = (number * 2 + 10) / 2
int_result = int(result)
print (int_result)
print("Du hast " + str(number) + " ausgewählt, das magische Ergebnis ist " + str(int_result) + "!")

# b. extract the local part and the domain name from e-mail addresses
mail = "willy.wizard@zauberschule.de"
name = mail.split("@")
print(name[0])
mail2 = "info@helena-hexe.com"
name2 = mail2.split("@")
# split the domain again on '.' to isolate the second-level name
name3 = name2[1].split(".")
print(name3[0])

# c. collect client addresses in a list and report its size
mail10 = "zarah.zauber@zauberberg.de"
mail20 = "info@trixie-trickser.com"
mail30 = "uwe_unhold@dunkelnetz.de"
clients = []
clients.append(mail10)
clients.append(mail20)
clients.append(mail30)
print(clients)
print(len(clients))

# d. join list elements with '@' into a single address-like string
zauberer = ["Buehnenzauberer", "magic.com"]
print("@".join(zauberer))
999,547 | d84fded186ec782e464a46af5cf2ffb9d2a62e4e | import string
def is_pangram(s):
    """Return True if *s* contains every letter a-z at least once (case-insensitive)."""
    # single O(n) pass via set containment, instead of scanning s once per letter
    return set(string.ascii_lowercase) <= set(s.lower())
999,548 | a629765e560135fc903dd521d059d90a131db1e2 | # coding=utf-8
from OTLMOW.OTLModel.BaseClasses.AttributeInfo import AttributeInfo
from OTLMOW.OTLModel.BaseClasses.OTLAttribuut import OTLAttribuut
from OTLMOW.OTLModel.Datatypes.ComplexField import ComplexField
from OTLMOW.OTLModel.Datatypes.StringField import StringField
# Generated with OTLComplexDatatypeCreator. To modify: extend, do not edit
class DtcExterneReferentieWaarden(AttributeInfo):
    """Value holder for the DtcExterneReferentie complex datatype: an external
    reference number plus the external party it belongs to."""

    def __init__(self, parent=None):
        AttributeInfo.__init__(self, parent)
        self._externReferentienummer = OTLAttribuut(field=StringField,
                                                    naam='externReferentienummer',
                                                    label='extern referentienummer',
                                                    objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#DtcExterneReferentie.externReferentienummer',
                                                    definition='Referentienummer zoals gekend bij de externe partij bv. aannemer, VLCC, ...',
                                                    owner=self)

        self._externePartij = OTLAttribuut(field=StringField,
                                           naam='externePartij',
                                           label='externe partij',
                                           objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#DtcExterneReferentie.externePartij',
                                           definition='De naam van de externe partij waarvoor de referentie geldt. Dit kan een organisatie zijn maar ook een softwaretoepassing zoals bv. ABBA of VLCC.',
                                           owner=self)

    @property
    def externReferentienummer(self):
        """Reference number as known by the external party, e.g. contractor, VLCC, ..."""
        return self._externReferentienummer.get_waarde()

    @externReferentienummer.setter
    def externReferentienummer(self, value):
        self._externReferentienummer.set_waarde(value, owner=self._parent)

    @property
    def externePartij(self):
        """Name of the external party the reference applies to; may be an organisation or a software application such as ABBA or VLCC."""
        return self._externePartij.get_waarde()

    @externePartij.setter
    def externePartij(self, value):
        self._externePartij.set_waarde(value, owner=self._parent)
# Generated with OTLComplexDatatypeCreator. To modify: extend, do not edit
class DtcExterneReferentie(ComplexField, AttributeInfo):
    """Complex datatype attaching a reference number, as known by an external
    party (e.g. contractor, VLCC, ...), to an object."""
    naam = 'DtcExterneReferentie'
    label = 'Externe referentie'
    objectUri = 'https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#DtcExterneReferentie'
    definition = 'Complex datatype waarmee een referentienummer zoals gekend bij de externe partij bv. aannemer, VLCC, ... kan toegevoegd worden aan object.'
    # the waardeObject carries the actual field values for this datatype
    waardeObject = DtcExterneReferentieWaarden

    def __str__(self):
        return ComplexField.__str__(self)
|
999,549 | a069a867778063e43ac4f744631967d38a96f9e2 | input = """Tile 2141:
#.#...#...
###..#...#
.....##...
..#.##..#.
.#....#..#
...#######
.....#...#
#.#......#
##........
########.#
Tile 3467:
..#.#....#
#.....#..#
....#....#
...#......
#.#....###
.#....#..#
#.........
.....#...#
.##...#...
##.#..#.#.
Tile 3389:
......##..
.##...#...
.#..###...
....##.#..
#..#.#....
.#.##..#..
#....##...
.#...#..##
..##.#...#
####..#..#
Tile 2797:
#..###..##
#.........
#.........
##.#....#.
.........#
.#...#...#
.........#
..#.#....#
..#...##..
...##..##.
Tile 1321:
.#....#..#
......#..#
#.#......#
#..#.#...#
.........#
.#..##....
...#.....#
#....#..#.
..##.##...
##.###.###
Tile 2741:
..#.#.####
#......##.
#.#...#.##
.##......#
..#.#....#
.#.#.....#
#........#
..#......#
#...#....#
...#..#..#
Tile 3169:
.##.#.#...
#.#..#...#
#...###...
..#...##..
.#..#.#.##
..#.......
...#......
.......#..
#.#.......
#.#.##..##
Tile 1097:
.##.#..#..
..#...##..
###.#.#...
..#..##...
......##.#
...#..#..#
#.#..#..##
.##.#...#.
##.#..#...
##..#...##
Tile 3457:
#.#.#.####
..#.#.#...
..#.......
##.....#..
#........#
.......#.#
#........#
#.......##
#..#.#..##
.##.##.#..
Tile 1471:
##.#.#.##.
..........
...#...#.#
#...#...#.
.....#..##
.......#.#
........##
...#...#.#
.#....####
.#.##.##.#
Tile 1531:
#....#..#.
#.#....#..
.......#..
.##.####.#
#..#.##..#
#........#
##.#.#...#
#...#....#
..........
...##..###
Tile 1523:
##..##.##.
...#..##..
#....#.#..
....#.####
#.#....#.#
....##..#.
#..#....#.
#..#..##.#
..#...##..
.#..####..
Tile 1447:
.#..#...##
.........#
..........
#.#....#.#
..#....##.
#......#.#
#.....#..#
#......#.#
..........
######....
Tile 2179:
#..#...#.#
..#......#
........#.
#.....#..#
......##..
#..#.##..#
#.##..#.#.
###.#..##.
...#..#..#
#.##..#.#.
Tile 1789:
#.#.#.##.#
#..#.#....
.#.#......
#.#....#..
##..#...#.
##..#....#
#.###.....
#..#.#.#..
#.......#.
#.##..####
Tile 2521:
.###..##..
..#.##....
##.#.....#
..##...#.#
#.###.#..#
.........#
.........#
#.........
.....#.#.#
.#....#.#.
Tile 3583:
..#..#.#..
..#.....##
#......#..
.........#
#...##.#.#
..#..#....
....#....#
#..#..#..#
#..#..###.
##...#.#.#
Tile 2633:
..#..#...#
..........
#..###..##
###......#
..#...#..#
#.###..###
##..#.##..
###.#.#..#
#....#....
#.#.######
Tile 1667:
###.##..#.
........#.
..#...##.#
........#.
#..##....#
#.#..#..##
.##......#
..#.......
#..#..#..#
.###..##.#
Tile 2003:
......#...
#.#.......
#..#......
#.....#..#
##.......#
.....##.##
#.###.....
#..#.#...#
......#...
#.##...#..
Tile 1103:
###.######
...######.
........#.
#....##..#
#.....#..#
#....#....
#..##..#.#
...##....#
#......#..
##...#.#.#
Tile 3167:
..##...##.
##.......#
...#...#.#
.......#..
.........#
........#.
##.......#
##.#.....#
..#.#.....
.#.####.##
Tile 1031:
#.##....##
#.#..#...#
#...#.....
#.#.......
#.#..##...
#...#....#
##..#....#
.#.#.....#
#.......##
...#####.#
Tile 2539:
##...#..##
..#....#..
......#..#
#...###..#
...#...#..
##....####
#.......##
#......#.#
.........#
.##.#..###
Tile 1871:
.#..#.#.##
...###...#
.........#
#...##....
...###...#
###.#..#.#
#..##....#
##.....#..
#....#..##
#####.##..
Tile 2383:
.#.....###
#.#.......
#.....#..#
.#...##..#
...#...#.#
#....#.#.#
...#....##
##.#..####
...#.....#
###.##.#..
Tile 1237:
##.#.#.##.
.##...####
#...#..#..
##.#..#.#.
##....#.#.
....#..###
....#....#
#.......##
..##...#.#
#.#..#..##
Tile 1093:
..##.###..
....###...
#..#.#....
#.........
..........
..#....#.#
#.....#...
........#.
......##.#
.#...#..##
Tile 1583:
..###.#..#
.........#
......##..
#....#..##
.###...###
..##..#.#.
....##.#.#
#.....#.#.
##.......#
#.#.##..#.
Tile 2243:
..#..#####
...#....##
#.....#..#
#......#.#
..#.#..#.#
.#...#...#
##.....#..
##.##.###.
#........#
#..#...#.#
Tile 1297:
..#......#
##..#.....
.##.#.....
.....##..#
.#....##..
#.#.#...##
.#........
.##.##.#.#
..#.###..#
#..#...#..
Tile 1877:
....###.#.
#....#....
.##...#..#
.........#
.........#
.#..##.#..
...#.....#
#.....#..#
...##.....
#.#.#.#...
Tile 3779:
.#...##..#
.#.......#
#.........
#.......##
.....#...#
.........#
...#.#...#
#.........
..#..#.###
#.#####.#.
Tile 2029:
....##..##
#..###...#
.....#..##
#...#.#...
##..###..#
#.#..##.#.
.......#.#
..##..#..#
..#....#.#
.##..#####
Tile 3221:
.#..#..##.
.###..#...
....#.##..
#.#.#.#.#.
#..#...#..
..........
#.#.....##
###....#.#
..###...#.
#......#..
Tile 3463:
.#.#......
#...#..#..
......#..#
#..#.##..#
#.....#..#
.#..###.#.
#####...#.
#.#....#..
#.........
##.###.#.#
Tile 3331:
##.###..##
##..#..#..
.....##...
..#...#.#.
#..#.#...#
...###....
.........#
.###....##
#...####.#
..#.##....
Tile 2473:
..####.#.#
##.#....##
......#.##
##.......#
##...##..#
##....#..#
.#.#.#....
......##.#
..#.##....
#..###...#
Tile 2459:
##.##....#
..........
#.#....###
#.........
##....##.#
...#......
###.......
..........
#.....#...
.#.###.#.#
Tile 3877:
...#.#.#..
#....#...#
.#......#.
......##.#
....#.##.#
.#.#..#.##
##........
....#.####
###...#.##
######..##
Tile 2161:
..##...#..
#....#...#
#.#.#.#..#
.##....###
...#..#...
#.........
.##....##.
.........#
#........#
.#.#.###..
Tile 3361:
#..##.....
.##.....#.
#........#
#..#......
#.........
##...##..#
..#..#....
#.#.......
...#.....#
###.#.##..
Tile 2089:
.##.####.#
...#....##
.###.#.#..
.........#
..#.##..#.
.###..##..
..........
...##.#..#
#..#....##
#...####.#
Tile 2711:
.##.#..###
#.#.....##
#..###.#.#
.##.....##
...#....#.
##......#.
#........#
..#.......
###..#.#.#
...#.##..#
Tile 1753:
....##.###
#........#
#...#...#.
..##.#....
#..#..#.##
#.....#..#
#.........
...#......
##.....#..
##..#.....
Tile 2953:
.#.####..#
...#.....#
.##.....##
#...#.....
##........
###.#..#.#
#..#.##...
#..#...#..
#.#......#
..#.#...#.
Tile 3593:
#..#.#..#.
#.#.....#.
#.#.......
#..#.....#
...#..#...
#........#
#....#.#.#
#..#......
..#......#
...#..#.##
Tile 1483:
..#.####..
.#.......#
##........
.......#..
##..#....#
#........#
..#...#...
#.........
###....#..
..#.#..##.
Tile 1657:
#####..##.
.###....#.
...#...#.#
#..#....#.
#.#...#.#.
.....##.##
.......#.#
####......
#..##....#
..#...#..#
Tile 3307:
.#..##.###
....#..#..
###..#..##
#.###.....
..###....#
#..#...#.#
.........#
....#.###.
..#......#
#.##.#.###
Tile 3617:
...#.#.#..
..........
.#....#.##
.#.#...#..
.#.....#..
.##.#..###
#..#......
#......#..
.#..#...#.
#.##..#..#
Tile 3001:
##...#####
..##.....#
#.....#.##
##........
....#.....
#....#..#.
...##.#...
.......#.#
#.....#..#
#.####....
Tile 1013:
.##..#....
#....##...
#...#...##
.#.###...#
....#..#..
##...#..#.
..#....#..
#...#..#.#
...#.....#
####.#####
Tile 2801:
#...##.##.
#.....#...
......#...
#....#...#
...##.....
.#......#.
....##..##
#....#####
....##...#
..#..###..
Tile 3079:
..##.#...#
#.....#...
...#.....#
..#..#...#
#..#...###
#.##......
#.###.....
#........#
#.....#.#.
..#.#...#.
Tile 1231:
####.#####
..#.#....#
.#....#...
.#..#....#
..#......#
#..#.###.#
.#.......#
#.##...#.#
.....#.###
.###.###.#
Tile 1171:
..#..#..##
#...#....#
#....#...#
#.........
###......#
#...#..#..
..#..#.#..
...#...#..
.....#....
###..#.###
Tile 3319:
###.#####.
..........
..##......
#.......#.
#..#.#..#.
.#.##.....
..###..#.#
#...#...##
##.#.#....
...##...##
Tile 2129:
##...#.###
#........#
.....#...#
##.......#
#.#.#.#...
......#...
#....#..#.
..........
.......#.#
.###.###.#
Tile 3371:
.##.......
#....#.#.#
..#.#...##
#.........
.#.....#..
#.....#.##
...#......
##....#...
..#..##..#
#..#.#####
Tile 1129:
#.##..#.#.
#......###
#.#....#..
.####...##
#...#..#.#
#..#......
.##.....##
#..#.....#
..........
#.###..#..
Tile 1399:
#.....#.##
#........#
.....##...
........##
...#...#..
#.###..##.
#..#.#.#.#
....##.#.#
#.##.....#
.#..###.##
Tile 2503:
#...#...##
#..#.#....
.....#.#.#
...#.....#
..#......#
..##.....#
.....#....
...#...#.#
.#........
#.###...#.
Tile 3529:
.##...####
.#.....###
#..#..##..
....##.#.#
.......##.
#........#
.........#
#.##......
##....#..#
....#.##.#
Tile 1181:
.#.##.#.##
#..#......
#........#
..##......
##.#...#.#
.#.##..#.#
##........
#..#..#..#
#.#...#..#
..#.#.##..
Tile 1759:
...#..#..#
..#...#...
##....#...
#.#..#.#..
#.......##
#...#...##
##......#.
##...#.###
##...#....
.##...#...
Tile 2543:
######.###
####....##
##.#..#.#.
##...#....
..........
..#......#
#.......##
##........
..###..##.
##....####
Tile 3613:
.##..#....
......#..#
#.........
....##...#
....#....#
.#..#.....
....##...#
#.#..#..#.
#..#.....#
##.##...##
Tile 3853:
....######
.####....#
..###.....
#.#.......
#..#..#...
....#...##
##....##.#
#..##..#..
#.#..#...#
..#######.
Tile 3187:
.#..#.####
#.##....#.
.....#....
##....#...
.#.##.#...
#...##...#
#....#...#
......#...
#....#..#.
#..###....
Tile 2437:
#..#.##.#.
##...###.#
.....#....
.#......#.
...##.#...
#..###...#
#....#....
.#....#..#
..........
.##.#.#..#
Tile 2917:
#.....#...
.#.#...#..
#.##.##..#
......#..#
...###....
##.#......
....#....#
###.....##
.#.#....##
####.#..##
Tile 1511:
#...###.#.
..##.#..##
..#...#...
##.#..#.##
##....#..#
#.........
#........#
........#.
#.....##.#
.#.....##.
Tile 2699:
.#.#......
#........#
..##...#..
......##..
..##..#..#
#.###..#.#
#.#..#...#
#.##.....#
..###.....
###..##.##
Tile 3259:
###.###..#
.........#
#.#.....#.
...#..#...
.##....###
##.......#
...#.#...#
...##...##
#.........
......#...
Tile 1429:
....##.##.
.###...#..
##.#...###
#.........
#....#...#
#.....#...
...#.##...
#.......##
.#......##
###.#.##..
Tile 2039:
#....###.#
.#..#.#..#
#.#.#...#.
..#.......
##...#..##
#.#.......
#......##.
..#....#.#
#.#.......
..####.#..
Tile 3637:
#.###.#.##
#......###
#.........
.....#...#
..#..#.##.
#..#....#.
........#.
..#.......
...#......
...#.###.#
Tile 1163:
##.###.##.
#.##.#....
..#.#.....
#..####.#.
#..#......
#......#.#
.#........
.#.##.....
#.##....##
##.#......
Tile 3761:
...##.###.
.........#
....#.....
##.......#
#.......#.
...##.....
#.#.......
.........#
#.#......#
.##.###...
Tile 1777:
##...##.##
....#.....
.#...#....
#....#.#.#
..#.#....#
#....###.#
##...#....
#...###..#
#...##..#.
......#.##
Tile 2857:
.#..#.##..
#..#.....#
##........
...#.#..#.
.##..#..#.
#####.##.#
#..##.....
##...#.#.#
#...#.....
...##.##.#
Tile 3061:
.#.#.###.#
#........#
.###.###..
..##.....#
#...#....#
#.#.#..###
.##......#
.....##...
....#.....
.#..#..#..
Tile 1721:
..#..#####
.#....###.
#..#...#..
#.........
#....#...#
...##..##.
#........#
#......#..
#.........
....##.###
Tile 2693:
.....#.#..
.#......#.
.#.....#.#
#....##..#
#..#......
####.##..#
#.#.....#.
..........
#.#....#.#
#.##..#...
Tile 1259:
#...#..###
#...#...##
##..#.#..#
#.#.......
#......#.#
###.......
#...#.....
...#.....#
.#...#...#
.#...##.##
Tile 1291:
#..##.#...
#.....##.#
.#..#....#
#...#...#.
..###.#...
###......#
#..#...#.#
#....#....
..##......
#..#..#.#.
Tile 1051:
#...####.#
##..#.....
.........#
.#..#..#..
##...#..##
...##....#
##........
.#......#.
...#.....#
###.###..#
Tile 2719:
.#.##..##.
.#.....#..
#.....#...
##.#.....#
#######...
##...##.#.
......##..
........##
..#....###
#..#..#.##
Tile 3533:
...##....#
##.......#
#....#....
....##...#
.....##.##
....#..##.
#.........
###...#.#.
#.........
#..####...
Tile 2659:
.##.#...#.
.#..#...#.
....#...##
.#..#.#..#
#.##.##..#
..##.....#
..#..#..##
..#.......
.........#
#....####.
Tile 3517:
#..##.#.#.
.#...#...#
.#.#.....#
##.......#
..#.......
#......#..
.........#
...##...#.
......#.##
..##...#..
Tile 2371:
###.##.##.
.#.#.##..#
.....#...#
#.#.#..#.#
####...#.#
#.#.#.##..
..#.#.....
##........
..#..#...#
#...#.###.
Tile 1601:
.####..##.
#.........
......#...
#.#..##.#.
..##.#.#.#
#.......#.
#..#.....#
#.....#.#.
..###....#
.#..######
Tile 2273:
..###.###.
....#.....
.##...#...
#.#...#...
....#.....
.#.....#.#
.#......#.
.#.#..#...
........##
##..###...
Tile 3917:
####.#...#
#...#...##
#.##...###
#..#...##.
....#....#
.........#
......#..#
###.#.#...
...##...##
#..#####.#
Tile 3701:
#..###...#
###..#...#
#.#...##.#
.##.#.#..#
.##..#..##
##..#.#...
.##.#.#.##
....##.#..
#.....#...
..##.##.#.
Tile 2081:
...####.#.
........##
#.#...#..#
.###...##.
#..#.##...
#....#....
#.#..###..
........##
..#......#
...##..###
Tile 2339:
#.#..#..##
.#..#...#.
........##
.##....#..
#.....#.#.
........#.
....#...##
.#....##..
#........#
#.##.#...#
Tile 1373:
###.###...
.........#
#......###
#...##...#
.###....#.
#..#.....#
..........
##..#..#.#
.###.#..##
###...##..
Tile 2837:
##....#...
...#...#.#
#.........
..#.......
##.#......
.#....#...
.###....#.
##......##
.....#....
.#.##.....
Tile 3803:
#..#..##..
.......#..
##.###....
#.#.#.#...
....##....
#.###..#.#
....#...##
........#.
..##.....#
..#..###..
Tile 3559:
#....##.##
...##.....
..#..#...#
#........#
##........
#.#.......
#.#..#.#.#
#....#..#.
#...#.....
##.#.#....
Tile 1549:
####.##.#.
......#...
#..#.#..##
#..#..#.#.
.#.....#.#
.........#
.........#
#....#...#
#.......#.
#####....#
Tile 1699:
.#.##.#.#.
.#..####..
.#.#......
.###..##.#
#........#
##.#......
.#.#.##..#
#.#..#....
#.......#.
.###.##..#
Tile 2647:
###.......
.......#.#
..#.#...#.
#..#...#..
..##......
#.#.#....#
....#.#.#.
#.........
#...#.....
#.###...##
Tile 1361:
...##.#.#.
..........
#.........
#.#....#..
##..##...#
....#.....
#.......#.
..........
#.##.#.#..
#..##.#.##
Tile 3851:
##........
...##.#...
....#.....
....####..
#.....#..#
#...#.##.#
#..#.....#
...#..##.#
#.#....#..
#..####...
Tile 3911:
##..##.##.
#.#.....##
...#.#...#
.....#....
.#...#..#.
...##.##..
#.#.#.####
.......#.#
...#.#....
##..#.##.#
Tile 3719:
##.##.###.
....#..#..
...#......
#......###
.#.#....##
..###...#.
....#..#.#
..........
#..#....#.
#####..###
Tile 2239:
###.##.##.
###......#
#.##.#..#.
..#....#..
......##..
.....#..#.
...#......
#.........
..........
.#.....###
Tile 1249:
#.#..####.
...#.....#
....#....#
.........#
.#.##...##
..#.####.#
...###....
#.........
#.....#...
.#..#.#...
Tile 2707:
.##.#.....
#.....##.#
.#..#...##
...#.#.#..
..#.......
.#.#...#..
#.....#...
#.##......
.#........
.###.#..##
Tile 2897:
..###...#.
...#..#..#
#......#..
#..#.....#
.....#....
..##..####
#...##...#
#.#......#
##......##
......#..#
Tile 3623:
#..#.....#
##.#...##.
##.......#
#.#....#.#
.#....#..#
#.##..#...
.##....#.#
.#.......#
#.#.#..#.#
#..#.###.#
Tile 2617:
.######...
#..#.....#
..........
#####....#
....#....#
##.###..##
......##..
.....##..#
#.........
...####.#.
Tile 1049:
..#.##...#
.#.....#.#
......#..#
#####.#...
......#...
....#.....
#.........
#.........
##.......#
..#..#.#.#
Tile 3019:
##.##..#..
##...#..##
...#....#.
..........
...#......
#...#...#.
####.##.#.
###......#
.....#....
######...#
Tile 3067:
#...#.#..#
#.#.##...#
....#.#..#
##.#......
....###..#
.#.....#..
.#.......#
......#..#
##.#.....#
.#####.###
Tile 3203:
#...###.##
#.#......#
..#..#..#.
.##...##.#
#.....###.
...##....#
#......#.#
.#........
.#.#....##
##.#####.#
Tile 2113:
.#.#.#..#.
#...#.....
###.....##
#.###.....
##........
#...##....
......####
......#..#
#..###.##.
..#.###.##
Tile 2591:
##..###...
........##
#........#
.....##..#
.........#
..#...#...
....#..#.#
#......#..
...#......
.##.##.#.#
Tile 1499:
.....##..#
...#......
.##.....#.
......##.#
.#........
#..##.#...
....##...#
#........#
....#.....
.#.##.#..#
Tile 3413:
#..#..##..
.#..#..###
#..#....##
##.......#
#.###.....
#....#..##
#........#
.....#...#
#...#....#
#.#..#..#.
Tile 3923:
.#...#####
#..####...
#.##....#.
#....##..#
#.#...##.#
..#..#....
#.#......#
.#..#..#.#
#.##....#.
###.###...
Tile 3527:
#.##...###
#......##.
#..#..#..#
....#....#
#.....#.##
..........
#........#
#..#.#...#
.##..#...#
#.#.##.###
Tile 2957:
###.#...#.
#.#..#....
...#.....#
#.#......#
.##.....#.
.........#
....#.....
#......#.#
...#......
#..##.##..
Tile 1481:
##....#...
...#...#.#
#.##......
.......#..
.#..#..#.#
.....###.#
#.........
.......##.
......#...
..###....#
Tile 1747:
###..#####
..#.##....
##.......#
.#........
#.##.....#
...#......
#........#
#.##...#.#
####......
##.##...#.
Tile 3659:
.##.###...
##..##.##.
####..#.##
....##.###
#.........
.##.#.#..#
.#...#..##
..####.##.
#..#..#...
..###...#.
Tile 2621:
#####....#
...#....#.
.##......#
..###....#
...#..#...
.........#
#.........
.#..#..#..
.#...##...
#......#.#
Tile 2927:
.#..####..
......#.##
.......#.#
#...#.##.#
##...#..##
#.....##.#
#.......#.
##........
#.....#...
##.#.##...
Tile 3727:
..#.....#.
#........#
####..#..#
##.#....##
...#......
###....#..
#..##..#.#
#.#.#...##
#...#.....
.####.##.#
Tile 2803:
##.##..###
#....###.#
#.....##..
#.#...#...
.#.#......
#...##..##
......#...
##.....#.#
.##.#..##.
..##.##...
Tile 2593:
.##.#....#
#.#.#.#...
#.......##
..........
...####...
....##....
#...##.#.#
#...##..##
.##......#
##....##.#
Tile 3109:
.###.#..##
#.........
.#...#..##
##...#...#
#..#....#.
#.........
#........#
#.##..#...
.#.......#
#.#####..#
Tile 3797:
#...##.##.
.....#..#.
##..##.#..
#........#
.#...#....
##..#.#...
##....#...
###.##...#
#......#.#
##.###..#.
Tile 1367:
##.#..##.#
..#.......
.#.......#
...#..##..
.##....##.
#..#.##..#
..#...#..#
#.#......#
#..#....##
...##..#..
Tile 2309:
##.#.####.
..###.....
...#....##
..#....#..
.#....#..#
##....#..#
..#......#
..##..##.#
.....###.#
#..##.##.#
Tile 2087:
.#.#.#.#.#
#.##....#.
#..#....#.
...#......
#.......##
...#..#...
#...###..#
.#....#..#
.#.###...#
.#.#######
Tile 2441:
....####.#
.........#
#.....##..
.#..##.#.#
####.#..##
#.#.....#.
#.#.#.##.#
....#...##
###..###.#
.#.....##.
Tile 3643:
##.#...###
..##....##
.....#....
#.#.#.#...
...#.#.#..
....##....
....##....
#.....##.#
#.......##
.......##.
Tile 3251:
##...#.#..
.#.......#
.#....##..
..#.....##
.#.#......
..#..##.##
.##..###.#
....#....#
##..##...#
#.#..###.#
Tile 1787:
#.#.#..#.#
..........
..#......#
..........
.#......#.
..#....#..
........#.
##....#..#
..##......
###.#...#.
"""
input = """Tile 2311:
..##.#..#.
##..#.....
#...##..#.
####.#...#
##.##.###.
##...#.###
.#.#.#..##
..#....#..
###...#.#.
..###..###
Tile 1951:
#.##...##.
#.####...#
.....#..##
#...######
.##.#....#
.###.#####
###.##.##.
.###....#.
..#.#..#.#
#...##.#..
Tile 1171:
####...##.
#..##.#..#
##.#..#.#.
.###.####.
..###.####
.##....##.
.#...####.
#.##.####.
####..#...
.....##...
Tile 1427:
###.##.#..
.#..#.##..
.#.##.#..#
#.#.#.##.#
....#...##
...##..##.
...#.#####
.#.####.#.
..#..###.#
..##.#..#.
Tile 1489:
##.#.#....
..##...#..
.##..##...
..#...#...
#####...#.
#..#.#.#.#
...#.#.#..
##.#...##.
..##.##.##
###.##.#..
Tile 2473:
#....####.
#..#.##...
#.##..#...
######.#.#
.#...#.#.#
.#########
.###.#..#.
########.#
##...##.#.
..###.#.#.
Tile 2971:
..#.#....#
#...###...
#.#.###...
##.##..#..
.#####..##
.#..####.#
#..#.#..#.
..####.###
..#.#.###.
...#.#.#.#
Tile 2729:
...#.#.#.#
####.#....
..#.#.....
....#..#.#
.##..##.#.
.#.####...
####.#.#..
##.####...
##..#.##..
#.##...##.
Tile 3079:
#.#.#####.
.#..######
..#.......
######....
####.#..#.
.#...#.##.
#.#####.##
..#.###...
..#.......
..#.###...""" |
999,550 | 0cf284f800ad97fd77f9f1746f963315f59f6872 | from question9 import Computer, TerminateSequence
import copy
class Scaffolding():
    """Walks an ASCII scaffold map and records the robot's path.

    The path is built as alternating step counts and turn letters
    ("L"/"R").  Coordinates are (x, y) with y growing downward; the
    heading is one of the single letters "R", "L", "U", "D".
    """
    def __init__(self, input_list):
        # input_list: rows of single characters ('#' scaffold, '.' space,
        # '^' robot start) — assumed rectangular; TODO confirm with caller.
        self.path = []
        self.spaces_touched = set()
        self.prev_direction = ""
        self.input_map = input_list
        self.ymax = len(self.input_map)
        self.xmax = len(self.input_map[0])
        self.cursor = self.find_start()
        # Walk starts heading right regardless of the '^' icon's facing.
        self.direction = "R"
    def direction_to_walk(self):
        """Return the (dx, dy) unit step for the current heading."""
        if self.direction == "R":
            return (1, 0)
        elif self.direction == "L":
            return (-1, 0)
        elif self.direction == "U":
            return (0, -1)
        elif self.direction == "D":
            return (0, 1)
    def check_right(self):
        """Return the (dx, dy) offset of the cell to the robot's right."""
        if self.direction == "R":
            return (0, 1)
        elif self.direction == "L":
            return (0, -1)
        elif self.direction == "U":
            return (1, 0)
        elif self.direction == "D":
            return (-1, 0)
    def check_left(self):
        """Return the (dx, dy) offset of the cell to the robot's left."""
        if self.direction == "R":
            return (0, -1)
        elif self.direction == "L":
            return (0, 1)
        elif self.direction == "U":
            return (-1, 0)
        elif self.direction == "D":
            return (1, 0)
    def apply_rotation(self, left_or_right):
        """Rotate the current heading 90 degrees left or right in place."""
        # NOTE(review): debug leftover — this prints "Weird case" on every
        # rotation, not just in the error branch below.
        print(f"Weird case {self.direction}, {left_or_right}")
        if self.direction == "R" and left_or_right == "L":
            self.direction = "U"
        elif self.direction == "R" and left_or_right == "R":
            self.direction = "D"
        elif self.direction == "L" and left_or_right=="R":
            self.direction = "U"
        elif self.direction == "L" and left_or_right=="L":
            self.direction = "D"
        elif self.direction == "U" and left_or_right == "L":
            self.direction = "L"
        elif self.direction == "U" and left_or_right == "R":
            self.direction = "R"
        elif self.direction == "D" and left_or_right == "L":
            self.direction = "R"
        elif self.direction == "D" and left_or_right == "R":
            self.direction = "L"
        else:
            raise Exception(f"Weird case {self.direction}, {left_or_right}")
    def find_start(self):
        """Return the (x, y) of the robot icon '^'; raise if absent."""
        for y in range(self.ymax):
            for x in range(self.xmax):
                if self.input_map[y][x] == "^":
                    return (x, y)
        raise Exception("Start not found")
    def calculate_path(self):
        """Trace the scaffold and return [steps, turn, steps, turn, ...].

        Walks straight while the next cell is '#'; at the end of a run it
        records the step count, then a turn letter if a turn is possible.
        Returns when no turn is available (end of the scaffold).
        Side effects: prints progress and redraws the map each step.
        """
        steps = 0
        path = []
        (x, y) = self.cursor
        while True:
            print("Walking down")
            walk_dir = self.direction_to_walk()
            (x,y) = (x + walk_dir[0], y + walk_dir[1])
            stop = False
            # Bounds check; NOTE(review): y is limited to ymax - 2 while x
            # may reach xmax - 1 — asymmetric, presumably because the last
            # map row is blank. TODO confirm against the input format.
            if y >= self.ymax - 1:
                stop = True
            elif x >= self.xmax:
                stop = True
            elif x < 0:
                stop = True
            elif y < 0:
                stop = True
            if stop is False and self.input_map[y][x] == "#":
                steps += 1
                self.print_map((x,y))
            else:
                # Ran off the scaffold: back up one cell, record the run,
                # then try to turn toward an adjacent scaffold cell.
                print("walk complete")
                self.cursor = (x - walk_dir[0], y - walk_dir[1])
                (x, y) = self.cursor
                path.append(steps)
                steps = 0
                rotate = self.calculate_rotation(self.cursor)
                print(rotate)
                if rotate is not None:
                    path.append(rotate)
                else:
                    return path
    def calculate_rotation(self, cursor):
        """Turn toward scaffold on the left, else the right; return the letter.

        Returns None (implicitly) when neither side has scaffold, which
        signals the end of the path to calculate_path().
        """
        left = self.check_left()
        right = self.check_right()
        yleft = cursor[1] + left[1]
        xleft = cursor[0] + left[0]
        yright = cursor[1] + right[1]
        xright = cursor[0] + right[0]
        if yleft <= self.ymax - 2 and yleft >= 0 and xleft <= self.xmax - 1 and xleft >= 0:
            if self.input_map[yleft][xleft] == "#":
                self.apply_rotation("L")
                return "L"
        if yright <= self.ymax - 2 and yright >= 0 and xright <= self.xmax - 1 and xright >= 0:
            if self.input_map[yright][xright] == "#":
                self.apply_rotation("R")
                return "R"
        else:
            # Dangling else: only taken when the right-hand cell is out of
            # bounds; in-bounds-but-not-'#' falls through to implicit None.
            return
    def print_map(self, cursor):
        """Clear the console and redraw the map with the robot icon at cursor.

        NOTE(review): os.system("cls") is Windows-only.
        """
        import os
        os.system("cls")
        if self.direction == "R":
            icon = ">"
        elif self.direction == "L":
            icon = "<"
        elif self.direction == "U":
            icon = "^"
        elif self.direction == "D":
            icon = "v"
        # Deep copy so the real map is never mutated by the drawing icon.
        imap = copy.deepcopy(self.input_map)
        imap[cursor[1]][cursor[0]] = icon
        print("\n".join(["".join(i) for i in imap]))
def calculate_intersections(input_list):
    """Mark scaffold crossings with 'O' and return their alignment sum.

    A crossing is an interior cell that is '#' together with all four of
    its neighbours.  Cells are rewritten to 'O' as soon as they are found,
    so an already-marked cell no longer counts as '#' for later neighbours
    (scan order: top-to-bottom, left-to-right).  As a side effect the last
    row of input_list is popped before returning.
    """
    height = len(input_list)
    width = len(input_list[0])
    crossings = []
    for row in range(1, height - 2):
        for col in range(1, width - 2):
            cells = (
                input_list[row - 1][col],
                input_list[row + 1][col],
                input_list[row][col - 1],
                input_list[row][col + 1],
                input_list[row][col],
            )
            if all(cell == "#" for cell in cells):
                print((col, row))
                input_list[row][col] = "O"
                crossings.append((col, row))
    alignment_parameter = find_intersections(crossings)
    input_list.pop()
    return alignment_parameter

def find_intersections(intersections):
    """Return the sum of x * y over every (x, y) crossing."""
    return sum(x * y for (x, y) in intersections)
def question_17():
    """Day 17 driver: run the Intcode camera program, decode its ASCII
    output into a 2-D character map, then trace and print the scaffold path.
    """
    with open("data\\q17input.txt") as f:
        input_data = f.read()
    intcode_program = input_data.split(",")
    intcode_program = list(map(int, intcode_program))
    computer = Computer(intcode_program,[])
    output = computer.execute_until_terminate()
    # The Intcode output is a stream of ASCII codes; decode to characters.
    mapped = []
    for i in output:
        mapped.append(chr(i))
    # Split the flat character stream into rows on newline characters.
    mapped_data = []
    while True:
        try:
            ind = mapped.index("\n")
        except ValueError:
            break
        mapped_data.append(mapped[0:ind])
        mapped = mapped[ind+1:]
    #print(f"Question 17a: {calculate_intersections(mapped_data)}")
    scaffold = Scaffolding(mapped_data)
    path = scaffold.calculate_path()
    print(path)
# Run the day-17 driver only when executed as a script, not on import.
if __name__ == "__main__":
    question_17()
# Ugh, don't have to consider all the paths through the scaffold |
999,551 | 68c728c251618beccb23436f02968ef6427027ed | from __future__ import unicode_literals
from django.apps import AppConfig
class CrispyFormsAppConfig(AppConfig):
    """Django application configuration for the ``crispy_forms_app`` app."""
    name = 'crispy_forms_app'
|
999,552 | 38d94933f21cb0ef28724a1f54b422b07b1b57f6 | #!/usr/bin/python
from Project.logit import print_to_log # local file
from os import system, path # import two functions
from platform import system as platform # import platform system as platform
from subprocess import Popen, PIPE # needed for built-in terminal
from Tkinter import * # need this too
class Main(object):
    """
    My own compiler.

    A minimal Tkinter editor with a code pane, an output pane and a
    cursor-position label; files are kept under ``Project/myfile.py``.
    Uses the Python 2 ``Tkinter`` module and macOS Command-key bindings.
    """
    def __init__(self):
        """
        Does nothing.
        """
        pass
    def create_file(self, value=None):
        """
        Creates a file then opens it.
        """
        if not path.isdir("Project"):
            system("mkdir Project")
        # Template is written via a shell `echo ... > file` command.
        string_to_systemize = "echo \"#!/usr/bin/python\n" + \
            "# Please use fp = open(\'Project/yourfile.*\') " + \
            "when opening YOUR files\n" + \
            "# to not lose YOUR file in the jumble of OTHER files.\n" + \
            "# Also, do NOT delete the very first comment line.\n" + \
            "# \'logs.txt\' is your friend for your error logs.\"" + \
            "> Project/myfile.py"
        system(string_to_systemize)
        # Executable bit is required by run_file(), which execs the script.
        system("chmod +x Project/myfile.py")
        self.open_file()
    def save_file(self, value=None):
        """
        Saves a file.
        """
        text_to_save = str(self.my_text.get("1.0", END))
        fp = open("Project/myfile.py", "w")
        fp.write(text_to_save)
        fp.close()
    def open_file(self, value=None):
        """
        Opens the file.
        """
        fp = open("Project/myfile.py")
        file_contents = fp.read()
        self.my_text.delete("1.0", END)
        self.my_text.insert("1.0", file_contents)
        fp.close()
    def run_file(self, value=None):
        """
        Runs the file and outputs it to the text window.
        """
        self.save_file()
        # Executes the saved script directly (relies on its shebang + chmod).
        self.p = Popen("./Project/myfile.py", stdout=PIPE, stderr=PIPE)
        output, errors = self.p.communicate()
        self.my_output.delete("1.0", END)
        self.my_output.insert("1.0", output)
        if errors != "":
            # Errors go to the log file and turn the output pane red.
            print_to_log(errors)
            self.my_output.configure(fg="red")
        else:
            self.my_output.configure(fg="white")
        self.my_output.insert("1.0", errors)
    def select_all(self, value=None):
        """
        Selects the whole text.
        """
        self.my_text.tag_add(SEL, "1.0", END)
        self.my_text.mark_set(INSERT, "1.0")
        self.my_text.see(INSERT)
        # "break" stops Tk from running the default binding afterwards.
        return "break"
    def copy_text(self, value=None):
        """
        Copies text.
        """
        self.my_text.event_generate("<<Copy>>")
    def paste_text(self, value=None):
        """
        Pastes text.
        """
        self.my_text.event_generate("<<Paste>>")
        self.current_area()
    def cut_text(self, value=None):
        """
        Cuts text.
        """
        self.my_text.event_generate("<<Cut>>")
        self.current_area()
    def insert_tab(self, value=None):
        """
        Inserts a tab, or 4 spaces.
        """
        # NOTE(review): actually inserts 3 spaces, not 4 as the docstring
        # says — confirm which is intended.
        self.my_text.insert(INSERT, " " * 3)
    def current_area(self, value=None):
        """
        Gets the current area of the cursor and outputs it.
        """
        # Tk index has the form "line.column"; split on the dot.
        my_area = self.my_text.index(INSERT)
        str(my_area)  # NOTE(review): no-op — result is discarded.
        for x in range(0, len(my_area)):
            if my_area[x] == ".":
                my_y = my_area[0:x]
                my_x = my_area[x + 1:]
        my_new_area = "Ln: " + my_y + " | Col: " + my_x
        self.my_location.config(text=my_new_area)
    def quit_window(self, value=None):
        """
        Quits a Tk window.
        """
        exit()
    def passer(self, value=None):
        """
        Literally is just a pass function.
        """
        pass
    def go_to_line(self, value=None):
        """
        Goes to a line.
        """
        # Small popup with a line-number entry and a "Go" button.
        self.searcher = Toplevel()
        self.searcher.wm_title("Go To Line")
        self.line_number = Entry(self.searcher)
        on_clicker = Button(self.searcher, command=self.go_to, text="Go")
        self.line_number.pack()
        on_clicker.pack()
    def go_to(self, value=None):
        """
        Goes to a specific line.
        """
        self.go_to_this_line = self.line_number.get()
        # float() turns "12" into "12.0", i.e. Tk index line 12, column 0.
        self.my_text.mark_set(INSERT, str(float(self.go_to_this_line)))
        self.current_area()
        self.my_text.see(INSERT)
        self.searcher.destroy()
    def undo(self, value=None):
        """
        Undoes something.
        """
        self.my_text.event_generate("<<Undo>>")
        self.current_area()
    def redo(self, value=None):
        """
        Redoes something.
        """
        self.my_text.event_generate("<<Redo>>")
        self.current_area()
    def clear_edit(self, value=None):
        """
        Clears the edit clipboard.
        """
        self.my_text.edit_reset()
    def window_front(self):
        """
        Sets window to front.
        """
        # macOS-only AppleScript hack to raise the Python window.
        if platform() == "Darwin":
            system("""/usr/bin/osascript -e 'tell app "Finder" to set \
            frontmost of process "Python" to true' """)
    def start(self):
        """
        Starts the compiler.
        """
        root = Tk()
        menubar = Menu(root)
        # Editor pane (top), output pane (middle), cursor label (bottom-right).
        self.my_text = Text(root,
                            bg="gray9", insertbackground="white",
                            fg="white", wrap="word", undo=True,
                            tabs="1c", height="20", width="80")
        self.my_output = Text(root,
                              bg="gray9", insertbackground="white",
                              fg="white", height="10", width="80")
        self.my_location = Label(root,
                                 bg="gray9", fg="white", justify="right",
                                 text="Ln: 1 | Col: 0")
        self.my_text.pack()
        self.my_output.pack()
        self.my_location.pack(side=RIGHT)
        # macOS Command-key shortcuts for file and edit actions.
        self.my_text.bind("<Command-s>", self.save_file)
        self.my_text.bind("<Command-o>", self.open_file)
        self.my_text.bind("<Command-r>", self.run_file)
        self.my_text.bind("<Command-n>", self.create_file)
        self.my_text.bind("<Command-a>", self.select_all)
        self.my_text.bind("<Tab>", self.insert_tab)
        # Keep the Ln/Col label fresh after any key or mouse activity.
        self.my_text.bind("<KeyRelease>", self.current_area)
        self.my_text.bind("<ButtonRelease-1>", self.current_area)
        self.my_text.bind("<Command-q>", self.quit_window)
        self.my_text.bind("<Command-Control-l>", self.clear_edit)
        self.my_text.event_add("<<Paste>>", "<Command-v>")
        self.my_text.event_add("<<Copy>>", "<Command-c>")
        self.my_text.event_add("<<Cut>>", "<Command-x>")
        self.my_text.focus_set()
        filemenu = Menu(menubar, tearoff=0)
        editmenu = Menu(menubar, tearoff=0)
        filemenu.add_command(label="New", command=self.create_file)
        filemenu.add_command(label="Save", command=self.save_file)
        filemenu.add_command(label="Open", command=self.open_file)
        filemenu.add_command(label="Run", command=self.run_file)
        filemenu.add_command(label="Quit", command=self.quit_window)
        editmenu.add_command(label="Select All", command=self.select_all)
        editmenu.add_command(label="Undo", command=self.undo)
        editmenu.add_command(label="Redo", command=self.redo)
        editmenu.add_command(label="Copy", command=self.copy_text)
        editmenu.add_command(label="Paste", command=self.paste_text)
        editmenu.add_command(label="Cut", command=self.cut_text)
        editmenu.add_command(label="Go To Line", command=self.go_to_line)
        menubar.add_cascade(label="Files", menu=filemenu)
        menubar.add_cascade(label="EditText", menu=editmenu)
        root.config(menu=menubar)
        root.wm_title("Mere")
        self.window_front()
        root.mainloop()
# Instantiate and launch the editor when the module is executed.
my_compiler = Main() # TODO: Make more features
my_compiler.start()
|
999,553 | b2139ecb48a81bd8cfa4cfb7ca94e2243c6b386d | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 12 15:54:04 2020
@author: descentis
"""
import numpy as np
import matplotlib.pyplot as plt
# Make a fake dataset
# Bar chart of overall compression percentages for different k choices.
height = [46, 93.25, 93.60, 95.02, 95.38]
bars = (r'$k = 2$', r'$k = \sqrt{n\left(\dfrac{m-d}{m+d}\right)}$', r'$C = n^2$', r'$k = 1000$', r'$k = n-1$')
y_pos = np.arange(len(bars))
plt.bar(y_pos, height, color=(0.1, 0.1, 0.1, 0.1), edgecolor='blue')
# Annotate each bar with its percentage just below the bar top.  The first
# label ('46%') is shorter, so it gets a smaller x offset to stay centred.
# (The previous hand-written `label` list was unused and was missing the
# '46%' entry; the five near-identical plt.text calls are now one loop.)
labels = ['46%', '93.25%', '93.60%', '95.02%', '95.38%']
for i, (h, s) in enumerate(zip(height, labels)):
    plt.text(x=i - (0.2 if i == 0 else 0.35), y=h - 6, s=s, size=12)
plt.xticks(y_pos, bars, fontsize=8)
plt.savefig('overall_compression.png', dpi=800)
plt.show() |
999,554 | 33caaf13069e63a36e76264357d916dec6c06b54 | a=eval(input("enter the number"))
# Reverse the decimal digits of `a` (read above; note eval(input()) on the
# preceding line executes arbitrary user input — a known risk here).
rev=0
while(a>0):
    d=a%10  # least-significant digit
    rev=rev*10+d  # append it to the reversed number
    a=a//10  # drop it from a
print(rev)
|
999,555 | 279c777c96e4e2bab82c523f714fd8e0aefd4484 | from django.contrib import admin
from guardian.admin import GuardedModelAdmin
from wtb.models import WtbBrand, WtbEntity, WtbBrandUpdateLog
@admin.register(WtbBrand)
class WtbBrandModelAdmin(GuardedModelAdmin):
    """Admin for WtbBrand, using django-guardian's GuardedModelAdmin."""
    list_display = ('__str__', 'storescraper_class')
@admin.register(WtbEntity)
class WtbEntityModelAdmin(admin.ModelAdmin):
    """Admin changelist for WtbEntity, filterable by brand and category."""
    list_display = ('__str__', 'brand', 'category', 'product', 'name', 'key')
    list_filter = ('brand', 'category')
    # Association fields are not editable through the admin form;
    # presumably they are set programmatically — verify against callers.
    readonly_fields = ('product', 'last_association_user')
@admin.register(WtbBrandUpdateLog)
class WtbBrandUpdateLogModelAdmin(admin.ModelAdmin):
    """Admin changelist for brand update logs, filterable by brand."""
    # 'status' was listed twice in list_display, which renders the column
    # twice in the changelist; keep a single occurrence in original order.
    list_display = ('brand', 'status', 'creation_date', 'last_updated')
    list_filter = ('brand', )
|
999,556 | 120bc28e4a22000cde0a8a0d23707208ee5809e6 | import socket
import re
# initialize the game board
def init_game(csocket):
    """Set up a fresh tic-tac-toe board and hand control to the game loop.

    :param csocket: connected client socket used for the whole game
    """
    board = [' ' for x in range(10)] #list to contain player's moves (index 0 unused; cells 1-9)
    print("I will start the game")
    print_board(board) # to print the initialized board
    print("Type q when you want to quit")
    start_game(csocket, board) #start the game
# to start the game
def start_game(csocket, board):
#while the board is not full
while not isBoardFull(board):
print("Your turn")
#get move from the server
server_move = input("Enter a number between 1 to 9 to mark your position in the grid \n > ")
#check if its not a quit message
if(str(server_move) == 'q'):
server_quit = "Server has quit the game..Byee"
print("Thanks for playing")
csocket.send(bytes(server_quit, 'utf-8'))
break
#change the input to int type
server_move = int(server_move)
#while the input is invalid, get input from the server again
while not checkMoveIsValid(board, server_move):
server_move = int(input("Invalid....Enter your position in the grid (1-9) : "))
#update board if the move is valid
board[server_move] = 'X'
#print the board
print_board(board)
#if the board is full, but X is not the winner, then the match is draw
if (isBoardFull(board) and not checkWinner(board, 'X')):
match_draw = "Game Draw!!!! Until next time"
print(match_draw)
csocket.send(bytes(match_draw, 'utf-8'))
csocket.send(bytes(str(server_move), 'utf-8'))
break
#check if server wins
if checkWinner(board, 'X'):
print("Server wins this time")
server_win = "Server Won!!! You lose... Good luck next time :) "
csocket.send(bytes(server_win, 'utf-8'))
csocket.send(bytes(str(server_move), 'utf-8'))
break
#If the server has not won yet and the board is not full
else:
#send the server's move to the client
csocket.send(bytes(str(server_move), 'utf-8'))
#wait for the client to play
print("Wait for the client to playy ...")
#get the client's move
client_move = csocket.recv(1024).decode('utf-8')
#if the client has sent a quit message, then break out of the loop
if(re.search("quit the game", client_move)):
print(client_move)
break
#else, conver the client's move to integer type
else:
client_move = int(client_move)
#update the board with client's move
board[client_move] = 'O'
#check if the board is full and the client has not won, then the match is draw
if (isBoardFull(board) and not checkWinner(board, 'O')):
match_draw = "Game Draw!!!! Until next time"
print(match_draw)
csocket.send(bytes(match_draw, 'utf-8'))
break
#if the client has won, send the winning message to client
if checkWinner(board, 'O'):
print("Client's Move: ", client_move)
print_board(board) #print the board
print("Client wins this time")
client_win = "Congratulations...You Won!!! See you next time :) "
csocket.send(bytes(client_win, 'utf-8'))
break
#If the client has not won yet and the board is not full, continue the game
else:
print("Client's Move: ", client_move)
print_board(board)
#Function to check if 'X' or 'O' has won the game
def checkWinner(board, letter):
return ((board[1] == letter and board[2] == letter and board[3] == letter) or #across top
(board[4] == letter and board[5] == letter and board[6] == letter) or #across middle
(board[7] == letter and board[8] == letter and board[9] == letter) or #across bottom
(board[1] == letter and board[4] == letter and board[7] == letter) or #down left
(board[2] == letter and board[5] == letter and board[8] == letter) or #down middle
(board[3] == letter and board[6] == letter and board[9] == letter) or #down right
(board[1] == letter and board[5] == letter and board[9] == letter) or #diagonal right
(board[3] == letter and board[5] == letter and board[7] == letter)) #diagonal left
#Function to check if the move is valid
def checkMoveIsValid(board, move):
#if the number is invalid
if move < 1 or move > 9:
return False
#if the move is already taken
if board[move] == 'O' or board[move] == 'X':
return False
else:
return True
#function to check if the board is full
def isBoardFull(board):
#If the board has empty spaces, return False
if board.count(' ') > 1:
return False
#else, return true
else:
return True
#function to print the game board
def print_board(board):
print('******************')
print(' | |')
print(' ' + board[1] + ' | ' + board[2] + ' | ' + board[3])
print(' | |')
print('-----------')
print(' | |')
print(' ' + board[4] + ' | ' + board[5] + ' | ' + board[6])
print(' | |')
print('-----------')
print(' | |')
print(' ' + board[7] + ' | ' + board[8] + ' | ' + board[9])
print(' | |')
print('******************')
#function to start the chat with the client
def client_chat(server_socket):
#Server accepts the connection from the client
csocket, client_address = server_socket.accept()
print("Connected by ", str(client_address))
welcome = "Welcome to the game. Do you want to start the game? Send yes if you want to start or no if you want to quit \n (yes, no)"
csocket.send(bytes(welcome, 'utf-8'))
msg_recv = ""
msg_recv = csocket.recv(1024).decode('utf-8')
#if the client accepts to play the game, initialize the game
if(re.search("yes",msg_recv)):
init_game(csocket)
#else, send goodbye message
else:
goodbye_msg = "Okay..Take care..See you next time"
print("Client rejected the game request..Goodbye")
csocket.send(bytes(goodbye_msg, 'utf-8'))
return
#Main Function
def main():
#Host and port for binding to the server
host = socket.gethostbyname("localhost")
port = 12345
#set up socket
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#To overcome the address already in use message to modify the socket to reuse the address
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
#Bind the host and port with the server
server_socket.bind((host, port))
#Listen for connection
server_socket.listen(5)
print("Server listening on : ", host, "on port: ", port)
#call for chat function
client_chat(server_socket)
#Once the function is returned, close the socket connection
server_socket.close()
#Call for main function
if __name__ == '__main__':
main() |
999,557 | 1ae1b887c413bab285b1d861d024be4efc10aa7e | import json
import urllib2
from bs4 import BeautifulSoup
from pyalexaskill.AlexaBaseHandler import AlexaBaseHandler
from utilities.utils import IntentHandler
class AlexaTivixHandler(AlexaBaseHandler):
# Sample concrete implementation of the AlexaBaseHandler to test the
# deployment scripts and process.
# All on_ handlers call the same test response changing the request type
# spoken.
def __init__(self, app_id=None):
super(self.__class__, self).__init__(app_id)
def _test_response(self, msg):
session_attributes = {}
card_title = "Test Response"
card_output = "Test card output"
speech_output = "Welcome to the Python Alexa Test Deployment for request type {0}. It seems to have worked".format(
msg)
# If the user either does not reply to the welcome message or says something
# that is not understood, they will be prompted again with this text.
reprompt_text = "Reprompt text for the Alexa Test Deployment"
should_end_session = True
speechlet = self._build_speechlet_response(card_title, card_output, speech_output, reprompt_text,
should_end_session)
return self._build_response(session_attributes, speechlet)
def on_processing_error(self, event, context, exc):
session_attributes = {}
card_title = "Error"
speech_output = "I am having difficulty fulfilling your request."
reprompt_text = "I did not hear you"
should_end_session = True
if exc:
speech_output = "I am having difficulty fulfilling your request. {0}".format(exc.message)
card_output = speech_output
speechlet = self._build_speechlet_response(card_title,
card_output,
speech_output,
reprompt_text,
should_end_session)
return self._build_response(session_attributes, speechlet)
def on_launchrequest(self, launch_request, session):
session_attributes = {}
card_title = "Welcome"
card_output = "Welcome to the Tivix Alexa App"
speech_output = "Welcome to the Tivix Alexa App. How may we innovate engineering for you today?"
reprompt_text = "I'm afraid I did not hear you"
should_end_session = False
speechlet = self._build_speechlet_response(card_title,
card_output,
speech_output,
reprompt_text,
should_end_session)
return self._build_response(session_attributes, speechlet)
def on_session_started(self, session_started_request, session):
return self._test_response("on session started")
def on_intent(self, intent_request, session):
response = None
session_attributes = {}
reprompt_text = "I'm afraid I did not hear you"
should_end_session = False
intent_name = self._get_intent_name(intent_request)
slot_packet = self.assemble_slot_packets(intent_request)
intent_handler = IntentHandler(slot_packet, intent_request, session)
# get the speech output
speech_packet= intent_handler.run_handler(intent_name)
speechlet = self._build_speechlet_response(
speech_packet['card_title'],
speech_packet['card_output'],
speech_packet['speech_output'],
speech_packet['reprompt_text'],
speech_packet['should_end_session']
)
response = self._build_response(session_attributes, speechlet)
# else:
# raise ValueError("Invalid intent")
return response
def on_session_ended(self, session_end_request, session):
return self._test_response("on session end")
def on_help_intent(self, intent_request, session):
session_attributes = {}
card_title = "Help"
card_output = "Card Help"
speech_output = "Speech Help"
reprompt_text = "I did not hear you, {0}".format(speech_output)
should_end_session = False
speechlet = self._build_speechlet_response(card_title,
card_output,
speech_output,
reprompt_text,
should_end_session)
return self._build_response(session_attributes, speechlet)
def on_stop_intent(self, intent_request, session):
return self.on_cancel_intent(intent_request, session)
def on_cancel_intent(self, intent_request, session):
session_attributes = {}
card_title = "Bye"
card_output = "Thank you and Good-bye"
speech_output = "Thank you and Good-bye"
reprompt_text = "{0}".format(speech_output)
should_end_session = True
speechlet = self._build_speechlet_response(card_title,
card_output,
speech_output,
reprompt_text,
should_end_session)
return self._build_response(session_attributes, speechlet)
def on_no_intent(self, intent_request, session):
return self._test_response("on no intent")
def on_yes_intent(self, intent_request, session):
return self._test_response("on yes intent")
def on_repeat_intent(self, intent_request, session):
return self._test_response("on repeat intent")
def on_startover_intent(self, intent_request, session):
return self._test_response("on start over intent")
# Tivix
def assemble_slot_packets(self, intent_request):
slots = {}
with open('assets/IntentSchema.json') as data_file:
data = json.load(data_file)
for intent in data['intents']:
if 'slots' in intent:
for slot in intent['slots']:
slot_name = slot['name']
value = 'None'
exists = self._slot_exists(slot_name, intent_request)
if exists:
value = self._get_slot_value(slot_name, intent_request)
slots[slot_name] = { 'exists': exists, 'value': value }
return slots
|
999,558 | 6c27983036172d018f07c059c8c56e67a983d6a0 | # write a program that calculates the average student height from a List of heights.
student_heights = input("Input a list of student heights ").split()
for n in range(0, len(student_heights)):
student_heights[n] = int(student_heights[n])
av_height=0
sum_height=0
i=0
for h in student_heights:
sum_height=sum_height+student_heights[i]
i+=1
av_height=sum_height/i
print(round(av_height)) |
999,559 | c2536a6110414ee2c004c0e57a39ff5e0e32e55c | def is_pyramid(n):
if n == 0:
return 0, False
is_pyr = 1
sum = 0
k = 0
while n != sum:
if sum > n:
is_pyr = 0
break
k += 1
sum += k ** 2
if is_pyr == 1:
return k, True
else:
return k, False
n = input()
if n.isdigit():
k, ans = is_pyramid(int(n))
if ans:
print(k)
else:
print("It is impossible")
else:
print("Not a positive int number")
|
999,560 | e8585df1d1f93c1d6eb905bcce12efff566a31e2 | from django.conf import settings
from django.contrib.auth.forms import AuthenticationForm
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.utils.simplejson import dumps
from torch.account.forms import UserForm as TorchUserForm
from torch.account.views import _deal_with_form_validation
from torch.idea.forms import make_IdeaForm
from torch.idea.models import Idea, order_by_popular
from torch.vote.models import create_vote
def create(request):
IdeaForm = make_IdeaForm(request.user)
is_create = False
# If the user is logged in then we don't need a form.
if request.user.is_authenticated():
UserForm = None
elif request.method == 'POST':
# Figure out if its creating a user account or logging in.
if 'first_name' in request.POST:
is_create = True
UserForm = TorchUserForm
else:
UserForm = AuthenticationForm
if request.method == 'POST':
# We need to authenticate the user and rebuild the IdeaForm
user_form_is_valid = True
if UserForm is not None:
user_form = UserForm(data=request.POST)
user_form_is_valid, user = _deal_with_form_validation(
request,
user_form,
is_create,
)
if user_form_is_valid:
IdeaForm = make_IdeaForm(user)
idea_form = IdeaForm(request.POST)
if idea_form.is_valid() and user_form_is_valid:
idea = idea_form.save()
url = reverse('idea_view', kwargs={'idea_id': idea.pk})
return HttpResponse(
dumps(
{'url': url},
),
mimetype="application/json",
)
else:
return HttpResponse(
dumps(
{'errors': str(idea_form.errors) + str(user_form.errors)},
),
mimetype="application/json",
)
else:
idea_form = IdeaForm()
user_form = TorchUserForm()
context = RequestContext(request, {
'user_form': user_form,
'idea_form': idea_form,
})
return render_to_response(
'idea/create.html',
context_instance=context,
)
def view(request, idea_id):
idea = get_object_or_404(
Idea.objects.select_related(),
pk=idea_id,
)
context = RequestContext(request, {
'idea': idea,
})
return render_to_response(
'idea/view.html',
context_instance=context,
)
def vote(request, idea_id):
idea = get_object_or_404(
Idea.objects.select_related(),
pk=idea_id,
)
ip = request.META['REMOTE_ADDR']
vote, created = create_vote(
request.user,
idea,
ip='%s' % ip,
)
return HttpResponse(
dumps(
{'created': created},
),
mimetype="application/json",
)
def manage(request):
idea_qs = Idea.objects.all().select_related()
sort = request.GET.get('sort')
available_sorts = {
'num_votes': '-num_votes',
'popular': '',
}
if sort and sort in available_sorts:
if sort == 'popular':
idea_qs = order_by_popular(idea_qs)
else:
idea_qs = idea_qs.order_by(available_sorts[sort])
paginator = Paginator(idea_qs, settings.TORCH_PAGINATION)
page = request.GET.get('page')
try:
ideas = paginator.page(page)
except PageNotAnInteger:
ideas = paginator.page(1)
except EmptyPage:
ideas = paginator.page(paginator.num_pages)
context = RequestContext(request, {
'sort': sort,
'ideas': ideas,
})
return render_to_response(
'idea/manage.html',
context_instance=context,
)
|
999,561 | 7829e7133c1f5213e59720d791d3d70e7a17cf97 | # -*- coding: utf-8 -*-
"""
Created on Sun May 31 14:19:13 2020
@author: renluqin
"""
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
import prince
"""load documents"""
combats = pd.read_csv("pokemon-challenge/combats.csv")
pokemon = pd.read_csv("pokemon-challenge/pokemon.csv")
tests = pd.read_csv("pokemon-challenge/tests.csv")
"""fill missing values"""
data = pokemon.copy()
data["Name"] = pokemon['Name'].fillna('Primeape')
data["Type 2"] = pokemon['Type 2'].fillna('No Type 2')
"""feature engineering"""
# calculate the win % of each pokemon
# add the calculation to the pokemon dataset
total_Wins = combats.Winner.value_counts()
# get the number of wins for each pokemon
numberOfWins = combats.groupby('Winner').count()
#both methods produce the same results
countByFirst = combats.groupby('Second_pokemon').count()
countBySecond = combats.groupby('First_pokemon').count()
numberOfWins = numberOfWins.sort_index()
numberOfWins['Total Fights'] = countByFirst.Winner + countBySecond.Winner
numberOfWins['Win Percentage']= numberOfWins.First_pokemon/numberOfWins['Total Fights']
# merge the winning dataset and the original pokemon dataset
results2 = pd.merge(data, numberOfWins, right_index = True, left_on='#')
results3 = pd.merge(data, numberOfWins, left_on='#', right_index = True, how='left')
results4 = results3.drop(columns=['First_pokemon', 'Second_pokemon','Total Fights'])
"""correlation map"""
f,ax = plt.subplots(figsize=(9, 9))
sns.heatmap(results4.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax)
"""PCA"""
results2["win"] = 'win percentage < 0.5'
results2.loc[results3["Win Percentage"]>0.5,"win"] = 'win percentage > 0.5'
X = results2.iloc[:,4:10]
pca = prince.PCA(n_components=6,
n_iter=3,
rescale_with_mean=True,
rescale_with_std=True,
copy=True,
check_input=True,
engine='auto',
random_state=42)
pca = pca.fit(X)
ax = pca.plot_row_coordinates(
X,
ax=None,
figsize=(6, 6),
x_component=0,
y_component=1,
labels=None,
color_labels=results2.iloc[:,-1],
ellipse_outline=False,
ellipse_fill=True,
show_points=True)
"""One-class SVM to detect outliers"""
from sklearn.svm import OneClassSVM
from numpy import quantile, where
svm = OneClassSVM(kernel='rbf', gamma=0.001, nu=0.02)
pred = svm.fit_predict(X)
scores = svm.score_samples(X)
thresh = quantile(scores, 0.005)
index = where(scores<=thresh)
index = index[0]
values = X.iloc[index]
plt.scatter(X['Speed'], X['Attack'])
plt.scatter(values['Speed'], values['Attack'], color='r')
plt.xlabel('Speed')
plt.ylabel('Attack')
plt.title("Outliers according to one-class SVM", fontsize=18)
"""onehot+pca"""
from sklearn.preprocessing import OneHotEncoder
results3[['Type 1','Type 2','Generation','Legendary']] = results3[['Type 1','Type 2','Generation','Legendary']].astype('category')
results3[['HP','Attack','Defense','Sp. Atk','Sp. Def','Speed']] = results3[['HP','Attack','Defense','Sp. Atk','Sp. Def','Speed']].astype('float64')
quali = results3[['Type 1','Type 2', 'Generation', 'Legendary']]
enc = OneHotEncoder(categories = 'auto')
quali = enc.fit_transform(quali).toarray()
quali = pd.DataFrame(quali)
quant = results3[['HP','Attack','Defense','Sp. Atk','Sp. Def','Speed']]
X = pd.concat( [quali, quant], axis=1 )
from sklearn.decomposition import PCA
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X = sc.fit_transform(X)
cls = PCA(n_components=50)
pcs = cls.fit_transform(X)
df_X = pd.DataFrame(pcs, columns=[f"PC{i}" for i in range(1, 7)])
sns.scatterplot(x="PC1", y="PC2", hue = results2["win"],data=df_X)
plt.bar(["Axe 1", "Axe 2", "Axe 3", "Axe 4", "Axe 5", "Axe 6"], cls.explained_variance_ratio_)
plt.title("Explained Variance Ratio", fontsize=20)
|
999,562 | b55047dc7da79e4ae8576548ce0645054e150507 |
"""
PROJECT EULER - PROBLEM 1
If we list all the natural numbers below 10 that are multiples of 3 or 5, we get 3, 5, 6 and 9.
The sum of these multiples is 23.
Find the sum of all the multiples of 3 or 5 below 1000.
"""
# Version 1
result = 0 # Add numbers that meet the condition
for num in range(1,1000): # Condition to get the numbers from 1 to 1000 (not including 1000)
if num % 3 == 0 or num % 5 == 0:
result += num
print(result)
# ----------------- An alternative is to use sets (thanks to Jason Hill for making me aware of sets for this problem) ----------------- #
# Version 2
# Using sets oer lists is faster in this context. Not a huge issue for this problem, but doesn't hurt to speed things up.
multOf3 = set(range(3,1000,3))
multOf5 = set(range(5,1000,5))
resultSet = multOf3.union(multOf5)
result = sum(resultSet)
print(result)
|
999,563 | ef5c41c5284546b850efb3c492114aab172b7f26 | #!/bin/env python3
# -*- coding: utf-8 -*-
# import libraries
from bs4 import BeautifulSoup
import urllib
#from urllib.parse import urlparse
#from urllib.request import urlopen
from urllib.request import urlretrieve
import glob
from PIL import Image
import os
import json
import threading
from threading import Thread
import time
f_json = dict()
image = dict()
url = 'http://tinyurl.com/ll4ujbm'
#print 'url:', url
f_json['url'] = url
starting_time = time.time()
page = urllib.request.urlopen(url)
origin = page.url
#print 'origin url:', origin
f_json['origin'] = origin
provider = urllib.parse.urlparse(origin).hostname
#print 'provider:',provider
f_json['provider'] = provider
provider_name = urllib.parse.urlparse(origin).hostname.split('.')[1]
#print 'provider_name:', provider_name
f_json['provider_name'] = provider_name
soup = BeautifulSoup(page, 'lxml')
head = soup.find('head')
title = head.find('title')
#print 'title:', title.text
f_json['title'] = title.text
if head.find('meta', {'name': 'description'}):
decription = head.find('meta', {'name': 'description'})
#print 'description:', decription.get('content')
f_json['description'] = decription.get('content')
body = soup.find('body')
images = body.find_all('img')
for iterate in images:
try:
link = iterate.get('src')
image_name = link.split('/').pop()
if 'http' in link:
# print link
urlretrieve(link, image_name)
d = urllib.request.urlopen(link).info()['Content-Type']
print ('mime: ',d)
for infile in glob.glob(image_name):
try:
image_dict = dict()
im = Image.open(infile)
width, height = im.size # image dimensions
print(infile)
print ('width:', width)
print ('height:', height)
print ('ratio:', ((float(height)/float(width))*100.00))
print ('size', os.stat(infile).st_size, 'bytes')
print ('mime:', im.format)
#print 'color', im.getcolors()
#rgb_im = im.convert('RGB')
#r, g, b = rgb_im.getpixel((1, 1))
#print 'Image color:', r,g,b
image_dict['url'] = link
image_dict['width'] = width
image_dict['height'] = height
image_dict['ratio'] = ((float(height)/float(width))*100.00)
image_dict['size'] = os.stat(infile).st_size
image_dict['mime'] = d
except Exception as e:
print(e)
continue
image[(image_name)] = image_dict
f_json["images"] = image
except Exception as e:
print ('Error: ', e)
continue
print(json.dumps(f_json))
print('Total time taken: ', time.time()-starting_time)
'''
print head.find(property='fb:app_id') if head.find(property='fb:app_id') else 0
for i in meta:
print i.has_attr('property')
if head.find('meta', {'property' : 'fb:app_id'}):
print head.find('meta', {'property' : 'fb:app_id'})
meta = head.find_all('meta')
for iter in meta:
if iter.find('meta',{'property' : 'fb:app_id'}):
print iter
break
else:
print 'some error',iter
'''
'''
body = soup.find_all('img')
#print body
for iter in body:
print iter.get('src')
'''
'''
# downlaod images
image = body.find_all('src')
urllib.urlretrieve(image, image+".jpg")
print image
'''
|
999,564 | f0c1f5aaba5a6ba5fe4f5927edcbdb7b5b36980d | from quark_runtime import *
import quark.reflect
import use_class_before_def_md
class Bar(object):
def _init(self):
pass
def __init__(self): self._init()
def go(self):
foo = Foo();
(foo).name = u"bob"
_println((foo).name);
def _getClass(self):
return u"pkg.Bar"
def _getField(self, name):
return None
def _setField(self, name, value):
pass
Bar.pkg_Bar_ref = use_class_before_def_md.Root.pkg_Bar_md
class Foo(object):
def _init(self):
self.name = None
def __init__(self): self._init()
def _getClass(self):
return u"pkg.Foo"
def _getField(self, name):
if ((name) == (u"name")):
return (self).name
return None
def _setField(self, name, value):
if ((name) == (u"name")):
(self).name = value
Foo.pkg_Foo_ref = use_class_before_def_md.Root.pkg_Foo_md
|
999,565 | b92d7c5f68ca816b909d232f7838c9febcd1ccda | ### Imports ###################################################################
from picamera.array import PiRGBArray
from picamera import PiCamera
import time
import cv2
import os
import pygame
import math
import ctypes
from ctypes import *
### Setup #####################################################################
os.putenv( 'SDL_FBDEV', '/dev/fb1' )
print 'success'
#print 'if any people are in the parking lot for some seconds, we can send signal.'
#print 'for particular time there are "some" people in parking lot, we can send signal'
# it is clear than 4_17_1.py. SAME
# Setup ##################################################
#camera setup
camera = PiCamera()
camera.resolution = ( 320, 240 )
camera.framerate = 30
rawCapture = PiRGBArray( camera, size=( 320, 240 ) )
#face recognization setup
fcounter = 0
facefind = 0
#park space setup
#time
start_1 = 0
start_2 = 0
start_3 = 0
end_1 = 0
end_2 = 0
end_3 = 0
# Load a cascade file for detecting faces####################################################################
face_for_park_cascade = cv2.CascadeClassifier('/home/pi/opencv/modules/objdetect/src/haarcascade_frontalface_default.xml')
t_start = time.time()
fps = 0
### Main ######################################################################
# Capture frames from the camera##########################################
for frame in camera.capture_continuous( rawCapture, format="bgr", use_video_port=True ):
image = frame.array
#it is standard line / look easy
cv2.line(image, (100,0),(100,200),(200,255,0),5)
cv2.line(image, (200,0),(200,200),(200,255,0),5)
cv2.line(image, (300,0),(300,200),(200,255,0),5)
cv2.putText(image,"x=100",(100,200),cv2.FONT_HERSHEY_SIMPLEX,0.5,(0,0,255),2)
cv2.putText(image,"x=200",(200,200),cv2.FONT_HERSHEY_SIMPLEX,0.5,(0,0,255),2)
cv2.putText(image,"x=300",(300,200),cv2.FONT_HERSHEY_SIMPLEX,0.5,(0,0,255),2)
#if we don't use this fcounter, the face recognition so sensitive that THE NUMBER of face is very changeable.
# only 1 time of 20, face is recognized. For the other 19times, the recognization is maintained.
if fcounter == 20:
fcounter = 0
facefind = 1
# Look for faces in the image using the loaded cascade file#######################
gray = cv2.cvtColor( image, cv2.COLOR_BGR2GRAY )
faces_for_park = face_for_park_cascade.detectMultiScale(gray)
for(x,y,w,h) in faces_for_park: #people exist
if x<100:
print "----------------------------"
print "person exists SECTION.1"
if(start_1 == 0):
print " start"
start_1 = time.clock()
else:
end_1 = time.clock()
if x>100 and x<200:
print "----------------------------"
print "person exists SECTION.2"
if(start_2 == 0):
print "start"
start_2 = time.clock()
else:
end_2 = time.clock()
if x>200:
print "---------------------------"
print "person exists SECTION.3"
if(start_3 == 0):
print "start"
start_3 = time.clock()
else:
end_3 = time.clock()
check = 1
#WARNING##########################################
if(end_1 - start_1 > 3.0):
print "11111111111111111111111" #Warning here
start_1 = 0
end_1 = 0
time.sleep(0.1) #it has to be here because the speed of process is so fast.
if(end_2 - start_2 > 3.0):
print "22222222222222222222222" #Warning here
start_2 = 0
end_2 = 0
time.sleep(0.1) #it has to be here because the speed of process is so fast.
if(end_3 - start_3 > 3.0):
print "33333333333333333333333" #Warning here
start_3 = 0
end_3 = 0
time.sleep(0.1) #it has to be here because the speed of process is so fast.
#face drawing
num2 = 0
while num2 <len(faces_for_park):
for(x,y,w,h) in faces_for_park:
cv2.rectangle(image,(x,y),(x+w,y+h),(200,255,0),2)
cv2.putText( image, "Face No." + str(num2), ( x, y ), cv2.FONT_HERSHEY_SIMPLEX, 0.5, ( 0, 0, 255 ), 2 )
num2 = num2+1
#for end
#if person go out in 5 sec, reset
if(check == 1):
prex = 0 #value of start (display)
#pprex = pointer(c_int(prex))
startx = 100 #value of start line
#pstartx = pointer(c_int(startx))
endx = 300 #value of end (display)(not line)
gap = 100
countx = 0
sumx = 0 # sum of value where people exist.
totalx = 7 # sum of binary notation(1+2+4 because there are only 3 sections)
resultx = 0 #
while(startx <= endx):
for(x,y,w,h) in faces_for_park: # use this because we have to catch 'every x'.
if(x>prex and x<=startx):
sumx = sumx + pow(2,countx)
break
prex = startx
#print prex
startx = startx + gap
#print startx
countx = countx + 1
time.sleep(0.1)
resultx = totalx - sumx #result notify which section is empty.
if( resultx == 1):
start_1 = end_1 = 0
if( resultx == 2):
start_2 = end_2 = 0
if( resultx == 3):
start_1 = end_1 = 0
start_2 = end_2 = 0
if( resultx == 4):
start_3 = end_3 = 0
if( resultx == 5):
start_1 = end_1 = 0
start_3 = end_3 = 0
if( resultx == 6):
start_2 = end_2 = 0
start_3 = end_3 = 0
check = 0
if(len(faces_for_park) == 0):
print "NO people"
start_1 = start_2 = start_3 = 0
end_1 = end_2 = end_3 = 0
time.sleep(0.1)
fcounter = fcounter + 1
# Calculate and show the FPS####################################################################
fps = fps + 1
sfps = fps / ( time.time() - t_start )
cv2.putText( image, "FPS : " + str( int( sfps ) ), ( 10, 10 ), cv2.FONT_HERSHEY_SIMPLEX, 0.5, ( 0, 0, 255 ), 2 )
cv2.imshow( "Frame", image )
cv2.waitKey( 1 )
# Clear the stream in preparation for the next frame######################################
rawCapture.truncate( 0 )
|
999,566 | a775d075fa2b619a86398ea467627d4b4b2ac08c | import json
from BucketLib.bucket import Bucket
from cb_tools.cb_tools_base import CbCmdBase
from memcached.helper.data_helper import MemcachedClientHelper
class McStat(CbCmdBase):
def __init__(self, shell_conn, username="Administrator",
password="password"):
CbCmdBase.__init__(self, shell_conn, "mcstat",
username=username, password=password)
def reset(self, bucket_name):
"""
Resets mcstat for the specified bucket_name
:param bucket_name: Bucket name to reset stat
"""
cmd = "%s -h localhost:%s -u %s -P %s -b %s reset" \
% (self.cbstatCmd, self.mc_port, self.username, self.password,
bucket_name)
_, error = self._execute_cmd(cmd)
if error:
raise Exception("".join(error))
def get_tenants_stat(self, bucket_name):
cmd = "%s -h localhost:%s -u %s -P %s -b %s tenants" \
% (self.cbstatCmd, self.mc_port, self.username, self.password,
bucket_name)
output, error = self._execute_cmd(cmd)
if error:
raise Exception("".join(error))
return output
def get_user_stat(self, bucket_name, user):
# 'tenants {\"domain\":\"local\",\"user\":\"%s\"}'
cmd = "%s -h localhost:%s -u %s -P %s -b %s tenants" \
% (self.cbstatCmd, self.mc_port, user.username, user.password,
bucket_name)
output, error = self._execute_cmd(cmd)
if error:
raise Exception("{0}".format(error))
return output
def bucket_details(self, server, bucket_name):
client = MemcachedClientHelper.direct_client(
server, Bucket({"name": bucket_name}), 30,
self.username, self.password)
buckets = json.loads(client.stats("bucket_details")[
"bucket details"])["buckets"]
for bucket in buckets:
if bucket["name"] == bucket_name:
return bucket
return None
class Mcthrottle(CbCmdBase):
def __init__(self, shell_conn, username="Administrator",
password="password"):
CbCmdBase.__init__(self, shell_conn, "mcthrottlectl",
username=username, password=password)
def set_throttle_limit(self, bucket, throttle_value=5000):
cmd = "%s --user %s --password %s --throttle-limit %s %s" \
% (self.cbstatCmd, self.username, self.password,
throttle_value, bucket.name)
output, error = self._execute_cmd(cmd)
if error:
raise Exception("".join(error))
return output
|
999,567 | 0df1af32b81b88b2fcddef381ff2c76ed27d0d79 | from django.shortcuts import render
import json
import urllib
import requests
import pyrebase
from django.http import HttpResponse
config = {
'apiKey': "your_api_key",
"authDomain": "domain_name",
"databaseURL": "db_url",
"projectId": "project_id",
"storageBucket": "storage_bucket",
"messagingSenderId": "id_server"
}
firebase = pyrebase.initialize_app(config)
db = firebase.database()
def get_json_data():
data = db.child("results").get()
json_data = {
"results": []
}
for result in data.each():
json_data["results"].append(result.val())
return json_data
def sort_json_data(json_data, value):
i = 0
j = 0
for val in json_data["results"]:
j = 0
count = 0
for sub in val["subjects"]:
if value.lower() not in sub["title"].lower():
json_data["results"][i]["subjects"][j] = None
count = count + 1
j = j + 1
if count == len(val["subjects"]):
json_data["results"][i]["presence"] = None
i = i + 1
return json_data
def get_subject_count(array):
answer = 0
for val in array:
count = 0
if val["presence"]:
for sub in val["subjects"]:
if sub is not None:
count = count + 1
answer = answer + count
return answer
def load_category(array):
category = []
for val in array:
category.append(val["category"])
return category
def first_page(request):
if request.method == "POST":
data = request.POST.dict()
value = data["userData"]
json_data = get_json_data()
json_data = sort_json_data(json_data, value)
array = json_data["results"]
sub_count = get_subject_count(array)
return render(request, "showcard.html", {"arr": array, "total": sub_count})
else:
json_data = get_json_data()
array = json_data["results"]
sub_count = get_subject_count(array)
category = load_category(array)
return render(request, "home.html", {"arr": array, "total": sub_count, "category": category})
def get_children_count(select):
    """Locate the first Firebase record whose "category" equals *select*.

    Returns {"current": index of that record (or the total record count when
    no match exists), "count": number of subjects in the matched record
    (0 when no match)}.
    """
    records = db.child("results").get()
    position = 0
    subject_total = 0
    for record in records.each():
        if record.val()["category"] == select:
            # Stop at the first match; only its subjects are counted.
            subject_total = sum(1 for _ in record.val()["subjects"])
            break
        position += 1
    return {"current": position, "count": subject_total}
def put_data(request):
    """Append a {title, price} subject to the record matching the "select"
    category, writing it at the next free subject index in Firebase.
    """
    category = str(request.GET.getlist('select', default=None)[0])
    title = str(request.GET.getlist('name', default=None)[0])
    price = str(request.GET.getlist('price', default=None)[0])
    location = get_children_count(category)
    new_subject = {"title": title, "price": price}
    (db.child("results").child(location["current"])
       .child("subjects").child(location["count"]).set(new_subject))
    return HttpResponse("Hello")
|
999,568 | e513728fa4fcd925ef4008aa67f8604a10bda3f5 | infile = open("McHenry_County_Precinct_Names.txt", "rt", encoding = 'utf-8')
namelist= infile.read()
infile.close()
# Strip spaces, then split into one precinct name per line.
processednamelist = namelist.replace(" ", "")
# Fix: the original referenced the undefined name 'processednamelistnamelist',
# which raised NameError at runtime.
PrecinctNameList = processednamelist.split('\n')
|
999,569 | c52431b0f6f3ce21628f83a72ae43d09debc73c4 | import ipdb
import numpy as np
import sympy as sp
import scipy as sc
import matplotlib.pyplot as plt
from numpy import sin,cos,tan,sqrt
from numpy.linalg import inv
import pickle
import time
EndTime = 0.55
ChangeInTime = 0.0001
Time = np.arange(0,EndTime+ChangeInTime,ChangeInTime,float)
#NumberOfAdditionalLoops = 99
#NumberOfTrials = 1000
PositionInX = []
PositionInY = []
HeightInInches = 71
DegreeToRadianFactor = np.pi/180
Height = HeightInInches*2.54
ShoulderToElbowLength = 0.186*Height
ForearmLength = 0.146*Height
HandLength = 0.108*Height
ReleaseAngle = DegreeToRadianFactor*30
ShotDepth = 457.2
Angle1Initial = DegreeToRadianFactor*-10
Angle1Final = DegreeToRadianFactor*120
Angle2Initial = DegreeToRadianFactor*85
Angle2Final = DegreeToRadianFactor*84
Angle3Initial = DegreeToRadianFactor*0
Angle3Final = DegreeToRadianFactor*-35
def position_in_x(Angle1, Angle2, Angle3):
    """Forward kinematics: x coordinate of the hand endpoint.

    Sums the x projections of the three planar links (upper arm, forearm,
    hand) for the given shoulder/elbow/wrist angles.
    """
    upper_arm = ShoulderToElbowLength * sin(Angle1)
    forearm = ForearmLength * sin(Angle1 + Angle2)
    hand = HandLength * sin(Angle1 + Angle2 - Angle3)
    return upper_arm + forearm + hand
def position_in_y(Angle1, Angle2, Angle3):
    """Forward kinematics: y coordinate of the hand endpoint.

    Mirrors position_in_x with negative cosine projections (y measured
    downward-positive from the shoulder at angle zero).
    """
    upper_arm = -ShoulderToElbowLength * cos(Angle1)
    forearm = -ForearmLength * cos(Angle1 + Angle2)
    hand = -HandLength * cos(Angle1 + Angle2 - Angle3)
    return upper_arm + forearm + hand
FinalPositionInX = position_in_x(Angle1Final,Angle2Final,Angle3Final)
FinalPositionInY = position_in_y(Angle1Final,Angle2Final,Angle3Final)
def displacement_in_x(FinalPositionInX, ReleaseAngle, ShotDepth):
    """Horizontal ball-to-rim distance (meters) at the moment of release.

    Adds the foot length to the shot depth, then subtracts the endpoint x
    position, the ball-radius offset along the release direction, and the
    hoop radius.  All intermediate values are centimeters; the result is
    converted to meters.
    """
    foot_length = 0.152 * Height
    ball_offset = 11.9 * cos(ReleaseAngle)  # ball radius (cm) projected on x
    hoop_radius = 22.9
    change_in_x = (ShotDepth + foot_length - FinalPositionInX
                   - ball_offset - hoop_radius)
    return change_in_x / 100.0
def displacement_in_y(FinalPositionInY, ReleaseAngle):
    """Vertical ball-to-rim distance (meters) at the moment of release.

    Hoop height minus shoulder height (0.87 * Height), the endpoint y
    position, and the ball-radius offset along the release direction.
    Centimeters in, meters out.
    """
    ball_offset = 11.9 * sin(ReleaseAngle)  # ball radius (cm) projected on y
    hoop_height = 304.8
    change_in_y = hoop_height - 0.87 * Height - FinalPositionInY - ball_offset
    return change_in_y / 100.0
def initial_projectile_velocity(FinalPositionInX, FinalPositionInY,
                                ReleaseAngle, ShotDepth):
    """Release speed (m/s) for the ball to travel from the release point to
    the rim under gravity.

    Derived from standard projectile motion: eliminating time between the
    x and y equations gives
        v**2 = (g/2) * (dx / cos(theta))**2 / (dx * tan(theta) - dy)
    with dx/dy the horizontal/vertical rim displacements and theta the
    release angle.
    """
    dx = displacement_in_x(FinalPositionInX, ReleaseAngle, ShotDepth)
    dy = displacement_in_y(FinalPositionInY, ReleaseAngle)
    numerator = (9.8 / 2.0) * (dx / cos(ReleaseAngle)) ** 2
    denominator = dx * tan(ReleaseAngle) - dy
    return sqrt(numerator / denominator)
InitialProjectileVelocity = initial_projectile_velocity(FinalPositionInX, FinalPositionInY, ReleaseAngle,ShotDepth)
EndpointVelocity = [cos(ReleaseAngle)*InitialProjectileVelocity*100, sin(ReleaseAngle)*InitialProjectileVelocity*100, 0.0]
def jacobian(Angle1Final, Angle2Final, Angle3Final):
    """Endpoint Jacobian of the three-link planar arm, evaluated at the
    release posture.

    Builds the symbolic geometric model (x, y, orientation) with sympy,
    differentiates with respect to the three joint angles, substitutes the
    final angles, and returns the result as a 3x3 float ndarray.
    """
    q1, q2, q3 = sp.symbols('Angle1,Angle2,Angle3', real=True)
    x = (ShoulderToElbowLength * sp.sin(q1)
         + ForearmLength * sp.sin(q1 + q2)
         + HandLength * sp.sin(q1 + q2 - q3))
    y = (-ShoulderToElbowLength * sp.cos(q1)
         - ForearmLength * sp.cos(q1 + q2)
         - HandLength * sp.cos(q1 + q2 - q3))
    orientation = q1 + q2 - q3
    symbolic_jacobian = sp.Matrix([x, y, orientation]).jacobian([q1, q2, q3])
    at_release = symbolic_jacobian.subs(
        [(q1, Angle1Final), (q2, Angle2Final), (q3, Angle3Final)]).evalf()
    return np.array(at_release).astype(float)
JacobianMatrix = jacobian(Angle1Final,Angle2Final,Angle3Final)
AngularVelocities = np.dot(inv(jacobian(Angle1Final,Angle2Final,Angle3Final)),EndpointVelocity)
Angle1Bounds = [Angle1Initial, DegreeToRadianFactor*140]
AngularVelocity1Initial = 0
AngularVelocity1Final = AngularVelocities[0]
Angle2Bounds = [0, DegreeToRadianFactor*135]
AngularVelocity2Initial = 0
AngularVelocity2Final = AngularVelocities[1]
Angle3Bounds = [DegreeToRadianFactor*-90, 0]
AngularVelocity3Initial = 0
AngularVelocity3Final = AngularVelocities[2]
def c_matrix(x1, x2, x3):
    """Tridiagonal system matrix C of the clamped cubic spline equations
    C @ c = y (one interior break), expressed with the interval widths
    h1 = x2 - x1 and h2 = x3 - x2.  Returns a 3x3 float ndarray.
    """
    h1 = x2 - x1
    h2 = x3 - x2
    return np.array([[2.0 * h1, h1, 0.0],
                     [h1, 2.0 * (h1 + h2), h2],
                     [0.0, h2, 2.0 * h2]])
def y_vector(x1, x2, x3, y1, y2, y3, initial_slope, final_slope):
    """Right-hand-side vector y of the clamped cubic spline system C @ c = y.

    Uses the secant slopes of the two intervals and the prescribed endpoint
    slopes.  Returns a length-3 float ndarray.
    """
    secant_1 = (y2 - y1) / (x2 - x1)
    secant_2 = (y3 - y2) / (x3 - x2)
    return np.array([3.0 * (secant_1 - initial_slope),
                     3.0 * (secant_2 - secant_1),
                     3.0 * (final_slope - secant_2)])
def c_coefficients(x1, x2, x3, y1, y2, y3, initial_slope, final_slope):
    """Solve C @ c = y for the c coefficients of the clamped cubic spline
    through (x1,y1), (x2,y2), (x3,y3) with the given endpoint slopes.

    Returns a length-3 float ndarray.  Uses np.linalg.solve rather than
    explicitly inverting C (the original computed inv(C) @ y): same result,
    better numerical conditioning and fewer operations.
    """
    C = c_matrix(x1, x2, x3)
    y = y_vector(x1, x2, x3, y1, y2, y3, initial_slope, final_slope)
    return np.linalg.solve(C, y)
def d_coefficients(x1, x2, x3, CCoefficients):
    """d coefficients of the two cubic pieces, derived from the three
    c coefficients and the interval widths.  Returns a length-2 float ndarray.
    """
    c0, c1, c2 = CCoefficients[0], CCoefficients[1], CCoefficients[2]
    return np.array([(c1 - c0) / (3.0 * (x2 - x1)),
                     (c2 - c1) / (3.0 * (x3 - x2))])
def b_coefficients(x1, x2, x3, y1, y2, y3, CCoefficients, DCoefficients):
    """b coefficients of the two cubic pieces: secant slope of each interval
    corrected by the c and d terms.  CCoefficients needs at least two
    entries, DCoefficients exactly two.  Returns a length-2 float ndarray.
    """
    h1 = x2 - x1
    h2 = x3 - x2
    b0 = (y2 - y1) / h1 - CCoefficients[0] * h1 - DCoefficients[0] * h1 ** 2
    b1 = (y3 - y2) / h2 - CCoefficients[1] * h2 - DCoefficients[1] * h2 ** 2
    return np.array([b0, b1]).astype(float)
def test_b_coefficients(x1, x2, x3, y1, y2, y3, CCoefficients, DCoefficients,
                        expected_slope):
    """Check that the first b coefficient reproduces the expected initial
    slope of the clamped spline.

    Returns True when |b[0] - expected_slope| < 1e-3.  The original carried
    an `assert` statement after the `return`: it was unreachable dead code,
    and its message formatted a tuple without the `%` operator (a TypeError
    had it ever executed), so it has been removed.  Callers wrap this
    function in their own `assert`.
    """
    B = b_coefficients(x1, x2, x3, y1, y2, y3, CCoefficients, DCoefficients)
    return abs(B[0] - expected_slope) < 0.001
def a_coefficients(y1, y2):
    """a coefficients (left-endpoint values) of the two cubic pieces."""
    return np.array([y1, y2], dtype=float)
def spline_coefficients(x1, x2, x3, y1, y2, y3, initial_slope, final_slope):
    """All coefficient arrays (A, B, C, D) of the clamped cubic spline with
    one break, for y = a + b*(x-x0) + c*(x-x0)**2 + d*(x-x0)**3 per piece.

    Only the first two c coefficients are returned; the third is an
    intermediate of the linear system.
    """
    c_all = c_coefficients(x1, x2, x3, y1, y2, y3, initial_slope, final_slope)
    d_vals = d_coefficients(x1, x2, x3, c_all)
    b_vals = b_coefficients(x1, x2, x3, y1, y2, y3, c_all, d_vals)
    a_vals = a_coefficients(y1, y2)
    return a_vals, b_vals, c_all[:2], d_vals
def generate_random_point(xmin, xmax, ymin, ymax):
    """Draw one point uniformly at random from the axis-aligned rectangle
    [xmin, xmax] x [ymin, ymax].  Returns (x, y) floats.
    """
    # Reseed from OS entropy on every call (original behaviour) so repeated
    # runs do not share a deterministic stream.
    np.random.seed()
    return np.random.uniform(xmin, xmax), np.random.uniform(ymin, ymax)
def test_endpoint_slope(b,c,d,x_n_minus_1,x_n,expected_slope):
"""
Takes in the cubic spline coefficients for the derivative of y = a + b*(x-x_n_minus_1) + c*(x-x_n_minus_1)**2 + d*(x-x_n_minus_1)**3
(y' = b + 2*c*(x-x_n_minus_1) + 3*d*(x-x_n_minus_1)**2) for the last piecewise polynomial and tests to see if the expected slope at
the endpoint is equal to the actual endpoint slope. The variable x_n_minus_1 is the initial value of the final piecewise polynomial
and x_n is the final data point. Returns TRUE if they are equal.
"""
actual_slope = b + 2*c*(x_n-x_n_minus_1) + 3*d*(x_n-x_n_minus_1)**2
result = abs(actual_slope-expected_slope)<0.001
return(result)
def test_for_discontinuity(a_n,b_n,c_n,d_n,x_n,x_n_plus_1,y_n_plus_1):
"""
Takes in the coefficients for a cubic spline polynomial y = a_n + b_n*(x-x_n) + c_n*(x-x_n)**2 + d_n*(x-x_n)**3
and tests to see if the final y value for this piecewise polynomial is equal to the initial y value of the next
piecewise polynomial (i.e. when x = x_n_plus_1). The variable x_n is the initial x value of the preceding
polynomial, and x_n_plus_1 is the transition value from one polynomial to the next. y_n_plus_1 is the initial y
value for the next piecewise polynomial.
"""
y_n_final = a_n + b_n*(x_n_plus_1-x_n) + c_n*(x_n_plus_1-x_n)**2 + d_n*(x_n_plus_1-x_n)**3
result = abs(y_n_final-y_n_plus_1)<0.001
return(result)
class Spline:
    """Clamped cubic spline with a single interior break at ``x_break``.

    Piece 1 (x <= x_break): y = a[0] + b[0]*t + c[0]*t**2 + d[0]*t**3, t = x - x_initial
    Piece 2 (x >  x_break): y = a[1] + b[1]*t + c[1]*t**2 + d[1]*t**3, t = x - x_break

    Fixes relative to the original implementation:
      * ``np.float`` (deprecated NumPy 1.20, removed 1.24) replaced with the
        builtin ``float`` — the original raises AttributeError on current NumPy.
      * Complex critical points are detected via the discriminant and skipped.
        The original converted the (possibly complex) root with ``np.float``
        first, which raised TypeError before its ``is_real`` guard could run.
      * A zero cubic coefficient no longer raises ZeroDivisionError: the
        derivative degenerates to a linear equation and is solved as such.
    """
    def __init__(self, a, b, c, d, x_initial, x_break):
        # Coefficient pairs: index 0 -> first piece, index 1 -> second piece.
        self.a = a
        self.b = b
        self.c = c
        self.d = d
        self.x_initial = x_initial
        self.x_break = x_break
    def pp_func(self, X):
        """Evaluate the piecewise polynomial at X (scalar or ndarray)."""
        return np.piecewise(
            X, [X <= self.x_break, X > self.x_break],
            [lambda X: self.a[0] + self.b[0]*(X - self.x_initial)
                + self.c[0]*(X - self.x_initial)**2
                + self.d[0]*(X - self.x_initial)**3,
             lambda X: self.a[1] + self.b[1]*(X - self.x_break)
                + self.c[1]*(X - self.x_break)**2
                + self.d[1]*(X - self.x_break)**3])
    def pp_deriv(self, X):
        """Evaluate the first derivative of the piecewise polynomial at X."""
        return np.piecewise(
            X, [X <= self.x_break, X > self.x_break],
            [lambda X: self.b[0] + 2*self.c[0]*(X - self.x_initial)
                + 3*self.d[0]*(X - self.x_initial)**2,
             lambda X: self.b[1] + 2*self.c[1]*(X - self.x_break)
                + 3*self.d[1]*(X - self.x_break)**2])
    def find_max_and_min(self, x_min, x_max, y_min, y_max):
        """Return (maxima, minima): the y values of interior extrema that
        fall inside [x_min, x_max].

        y_min and y_max are accepted for interface compatibility but unused
        here; bound checking happens in is_within_bounds.
        """
        def critical_points(segment):
            # Real roots of y' = b + 2c*t + 3d*t**2 for the given piece,
            # mapped back to absolute x.  Complex roots yield no points.
            b, c, d = self.b[segment], self.c[segment], self.d[segment]
            origin = self.x_initial if segment == 0 else self.x_break
            if d == 0:
                if c == 0:
                    return []  # constant slope: no critical point
                return [float(origin - b / (2*c))]
            discriminant = 4*c**2 - 12*b*d
            if discriminant < 0:
                return []
            root = discriminant ** 0.5
            return [float(origin + (-2*c + root) / (6*d)),
                    float(origin + (-2*c - root) / (6*d))]
        def in_domain(x, segment):
            # Each piece only owns its own sub-interval of [x_min, x_max].
            if segment == 0:
                return x_min <= x <= self.x_break
            return self.x_break <= x <= x_max
        def curvature(x, segment):
            origin = self.x_initial if segment == 0 else self.x_break
            return 2*self.c[segment] + 6*self.d[segment]*(x - origin)
        maxima, minima = [], []
        for segment in (0, 1):
            for x in critical_points(segment):
                if not in_domain(x, segment):
                    continue
                second_deriv = curvature(x, segment)
                if second_deriv < 0:
                    maxima.append(float(self.pp_func(x)))
                elif second_deriv > 0:
                    minima.append(float(self.pp_func(x)))
                # second_deriv == 0 (saddle) is ignored, as before.
        return maxima, minima
    def is_initial_slope_positive(self, X, cutoff):
        """True when the derivative is non-negative over X[:cutoff]."""
        return min(self.pp_deriv(X[:cutoff])) >= 0
    def is_within_bounds(self, x_min, x_max, y_min, y_max):
        """True when every interior extremum lies inside [y_min, y_max]."""
        maxima, minima = self.find_max_and_min(x_min, x_max, y_min, y_max)
        # An empty list means "no extremum on that side": substitute the
        # bound itself so the comparison passes trivially (original quirk).
        if len(maxima) == 0:
            maxima = y_max
        if len(minima) == 0:
            minima = y_min
        return np.max(maxima) <= y_max and np.min(minima) >= y_min
def clamped_cubic_spline(x_initial, x_final, y_initial, y_final, initial_slope,
                         final_slope, ymin, ymax, X, **options):
    """Monte-Carlo search for clamped cubic splines that stay inside bounds.

    Repeatedly samples a random interior knot (x_rand, y_rand) in
    [x_initial, x_final] x [ymin, ymax], fits a clamped cubic spline through
    (x_initial, y_initial), (x_rand, y_rand), (x_final, y_final) with the
    prescribed endpoint slopes, and keeps the candidate when its interior
    extrema stay inside [ymin, ymax].  Collects 10000 accepted splines and
    returns them as a numpy object array of Spline instances.

    options: angle="Shoulder" or angle="Elbow" additionally requires the
    spline derivative to be non-negative over the first 2501 (resp. 501)
    samples of X, to match observed joint kinematics.

    (The original accumulated results through three near-identical branches
    with duplicated progress-bar code; this version collects into a list and
    keeps one copy of the reporting code.  Same acceptance logic, same
    progress output.)
    """
    NumberOfTrials = 10000
    StartTime = time.time()
    # Per-joint slope constraint and fixed-width progress-bar label.
    if options.get("angle") == "Shoulder":
        slope_cutoff = 2501
        label = "Shoulder: "
    elif options.get("angle") == "Elbow":
        slope_cutoff = 501
        label = "Elbow: "
    else:
        slope_cutoff = None
        label = "Wrist: "
    dof = label + " " * (10 - len(label))
    accepted = []
    while len(accepted) < NumberOfTrials:
        x_rand, y_rand = generate_random_point(x_initial, x_final, ymin, ymax)
        A, B, C, D = spline_coefficients(
            x_initial, x_rand, x_final, y_initial, y_rand, y_final,
            initial_slope, final_slope)
        # Sanity checks on the fitted coefficients.
        assert test_b_coefficients(
            x_initial, x_rand, x_final, y_initial, y_rand, y_final, C, D,
            initial_slope), "Initial slope does not match the expected value"
        assert test_endpoint_slope(
            B[1], C[1], D[1], x_rand, x_final,
            final_slope), "Problem with Endpoint Slope"
        assert test_for_discontinuity(
            A[0], B[0], C[0], D[0], x_initial, x_rand,
            A[1]), "Jump Discontinuity at t = %f!" % x_rand
        candidate = Spline(A, B, C, D, x_initial, x_rand)
        if slope_cutoff is not None \
                and not candidate.is_initial_slope_positive(X, slope_cutoff):
            continue
        if not candidate.is_within_bounds(x_initial, x_final, ymin, ymax):
            continue
        accepted.append(candidate)
        i = len(accepted)
        filled = int((i + 1) / (NumberOfTrials / 50))
        statusbar = '[' + '\u25a0' * filled + '\u25a1' * (50 - filled) + '] '
        print(dof + statusbar + '{0:1.1f}'.format(i / NumberOfTrials * 100)
              + '% complete, '
              + '{0:1.1f}'.format(time.time() - StartTime) + 'sec \r', end='')
    print('\n')
    return np.array(accepted)
def run_N_loops(NumberOfLoops):
    """Run the spline search NumberOfLoops times, pickling each loop's
    shoulder/elbow/wrist spline sets to 'LoopNumber<k>.pkl'.
    """
    for LoopNumber in range(NumberOfLoops):
        shoulder_splines = clamped_cubic_spline(
            0, EndTime, Angle1Initial, Angle1Final,
            AngularVelocity1Initial, AngularVelocity1Final,
            Angle1Bounds[0], Angle1Bounds[1], Time,
            angle="Shoulder")
        elbow_splines = clamped_cubic_spline(
            0, EndTime, Angle2Initial, Angle2Final,
            AngularVelocity2Initial, AngularVelocity2Final,
            Angle2Bounds[0], Angle2Bounds[1], Time,
            angle="Elbow")
        wrist_splines = clamped_cubic_spline(
            0, EndTime, Angle3Initial, Angle3Final,
            AngularVelocity3Initial, AngularVelocity3Final,
            Angle3Bounds[0], Angle3Bounds[1], Time)
        # One fewer leading dash once the loop number reaches double digits,
        # keeping the separator line a constant width.
        leading = '-' * 37 if LoopNumber <= 8 else '-' * 36
        print(leading + 'End of Loop ' + str(LoopNumber + 1) + '-' * 37)
        pickle.dump([shoulder_splines, elbow_splines, wrist_splines],
                    open('LoopNumber' + str(LoopNumber + 1) + '.pkl', 'wb'),
                    pickle.HIGHEST_PROTOCOL)
run_N_loops(10)
|
999,570 | 761213381bf54047b49a9e79c3916a90c7184e58 | from common_fixtures import * # NOQA
logger = logging.getLogger(__name__)
def test_dns_activate_svc_dns_consumed_svc_link(client):
    """Create a stack with one client service, two target services and a DNS
    record linked to both targets, then validate resolution via the DNS name.
    """
    port = "31100"
    service_scale = 1
    consumed_service_scale = 2
    env, service, consumed_service, consumed_service1, dns = \
        create_env_with_2_svc_dns(
            client, service_scale, consumed_service_scale, port)
    validate_dns_service(
        client, service, [consumed_service, consumed_service1], port,
        dns.name)
    delete_all(client, [env])
def test_dns_cross_link(client):
    """Same DNS validation with the extra flag to create_env_with_2_svc_dns
    (presumably placing the target services in separate stacks — note the
    cleanup deletes each target's stack individually; confirm in fixture).
    """
    port = "31101"
    service_scale = 1
    consumed_service_scale = 2
    env, service, consumed_service, consumed_service1, dns = \
        create_env_with_2_svc_dns(
            client, service_scale, consumed_service_scale,
            port, True)
    validate_dns_service(
        client, service, [consumed_service, consumed_service1], port,
        dns.name)
    delete_all(client, [env, get_env(client, consumed_service),
                        get_env(client, consumed_service1), dns])
def test_dns_service_scale_up(client):
    """Scale the client service 1 -> 3 and re-validate DNS resolution."""
    port = "31107"
    service_scale = 1
    consumed_service_scale = 2
    final_service_scale = 3
    env, service, consumed_service, consumed_service1, dns = \
        create_env_with_2_svc_dns(
            client, service_scale, consumed_service_scale, port)
    validate_dns_service(
        client, service, [consumed_service, consumed_service1], port,
        dns.name)
    service = client.update(service, scale=final_service_scale,
                            name=service.name)
    service = client.wait_success(service, 120)
    assert service.state == "active"
    assert service.scale == final_service_scale
    validate_dns_service(
        client, service, [consumed_service, consumed_service1], port,
        dns.name)
    delete_all(client, [env])
def test_dns_services_scale_down(client):
    """Scale the client service 3 -> 1 and re-validate DNS resolution."""
    port = "31108"
    service_scale = 3
    consumed_service_scale = 2
    final_service_scale = 1
    env, service, consumed_service, consumed_service1, dns = \
        create_env_with_2_svc_dns(
            client, service_scale, consumed_service_scale, port)
    validate_dns_service(
        client, service, [consumed_service, consumed_service1], port,
        dns.name)
    service = client.update(service, scale=final_service_scale,
                            name=service.name)
    service = client.wait_success(service, 120)
    assert service.state == "active"
    assert service.scale == final_service_scale
    validate_dns_service(
        client, service, [consumed_service, consumed_service1], port,
        dns.name)
    delete_all(client, [env])
def test_dns_consumed_services_scale_up(client):
    """Scale a target (consumed) service 2 -> 4 and re-validate DNS."""
    port = "31109"
    service_scale = 1
    consumed_service_scale = 2
    final_consumed_service_scale = 4
    env, service, consumed_service, consumed_service1, dns = \
        create_env_with_2_svc_dns(
            client, service_scale, consumed_service_scale, port)
    validate_dns_service(
        client, service, [consumed_service, consumed_service1], port,
        dns.name)
    consumed_service = client.update(consumed_service,
                                     scale=final_consumed_service_scale,
                                     name=consumed_service.name)
    consumed_service = client.wait_success(consumed_service, 120)
    assert consumed_service.state == "active"
    assert consumed_service.scale == final_consumed_service_scale
    validate_dns_service(
        client, service, [consumed_service, consumed_service1], port,
        dns.name)
    delete_all(client, [env])
def test_dns_consumed_services_scale_down(client):
    """Scale a target (consumed) service 3 -> 1 and re-validate DNS."""
    port = "3110"
    service_scale = 2
    consumed_service_scale = 3
    final_consumed_service_scale = 1
    env, service, consumed_service, consumed_service1, dns = \
        create_env_with_2_svc_dns(
            client, service_scale, consumed_service_scale, port)
    validate_dns_service(
        client, service, [consumed_service, consumed_service1], port,
        dns.name)
    consumed_service = client.update(consumed_service,
                                     scale=final_consumed_service_scale,
                                     name=consumed_service.name)
    consumed_service = client.wait_success(consumed_service, 120)
    assert consumed_service.state == "active"
    assert consumed_service.scale == final_consumed_service_scale
    validate_dns_service(
        client, service, [consumed_service, consumed_service1], port,
        dns.name)
    delete_all(client, [env])
def test_dns_consumed_services_stop_start_instance(client,
                                                   socat_containers):
    """Stop one target container from its host; after the service reconciles
    its scale, DNS resolution must still cover both targets.
    """
    port = "3111"
    service_scale = 1
    consumed_service_scale = 3
    env, service, consumed_service, consumed_service1, dns = \
        create_env_with_2_svc_dns(
            client, service_scale, consumed_service_scale, port)
    validate_dns_service(
        client, service, [consumed_service, consumed_service1], port,
        dns.name)
    container_name = get_container_name(env, consumed_service, 2)
    containers = client.list_container(name=container_name)
    assert len(containers) == 1
    container = containers[0]
    # Stop instance
    stop_container_from_host(client, container)
    consumed_service = wait_state(client, consumed_service, "active")
    wait_for_scale_to_adjust(client, consumed_service)
    validate_dns_service(
        client, service, [consumed_service, consumed_service1], port,
        dns.name)
    delete_all(client, [env])
def test_dns_consumed_services_restart_instance(client):
    """Restart one target container through the API and re-validate DNS."""
    port = "3112"
    service_scale = 1
    consumed_service_scale = 3
    env, service, consumed_service, consumed_service1, dns = \
        create_env_with_2_svc_dns(
            client, service_scale, consumed_service_scale, port)
    validate_dns_service(
        client, service, [consumed_service, consumed_service1], port,
        dns.name)
    container_name = get_container_name(env, consumed_service, 2)
    containers = client.list_container(name=container_name)
    assert len(containers) == 1
    container = containers[0]
    # Restart instance
    container = client.wait_success(container.restart(), 120)
    assert container.state == 'running'
    validate_dns_service(
        client, service, [consumed_service, consumed_service1], port,
        dns.name)
    delete_all(client, [env])
def test_dns_consumed_services_delete_instance(client):
    """Delete one target container; wait for the service to replace it, then
    re-validate DNS resolution.
    """
    port = "3113"
    service_scale = 1
    consumed_service_scale = 3
    env, service, consumed_service, consumed_service1, dns = \
        create_env_with_2_svc_dns(
            client, service_scale, consumed_service_scale, port)
    validate_dns_service(
        client, service, [consumed_service, consumed_service1], port,
        dns.name)
    container_name = get_container_name(env, consumed_service, 1)
    containers = client.list_container(name=container_name)
    assert len(containers) == 1
    container = containers[0]
    # Delete instance
    container = client.wait_success(client.delete(container))
    assert container.state == 'removed'
    wait_for_scale_to_adjust(client, consumed_service)
    validate_dns_service(
        client, service, [consumed_service, consumed_service1], port,
        dns.name)
    delete_all(client, [env])
def test_dns_consumed_services_deactivate_activate(client):
    """Deactivate then reactivate a target service and re-validate DNS."""
    port = "3114"
    service_scale = 1
    consumed_service_scale = 2
    env, service, consumed_service, consumed_service1, dns = \
        create_env_with_2_svc_dns(
            client, service_scale, consumed_service_scale, port)
    validate_dns_service(
        client, service, [consumed_service, consumed_service1], port,
        dns.name)
    consumed_service = consumed_service.deactivate()
    consumed_service = client.wait_success(consumed_service, 120)
    assert consumed_service.state == "inactive"
    wait_until_instances_get_stopped(client, consumed_service)
    consumed_service = consumed_service.activate()
    consumed_service = client.wait_success(consumed_service, 120)
    assert consumed_service.state == "active"
    validate_dns_service(
        client, service, [consumed_service, consumed_service1], port,
        dns.name)
    delete_all(client, [env])
def test_dns_service_deactivate_activate(client):
    """Deactivate then reactivate the client service and re-validate DNS."""
    port = "3115"
    service_scale = 1
    consumed_service_scale = 2
    env, service, consumed_service, consumed_service1, dns = \
        create_env_with_2_svc_dns(
            client, service_scale, consumed_service_scale, port)
    validate_dns_service(
        client, service, [consumed_service, consumed_service1], port,
        dns.name)
    service = service.deactivate()
    service = client.wait_success(service, 120)
    assert service.state == "inactive"
    wait_until_instances_get_stopped(client, service)
    service = service.activate()
    service = client.wait_success(service, 120)
    assert service.state == "active"
    # Give the reactivated containers time to come up before validating.
    time.sleep(restart_sleep_interval)
    validate_dns_service(
        client, service, [consumed_service, consumed_service1], port,
        dns.name)
    delete_all(client, [env])
def test_dns_deactivate_activate_environment(client):
    """Deactivate then reactivate every service in the stack and re-validate
    DNS resolution afterwards.
    """
    port = "3116"
    service_scale = 1
    consumed_service_scale = 2
    env, service, consumed_service, consumed_service1, dns = \
        create_env_with_2_svc_dns(
            client, service_scale, consumed_service_scale, port)
    validate_dns_service(
        client, service, [consumed_service, consumed_service1], port,
        dns.name)
    env = env.deactivateservices()
    service = client.wait_success(service, 120)
    assert service.state == "inactive"
    consumed_service = client.wait_success(consumed_service, 120)
    assert consumed_service.state == "inactive"
    wait_until_instances_get_stopped(client, service)
    wait_until_instances_get_stopped(client, consumed_service)
    env = env.activateservices()
    service = client.wait_success(service, 120)
    assert service.state == "active"
    consumed_service = client.wait_success(consumed_service, 120)
    assert consumed_service.state == "active"
    # Give the reactivated containers time to come up before validating.
    time.sleep(restart_sleep_interval)
    validate_dns_service(
        client, service, [consumed_service, consumed_service1], port,
        dns.name)
    delete_all(client, [env])
def test_dns_add_remove_servicelinks(client):
port = "3117"
service_scale = 1
consumed_service_scale = 2
env, service, consumed_service, consumed_service1, dns = \
create_env_with_2_svc_dns(
client, service_scale, consumed_service_scale, port)
validate_dns_service(
client, service, [consumed_service, consumed_service1], port,
dns.name)
# Add another service to environment
launch_config = {"image": WEB_IMAGE_UUID}
random_name = random_str()
consumed_service_name = random_name.replace("-", "")
consumed_service2 = client.create_service(name=consumed_service_name,
stackId=env.id,
launchConfig=launch_config,
scale=2)
consumed_service2 = client.wait_success(consumed_service2, 120)
assert consumed_service2.state == "active"
# Add another service link
dns = client.update(dns,
serviceLinks=[
{"type": "link", "name": consumed_service.name},
{"type": "link", "name": consumed_service1.name},
{"type": "link", "name": consumed_service2.name}])
dns = client.wait_success(dns, timeout=60)
validate_dns_service(
client, service, [consumed_service, consumed_service1,
consumed_service2], port, dns.name)
# Remove existing service link to the service
dns = client.update(dns,
serviceLinks=[
{"type": "link", "name": consumed_service1.name},
{"type": "link", "name": consumed_service2.name}])
dns = client.wait_success(dns, timeout=60)
validate_dns_service(
client, service, [consumed_service1, consumed_service2],
port, dns.name)
delete_all(client, [env])
def test_dns_services_delete_and_add_consumed_service(client):
    """DNS resolves correctly after a consumed service is deleted and a new one is linked."""
    port = "3119"
    service_scale = 2
    consumed_service_scale = 2
    env, service, consumed_service, consumed_service1, dns = \
        create_env_with_2_svc_dns(
            client, service_scale, consumed_service_scale, port)
    validate_dns_service(
        client, service, [consumed_service, consumed_service1], port,
        dns.name)
    # Delete consume service
    consumed_service = client.wait_success(client.delete(consumed_service))
    assert consumed_service.state == "removed"
    # The DNS entry should now resolve only to the remaining consumed service.
    validate_dns_service(client, service, [consumed_service1], port,
                         dns.name)
    # Add another consume service and link the service to this newly created
    # service
    launch_config = {"image": WEB_IMAGE_UUID}
    random_name = random_str()
    service_name = random_name.replace("-", "")
    consumed_service2 = client.create_service(name=service_name,
                                              stackId=env.id,
                                              launchConfig=launch_config,
                                              scale=1)
    consumed_service2 = client.wait_success(consumed_service2)
    assert consumed_service2.state == "active"
    dns = client.update(dns,
                        serviceLinks=[
                            {"type": "link", "name": consumed_service1.name},
                            {"type": "link", "name": consumed_service2.name}])
    dns = client.wait_success(dns, timeout=60)
    validate_dns_service(
        client, service, [consumed_service1, consumed_service2], port,
        dns.name)
    delete_all(client, [env])
def test_dns_services_stop_start_instance(client,
                                          socat_containers):
    """DNS resolves correctly after a service container is stopped and scale recovers."""
    port = "3120"
    service_scale = 2
    consumed_service_scale = 2
    env, service, consumed_service, consumed_service1, dns = \
        create_env_with_2_svc_dns(
            client, service_scale, consumed_service_scale, port)
    validate_dns_service(
        client, service, [consumed_service, consumed_service1], port,
        dns.name)
    # Pick the service's second container instance.
    container_name = get_container_name(env, service, 2)
    containers = client.list_container(name=container_name)
    assert len(containers) == 1
    service_instance = containers[0]
    # Stop service instance
    stop_container_from_host(client, service_instance)
    service = client.wait_success(service)
    # Wait for the scheduler to bring the service back to its configured scale.
    wait_for_scale_to_adjust(client, service)
    validate_dns_service(
        client, service, [consumed_service, consumed_service1], port,
        dns.name)
    delete_all(client, [env])
def test_dns_services_restart_instance(client):
    """DNS resolves correctly after restarting one of the service's containers."""
    port = "3121"
    service_scale = 2
    consumed_service_scale = 2
    env, service, consumed_service, consumed_service1, dns = \
        create_env_with_2_svc_dns(
            client, service_scale, consumed_service_scale, port)
    validate_dns_service(
        client, service, [consumed_service, consumed_service1], port,
        dns.name)
    container_name = get_container_name(env, service, 2)
    containers = client.list_container(name=container_name)
    assert len(containers) == 1
    service_instance = containers[0]
    # Restart service instance
    service_instance = client.wait_success(service_instance.restart(), 120)
    assert service_instance.state == 'running'
    # Give DNS records time to settle after the restart.
    time.sleep(restart_sleep_interval)
    validate_dns_service(
        client, service, [consumed_service, consumed_service1], port,
        dns.name)
    delete_all(client, [env])
def test_dns_service_restore_instance(client):
    """DNS resolves correctly after a service container is deleted and scale recovers."""
    port = "3122"
    service_scale = 2
    consumed_service_scale = 2
    env, service, consumed_service, consumed_service1, dns = \
        create_env_with_2_svc_dns(
            client, service_scale, consumed_service_scale, port)
    validate_dns_service(
        client, service, [consumed_service, consumed_service1], port,
        dns.name)
    container_name = get_container_name(env, service, 2)
    containers = client.list_container(name=container_name)
    assert len(containers) == 1
    service_instance = containers[0]
    # delete service instance
    service_instance = client.wait_success(client.delete(service_instance))
    assert service_instance.state == 'removed'
    # The scheduler should replace the deleted container to restore scale.
    wait_for_scale_to_adjust(client, service)
    validate_dns_service(
        client, service, [consumed_service, consumed_service1], port,
        dns.name)
    delete_all(client, [env])
def test_dns_dns_deactivate_activate(client):
    """DNS resolves correctly after the DNS service is deactivated and reactivated."""
    port = "3114"
    service_scale = 1
    consumed_service_scale = 2
    env, service, consumed_service, consumed_service1, dns = \
        create_env_with_2_svc_dns(
            client, service_scale, consumed_service_scale, port)
    validate_dns_service(
        client, service, [consumed_service, consumed_service1], port,
        dns.name)
    # Cycle the DNS service through inactive and back to active.
    dns = dns.deactivate()
    dns = client.wait_success(dns, 120)
    assert dns.state == "inactive"
    dns = dns.activate()
    dns = client.wait_success(dns, 120)
    assert dns.state == "active"
    validate_dns_service(
        client, service, [consumed_service, consumed_service1], port,
        dns.name)
    delete_all(client, [env])
def test_dns_svc_managed_cosumed_service_hostnetwork(client):
    """DNS works when the client service is managed-network and consumed services use host networking."""
    port = "3118"
    service_scale = 1
    consumed_service_scale = 1
    env, service, consumed_service, consumed_service1, dns = \
        create_env_with_2_svc_dns(
            client, service_scale, consumed_service_scale, port,
            isnetworkModeHost_svc=False, isnetworkModeHost_consumed_svc=True)
    validate_dns_service(
        client, service, [consumed_service, consumed_service1], port,
        dns.name)
    delete_all(client, [env])
def test_dns_svc_hostnetwork_cosumed_service_hostnetwork(client):
    """DNS works when both the client service and consumed services use host networking."""
    port = "3119"
    service_scale = 1
    consumed_service_scale = 1
    env, service, consumed_service, consumed_service1, dns = \
        create_env_with_2_svc_dns(
            client, service_scale, consumed_service_scale, port,
            isnetworkModeHost_svc=True, isnetworkModeHost_consumed_svc=True)
    # Host-network mode: resolve via the fully-qualified name <dns>.<stack>.<fqdn>.
    dns_name = dns.name + "." + env.name + "." + RANCHER_FQDN
    # NOTE(review): validation uses port "33" instead of the service port here —
    # presumably the host-mode access port used by validate_dns_service; confirm.
    validate_dns_service(
        client, service, [consumed_service, consumed_service1], "33",
        dns_name)
    delete_all(client, [env])
def test_dns_svc_hostnetwork_cosumed_service_managednetwork(
        client):
    """DNS works when the client service uses host networking and consumed services are managed-network."""
    port = "3119"
    service_scale = 1
    consumed_service_scale = 1
    env, service, consumed_service, consumed_service1, dns = \
        create_env_with_2_svc_dns(
            client, service_scale, consumed_service_scale, port,
            isnetworkModeHost_svc=True, isnetworkModeHost_consumed_svc=False)
    # Host-network mode: resolve via the fully-qualified name <dns>.<stack>.<fqdn>.
    dns_name = dns.name + "." + env.name + "." + RANCHER_FQDN
    # NOTE(review): validation uses port "33" instead of the service port here —
    # presumably the host-mode access port used by validate_dns_service; confirm.
    validate_dns_service(
        client, service, [consumed_service, consumed_service1], "33",
        dns_name)
    delete_all(client, [env])
|
999,571 | b4e7a56146fbe12a9ce4716885eeeea366330232 | #-------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: dzylc
#
# Created: 26-06-2014
# Copyright: (c) dzylc 2014
# Licence: <your licence>
#-------------------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt
def service():
    """Demonstrate Laplacian-kernel least-squares fitting of y = t*sin(t).

    Fits weights ``w`` so that ``g.dot(w)`` approximates ``y``, where
    ``g[m, n] = exp(-|t[m] - t[n]|)``, then evaluates the fit on a dense grid
    and plots it against the true curve.
    """
    # Training samples of the target curve.
    t = np.arange(-9, 9)
    y = t * np.sin(t)
    plt.plot(t, y, 'ro')
    # Gram matrix of the Laplacian kernel, built by broadcasting
    # (replaces the original O(n^2) nested Python loops).
    g = np.exp(-np.abs(t[:, None] - t[None, :]))
    # Normal-equation solve w = (g^T g)^-1 g^T y; np.linalg.solve is more
    # numerically stable than forming the explicit inverse.
    w = np.linalg.solve(np.dot(g.T, g), np.dot(g.T, y))
    # Dense evaluation grid and its kernel features against the training points.
    tt = np.arange(-9, 9, 0.01)
    g2 = np.exp(-np.abs(tt[:, None] - t[None, :]))
    yy = np.dot(g2, w)
    # print statements converted to function calls (works on Python 2 and 3).
    print(tt)
    print('/' * 80)
    print(g2)
    print('*' * 80)
    print(yy)
    # True curve (red line) versus the kernel fit.
    line = tt * np.sin(tt)
    plt.plot(tt, line, 'r')
    plt.plot(tt, yy)
    plt.show()
def main():
    """Entry point: run the kernel-fit demo."""
    service()


if __name__ == '__main__':
    # Bug fix: previously main() was an empty `pass` and service() ran
    # unconditionally at import time, defeating the __main__ guard.
    main()
|
999,572 | 2340e5567550de2b60ae9c1dfbc5e4b00f6cce98 | import math
pi = math.pi  # The number pi, approximately 3.141592653589793
def calculate_sphere_volume(r):
    """Return the volume of a sphere of radius *r* (4/3 * pi * r**3).

    Accepts anything convertible to float (int, float, numeric string).
    Non-numeric input is returned unchanged; a negative radius yields an
    error-message string instead of a volume.
    """
    try:
        # Bug fix: the original converted r but then kept using the raw value,
        # so numeric strings crashed on the comparison ('5' >= 0 -> TypeError).
        # float() also covers non-integer radii and generalizes '2.5'-style input.
        x = float(r)
    except (ValueError, TypeError):
        # Non-numeric input: echo it back (original contract for bad input).
        return r
    if x >= 0:
        return 4 / 3 * math.pi * x ** 3
    return 'Радиус сферы не может быть отрицательным'
# Demo: non-numeric input is returned unchanged.
print(calculate_sphere_volume('for'))
|
999,573 | 0cd341324cf75b566935fcc79d196a554c66e4ce | # coding: utf-8
from argparse import ArgumentParser
from collections import OrderedDict
import datetime as dt
import logging
import sys
from path_helpers import path
import si_prefix as si
from .. import pformat_dict
from ..commands import (DEFAULT_INDEX_HOST, freeze, get_plugins_directory,
install, SERVER_URL_TEMPLATE, uninstall, search)
from ..hooks import on_plugin_install
logger = logging.getLogger(__name__)
# Default locations derived from the MicroDrop plugins directory.
default_plugins_directory = get_plugins_directory()
default_config_path = (default_plugins_directory.parent
                       .joinpath('microdrop.ini'))
# Parsers that may be reused by other modules.
LOG_PARSER = ArgumentParser(add_help=False)
LOG_PARSER.add_argument('-l', '--log-level', default='error',
                        choices=['error', 'debug', 'info'])
# (flags, kwargs) pair shared between CONFIG_PARSER and the mutually
# exclusive group below.
CONFIG_PARSER_ARGS = (('-c', '--config-file'),
                      dict(type=path, help='MicroDrop config file '
                           '(default="{default}").'
                           .format(default=default_config_path)))
CONFIG_PARSER = ArgumentParser(add_help=False)
CONFIG_PARSER.add_argument(*CONFIG_PARSER_ARGS[0], **CONFIG_PARSER_ARGS[1])
SERVER_PARSER = ArgumentParser(add_help=False)
SERVER_PARSER.add_argument('-s', '--server-url',
                           default=DEFAULT_INDEX_HOST, help='MicroDrop '
                           'plugin index URL (default="%(default)s")')
PLUGINS_PARSER = ArgumentParser(add_help=False)
PLUGINS_PARSER.add_argument('plugin', nargs='+')
# --config-file and --plugins-directory are mutually exclusive ways to locate
# the plugins directory.
PLUGINS_DIR_PARSER = ArgumentParser(add_help=False)
mutex_path = PLUGINS_DIR_PARSER.add_mutually_exclusive_group()
mutex_path.add_argument(*CONFIG_PARSER_ARGS[0], **CONFIG_PARSER_ARGS[1])
mutex_path.add_argument('-d', '--plugins-directory', type=path,
                        help='MicroDrop plugins directory '
                        '(default="{default}").'
                        .format(default=default_plugins_directory))
# Top-level parser with one subcommand per plugin-manager action.
MPM_PARSER = ArgumentParser(add_help=False, parents=[LOG_PARSER,
                                                     PLUGINS_DIR_PARSER])
subparsers = MPM_PARSER.add_subparsers(help='help for subcommand',
                                       dest='command')
install_parser = subparsers.add_parser('install', help='Install plugins.',
                                       parents=[SERVER_PARSER])
install_parser.add_argument('--no-on-install', action='store_true',
                            help='Do not run `on_plugin_install` hook after '
                            'installing plugin')
plugin_group = install_parser.add_mutually_exclusive_group(required=True)
plugin_group.add_argument('-r', '--requirements-file', type=path)
plugin_group.add_argument('plugin', nargs='*', default=[])
search_parser = subparsers.add_parser('search', help='Search server for '
                                      'plugin.', parents=[SERVER_PARSER])
search_parser.add_argument('plugin')
subparsers.add_parser('uninstall', help='Uninstall plugins.',
                      parents=[PLUGINS_PARSER])
subparsers.add_parser('freeze', help='Output installed packages in '
                      'requirements format.')
hook_parser = subparsers.add_parser('hook', help='Execute plugin hook')
hook_parser.add_argument('hook', choices=['on_install'], help='Plugin hook')
hook_parser.add_argument('plugin', nargs='*')
def parse_args(args=None):
    '''Parse command-line arguments.

    Parameters
    ----------
    args : list, optional
        Argument strings to parse.  When ``None``, argparse falls back to
        ``sys.argv[1:]``.

    Returns
    -------
    argparse.Namespace
        Parsed arguments.
    '''
    parser = ArgumentParser(description='MicroDrop plugin manager',
                            parents=[MPM_PARSER])
    # Bug fix: the original accepted *args* but always parsed sys.argv,
    # silently ignoring an explicitly supplied argument list.
    return parser.parse_args(args)
def validate_args(args):
    '''
    Apply custom validation and actions based on parsed arguments.
    Parameters
    ----------
    args : argparse.Namespace
        Result from ``parse_args`` method of ``argparse.ArgumentParser``
        instance.
    Returns
    -------
    argparse.Namespace
        Reference to input ``args``, which have been validated/updated.
    '''
    # Configure root logging from the --log-level option.
    logging.basicConfig(level=getattr(logging, args.log_level.upper()))
    if getattr(args, 'command', None) == 'install':
        # `install` needs either a readable requirements file or >= 1 plugin.
        # (Python 2 print-to-stderr syntax.)
        if args.requirements_file and not args.requirements_file.isfile():
            print >> sys.stderr, ('Requirements file not available: {}'
                                  .format(args.requirements_file))
            raise SystemExit(-1)
        elif not args.plugin and not args.requirements_file:
            print >> sys.stderr, ('Requirements file or at least one plugin '
                                  'must be specified.')
            raise SystemExit(-2)
    if hasattr(args, 'server_url'):
        # Expand the bare host into the full index-server URL template.
        logger.debug('Using MicroDrop index server: "%s"', args.server_url)
        args.server_url = SERVER_URL_TEMPLATE % args.server_url
    # Resolve the plugins directory: default, derived from config file, or
    # explicitly given (mutually exclusive options; see PLUGINS_DIR_PARSER).
    if all([args.plugins_directory is None,
            args.config_file is None]):
        args.plugins_directory = get_plugins_directory()
    elif args.plugins_directory is None:
        args.config_file = args.config_file.realpath()
        args.plugins_directory = get_plugins_directory(config_path=
                                                       args.config_file)
    else:
        args.plugins_directory = args.plugins_directory.realpath()
    return args
def main(args=None):
    '''Entry point for the ``mpm`` command line interface (Python 2).

    Dispatches on ``args.command``: ``freeze``, ``hook``, ``install``,
    ``search`` or ``uninstall``.
    '''
    if args is None:
        args = parse_args()
    args = validate_args(args)
    logger.debug('Arguments: %s', args)
    if args.command == 'freeze':
        # List installed plugins in requirements-file format.
        print '\n'.join(freeze(plugins_directory=args.plugins_directory))
    elif args.command == 'hook':
        # No plugins named: run the hook for every installed plugin directory.
        if not args.plugin:
            plugin_paths = args.plugins_directory.dirs()
        else:
            plugin_paths = [args.plugins_directory.joinpath(p)
                            for p in args.plugin]
        print 50 * '*'
        print '# Processing `on_install` hook for: #\n'
        print '\n'.join([' - {}{}'.format(p.name, '' if p.exists()
                                          else ' (not found)')
                         for p in plugin_paths])
        print ''
        if args.hook == 'on_install':
            for plugin_path_i in plugin_paths:
                print 50 * '-'
                if plugin_path_i.exists():
                    on_plugin_install(plugin_path_i)
                else:
                    print >> sys.stderr, '[warning] Skipping missing plugin'
    elif args.command == 'install':
        # A requirements file replaces the positional plugin list;
        # lines starting with '#' are treated as comments.
        if args.requirements_file:
            args.plugin = [line.strip() for line in
                           args.requirements_file.lines()
                           if not line.startswith('#')]
        for plugin_i in args.plugin:
            try:
                path_i, meta_i = install(plugin_package=plugin_i,
                                         plugins_directory=
                                         args.plugins_directory,
                                         server_url=args.server_url)
                if not args.no_on_install:
                    on_plugin_install(path_i)
            except KeyError, exception:
                # Per-plugin failure: report and keep installing the rest.
                print '[{}] {}'.format(plugin_i, exception.message)
            except ValueError, exception:
                print exception.message
                continue
    elif args.command == 'search':
        try:
            plugin_name, releases = search(plugin_package=args.plugin,
                                           server_url=args.server_url)
            # Build a column-oriented table: plugin name (first row only),
            # version, upload time and human-readable size per release.
            release_info = OrderedDict()
            release_info['plugin_name'] = [plugin_name] + ((len(releases) - 1)
                                                           * [''])
            release_info['version'] = releases.keys()
            for k in ['upload_time', 'size']:
                release_info[k] = [r[k] for r in releases.values()]
            release_info['upload_time'] = map(lambda timestamp: dt.datetime
                                              .strptime(timestamp,
                                                        r'%Y-%m-%dT'
                                                        r'%H:%M:%S.%f')
                                              .strftime('%Y-%m-%d %H:%M'),
                                              release_info['upload_time'])
            release_info['size'] = map(lambda s:
                                       si.si_format(s, precision=0, format_str=
                                                    '{value} {prefix}B'),
                                       release_info['size'])
            print '\n' + pformat_dict(release_info)
        except KeyError, exception:
            print >> sys.stderr, exception.message
    elif args.command == 'uninstall':
        for plugin_i in args.plugin:
            uninstall(plugin_package=plugin_i,
                      plugins_directory=args.plugins_directory)
|
999,574 | 5a2fe7dccab72dc1ae350ec7b1e8d6ac441de776 | import os
import math
import numpy as np
import pandas as pd
from utils import data_loader
import utils.display as display
from sklearn.metrics.pairwise import haversine_distances
from scipy.stats import norm
from dask.distributed import Client
from dask import delayed
def find_sequence(c_s, weight, phi, i, k, a_i, trajectory_len):
    """Viterbi-style DP: best candidate sequence forced through candidate k at point i.

    Parameters
    ----------
    c_s : pandas.DataFrame
        Candidate points with a 't_p_i' (trajectory point index) column and an
        'epsilon' score column.  Assumes rows are sorted by 't_p_i' —
        ``searchsorted`` requires it (TODO confirm against caller).
    weight : per-trajectory-point weights; ``weight[0]`` seeds the first point.
    phi : list of transition score matrices between consecutive trajectory points.
    i, k : the trajectory point and candidate the path must pass through.
    a_i : number of candidates at point i.
    trajectory_len : number of trajectory points.

    Returns
    -------
    (best_score, path)
        Best terminal DP score and the per-point candidate offsets (reversed
        back into forward order).
    """
    # Work on copies so the caller's transition matrices are not mutated.
    t_phi = [item.copy() for item in phi]
    # c_s['f'] = np.NINF
    # c_s['pre'] = None
    # f: best score per candidate row; pre: back-pointer to previous candidate.
    f = [np.NINF] * len(c_s.index)
    pre = [None] * len(c_s.index)
    # set f of first trajectory point
    s = c_s['t_p_i'].searchsorted(0, side='left')
    e = c_s['t_p_i'].searchsorted(0, side='right')
    # c_s.loc[0, 'f'] = weight[0] * c_s.loc[0, 'epsilon'].values
    f[s:e] = weight[0] * c_s.iloc[s:e, c_s.columns.get_loc('epsilon')]
    # Force the path through candidate k at point i by making every other
    # candidate there unreachable (score or incoming transition = -inf).
    for s in range(a_i):
        if s != k:
            if 0 == i:
                f[s] = np.NINF
                # c_s.loc[(i, s), 'f'] = np.NINF
            else:
                t_phi[i - 1][:, s] = np.NINF
    # Forward pass over trajectory points 1..trajectory_len-1.
    j_pre_s = 0
    j_pre_e = c_s['t_p_i'].searchsorted(0, side='right')
    for j in range(1, trajectory_len):
        j_s = c_s['t_p_i'].searchsorted(j, side='left')
        j_e = c_s['t_p_i'].searchsorted(j, side='right')
        for s in range(j_e - j_s):
            # print(c_s.loc[j - 1, 'f'], t_phi[j - 1][:, s])
            # last_f (list slice) + ndarray broadcasts to an ndarray of
            # candidate scores for transitioning into candidate s.
            last_f = f[j_pre_s: j_pre_e]
            f[j_s + s] = max(last_f + t_phi[j - 1][:, s])
            pre[j_s + s] = np.argmax(last_f + t_phi[j - 1][:, s])
            # c_s.loc[(j, s), 'f'] = max(c_s.loc[j - 1, 'f'] + t_phi[j - 1][:, s])
            # c_s.loc[(j, s), 'pre'] = np.argmax(c_s.loc[j - 1, 'f'] + t_phi[j - 1][:, s])
        j_pre_s = j_s
        j_pre_e = j_e
    # Backtrack from the best candidate at the last trajectory point.
    r_list = []
    last_i = trajectory_len - 1
    lst_i_s = c_s['t_p_i'].searchsorted(last_i, side='left')
    c = np.argmax(f[lst_i_s:])
    for j in reversed(range(1, trajectory_len)):
        s = c_s['t_p_i'].searchsorted(j, side='left')
        e = c_s['t_p_i'].searchsorted(j, side='right')
        r_list.append(c)
        c = pre[s+c]
    r_list.append(c)
    return max(f[lst_i_s:]), list(reversed(r_list))
def traverse_trajectory_point(i, M, cur_vehicle, candidate_set, trajectory_len):
    """For trajectory point *i*, score a forced path through each of its candidates.

    Weights the transition matrices *M* by spatial proximity of the other
    trajectory points to point *i*, then runs ``find_sequence`` once per
    candidate at point *i*.

    NOTE(review): the ``__main__`` block calls this with six positional
    arguments (an extra ``c_s`` before ``trajectory_len``) — signature and
    call site disagree; confirm which is intended.

    Returns
    -------
    (c_s_f_v, c_s_p)
        Per-candidate best scores, and the paths for the non-infinite ones.
    """
    # compute phi for current trajectory point
    phi = [item.copy() for item in M]
    # prepare trajectory location
    loc_i = cur_vehicle.iloc[[i], [cur_vehicle.columns.get_loc('latitude'),
                                   cur_vehicle.columns.get_loc('longitude')]].values
    loc_all = cur_vehicle.loc[cur_vehicle.index != i, ['latitude', 'longitude']].values
    # compute trajectory point distance
    # 6371009 m = mean Earth radius; converts haversine output to metres.
    tp_dis = 6371009 * haversine_distances(np.radians(loc_i), np.radians(loc_all))[0]
    # Gaussian distance weighting with a 7000 m length scale.
    weight = np.exp(-(tp_dis * tp_dis) / (7000 * 7000))
    for _, w in enumerate(weight):
        phi[_] = w * phi[_]
    # Candidate rows belonging to trajectory point i (requires sorted 't_p_i').
    s = candidate_set['t_p_i'].searchsorted(i, side='left')
    e = candidate_set['t_p_i'].searchsorted(i, side='right')
    c_s_f_v = []
    c_s_p = []
    for k in range(e - s):
        c_s = candidate_set.copy()
        f_value, P = find_sequence(c_s, weight, phi, i, k, e - s, trajectory_len)
        c_s_f_v.append(f_value)
        if np.isinf(f_value):
            # Unreachable path: keep the score but record no path for it.
            # print('Bad local optimal path occurs, ignored.')
            continue
        c_s_p.append(P)
    return c_s_f_v, c_s_p
def vote(res_set, seg_name):
    """Majority-vote a global path from the per-candidate local optimal paths.

    Parameters
    ----------
    res_set : iterable of (f_values, paths) pairs as returned by
        ``traverse_trajectory_point``.
    seg_name : printed alongside a warning when a chosen score is infinite.

    Returns
    -------
    list
        One winning candidate per path position; ties are broken by f-score.
    """
    # Per-position vote tallies. NOTE(review): sized by len(res_set) but
    # indexed by path position i_idx — assumes path length <= len(res_set);
    # confirm against the caller.
    # (local `vote` shadows this function's own name)
    vote = [dict() for idx in range(len(res_set))]
    for idx, item in enumerate(res_set):
        for path in item[1]:
            for i_idx, c_p in enumerate(path):
                vote[i_idx][c_p] = vote[i_idx][c_p] + 1 if c_p in vote[i_idx] else 1
    global_optimal_path = []
    for idx, item in enumerate(vote):
        # Collect all candidates tied for the highest vote count.
        best = []
        best_v = 0
        for k, v in item.items():
            if v > best_v:
                best = [k]
                best_v = v
            elif v == best_v:
                best.append(k)
        if not len(best):
            # No votes at this position: stop and return the partial path.
            return global_optimal_path
        # Break ties by the candidates' f-scores at this position.
        global_optimal_path.append(best[np.argmax([res_set[idx][0][i] for i in best])])
        if np.isinf(best[np.argmax([res_set[idx][0][i] for i in best])]):
            print(seg_name, "卧槽，坏了")
    return global_optimal_path
if __name__ == '__main__':
    display.configure_pandas()
    # Pre-computed shortest-path / transition matrix between candidate points.
    ssm_path = 'result/ssm'
    diag_M = np.load(os.path.join(ssm_path, 'temp.npy'))
    # load trajectory data
    data = data_loader.load_vehicles(n=1, max_length=3)
    cur_vehicle = data[0][0]
    # load candidate set
    candidate_set = pd.read_csv('result/candidate_point.csv')
    first_seg_len = candidate_set.loc[candidate_set['t_p_i']==candidate_set.index[0]].shape[0]
    # Slice diag_M into one transition block per consecutive pair of
    # trajectory points (requires candidate_set sorted by 't_p_i').
    M = []
    for t_p_i in range(candidate_set['t_p_i'].min(), candidate_set['t_p_i'].max()):
        pre_s = candidate_set['t_p_i'].searchsorted(t_p_i, side='left')
        pre_e = candidate_set['t_p_i'].searchsorted(t_p_i, side='right')
        next_s = candidate_set['t_p_i'].searchsorted(t_p_i + 1, side='left')
        next_e = candidate_set['t_p_i'].searchsorted(t_p_i + 1, side='right')
        M.append(diag_M[pre_s:pre_e, next_s - first_seg_len:next_e - first_seg_len])
    # Observation score: scaled normal pdf of each candidate's residual.
    epsilon_u = 5
    epsilon_sigma = 10
    candidate_set['epsilon'] = (
        norm(epsilon_u, epsilon_sigma).pdf(candidate_set['residual'])
        * epsilon_sigma
        * math.sqrt(2 * math.pi)
    )
    print(candidate_set)
    # Re-index candidates as (trajectory point i, candidate k).
    c_s = candidate_set.groupby('t_p_i').apply(lambda x: x.reset_index(drop=True))
    c_s.index.rename(['i', 'k'], inplace=True)
    c_s.drop(columns='t_p_i', inplace=True)
    F = []
    P = []
    res_set = []
    # Fan the per-point work out over a local dask cluster.
    client = Client(n_workers=4)
    for i in range(candidate_set['t_p_i'].min(), candidate_set['t_p_i'].max() + 1):
        # NOTE(review): six positional args but traverse_trajectory_point
        # declares five parameters — this would raise TypeError when computed;
        # confirm which signature is current.
        res = delayed(traverse_trajectory_point)(i, M, cur_vehicle, candidate_set, c_s, 10)
        res_set.append(res)
    # NOTE(review): vote(res_set, seg_name) takes two arguments but is called
    # with three (res_set, F, P) — confirm intended signature.
    compute = delayed(vote)(res_set, F, P)
    global_optimal_path = compute.compute()
    print(c_s.loc[[(i, j) for i, j in enumerate(global_optimal_path)]])
    client.close()
|
999,575 | aeacdd905ca7d1b211f02bcbd8d508b5c33af7ed | # Django settings for mysite project.
import os
# Debugging is disabled; TEMPLATE_DEBUG mirrors DEBUG.
DEBUG = False
TEMPLATE_DEBUG = DEBUG
DATABASE_ENGINE = 'mysql'           # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
DATABASE_NAME = ''             # Or path to database file if using sqlite3.
DATABASE_USER = ''             # Not used with sqlite3.
DATABASE_PASSWORD = ''         # Not used with sqlite3.
DATABASE_HOST = ''             # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = ''             # Set to empty string for default. Not used with sqlite3.
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = 'D:/python/mysite/media'
TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    # Bug fix: the trailing comma is required — without it TEMPLATE_DIRS is a
    # plain string, and Django would iterate it character by character.
    os.path.join(os.path.dirname(__file__), 'templates'),
)
# Outgoing e-mail configuration (left blank; filled in per deployment).
EMAIL_HOST = ''
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_PORT = ''
EMAIL_FROM = ''
SERVER_EMAIL = ''
DEFAULT_FROM_EMAIL = ''
# NOTE(review): wildcard import at the END of the file — any name defined in
# settings_common overrides the values above; confirm this ordering is intended.
from settings_common import *
|
999,576 | 08702d9bf80f14b4e7e24cadfc7da6e7b10f0444 | #-*- coding: utf-8 -*- ##设置编码方式
import win32com.client
import win32api,win32gui,win32con
import time
from PIL import ImageGrab
def mouse_move(x, y):
    """Warp the mouse cursor to absolute screen coordinates (x, y)."""
    win32api.SetCursorPos([x, y])
def mouse_click(x=None, y=None):
    """Left-click at (x, y); click at the current cursor position when either
    coordinate is omitted."""
    # Idiom fix: `x is not None` replaces the original `not x is None`.
    if x is not None and y is not None:
        mouse_move(x, y)
        time.sleep(0.03)
    # Press and release the left button with a short hold in between.
    win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, 0, 0, 0, 0)
    time.sleep(0.05)
    win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, 0, 0, 0, 0)
def window_capture(hwnd):
    """Grab a screenshot into a temporary .bmp file and return its path.

    The bitmap is sized from the first entry of ``EnumDisplayMonitors`` (the
    primary monitor) and blitted from the device context of *hwnd*.
    """
    hwndDC = win32gui.GetWindowDC( hwnd )
    mfcDC = win32gui.CreateDCFromHandle( hwndDC )
    saveDC = mfcDC.CreateCompatibleDC()
    saveBitMap = win32gui.CreateBitmap()
    # Dimensions of the primary monitor (first monitor rect).
    MoniterDev = win32api.EnumDisplayMonitors( None, None )
    w = MoniterDev[0][2][2]
    h = MoniterDev[0][2][3]
    print( w, h )
    saveBitMap.CreateCompatibleBitmap( mfcDC, w, h )
    saveDC.SelectObject( saveBitMap )
    # Copy pixels from the window DC into the in-memory bitmap.
    saveDC.BitBlt( ( 0, 0 ), ( w, h ) , mfcDC, ( 0, 0 ), win32con.SRCCOPY )
    # NOTE(review): none of the DCs or the bitmap handle are released —
    # presumably leaks GDI objects on repeated calls; confirm and add cleanup.
    bmpname = win32api.GetTempFileName( ".", "" )[0] + '.bmp'
    saveBitMap.SaveBitmapFile( saveDC, bmpname )
    return bmpname
# --- OCR experimentation scratch code ---
# NOTE(review): window_capture requires an hwnd argument — this call raises
# TypeError as written; confirm whether this section is dead scratch code.
im = window_capture()
import PIL
# NOTE(review): left/top/right/bottom are undefined at this point.
box = ( int( left ), int( top ), int( right ), int( bottom ) )
region = im.crop( box )
region.save( 't.bmp', 'BMP' )
import Image
import sys
from pyocr import pyocr
tools = pyocr.get_available_tools()[:]
if len(tools) == 0:
    print("No OCR tool found")
    sys.exit(1)
print("Using '%s'" % (tools[0].get_name()))
# NOTE(review): TextBuilder is never imported — undefined name.
tools[0].image_to_string(Image.open('test.png'), lang='fra', builder=TextBuilder())
import pytesser
# NOTE(review): image_file_to_string is used unqualified but only the
# pytesser module is imported — confirm intended call.
text1 = image_file_to_string( 't.bmp', graceful_errors = True )
print( "\r\nmy char: " )
print( text1 )
from datetime import datetime
s = datetime.now()
# Timestamp-based output filename (HHMMSS).
s1 = '%02s%02s%02s' % ( s.hour, s.minute, s.second )
# NOTE(review): file handle is never closed.
rslt = open( "%s.txt" % s1, 'w' )
rslt.write( text1 )
def ocrAcct():
    """Stub — not implemented."""
    pass
def buy(code, p, v):
    """Stub — not implemented (code/p/v presumably stock code, price, volume; confirm)."""
    pass
# --- Trading-terminal UI automation scratch code ---
# NOTE(review): this script mixes Python 3 `print(...)` (above) with Python 2
# `print ...` statements (below) — it cannot run as-is under either version.
htwt = u'网上股票交易系统5.0'  # assumes the main trading window has this title
notepad = u'无标题 - 记事本'
appHWnd = win32gui.FindWindow(None, htwt)
wsh = win32com.client.Dispatch( "Wscript.Shell" )
if False == wsh.AppActivate( htwt ):
    pass
rect = win32gui.GetWindowRect(appHWnd)
print(rect)
# Click the centre of the window to give it focus.
mouse_click( (rect[0]+rect[2])/2, (rect[1]+rect[3])/2)
time.sleep( 0.1 )
# Drive the buy dialog with keystrokes (F1 opens it; %R is Alt+R).
wsh.SendKeys( "{F1}" )
time.sleep( 0.1 )
wsh.SendKeys( "%R" )
time.sleep( 0.1 )
wsh.SendKeys( "002081" )
time.sleep( 0.1 )
wsh.SendKeys( "{ENTER}" )
time.sleep( 0.1 )
wsh.SendKeys( "%R" )
time.sleep( 0.1 )
wsh.SendKeys( "002375" )
time.sleep( 0.1 )
wsh.SendKeys( "{ENTER}" )
time.sleep( 0.1 )
wsh.SendKeys( "12.34" )
time.sleep( 0.1 )
wsh.SendKeys( "{ENTER}" )
time.sleep( 0.1 )
wsh.SendKeys( "100" )
time.sleep( 0.1 )
wsh.SendKeys( "{ENTER}" )
time.sleep( 0.1 )
wsh.SendKeys( "%y" )
# Alternative approach: post key messages directly to the window handle.
win32gui.ShowWindow(appHWnd,win32con.SW_SHOWNORMAL)
win32gui.SetActiveWindow(appHWnd)
win32gui.SendMessage(appHWnd, win32con.WM_KEYDOWN, 49, 0)
win32gui.PostMessage(appHWnd, win32con.WM_KEYDOWN, win32con.VK_F3, 0)
time.sleep(0.03)
mouse_click( (rect[0]+rect[2])/2, (rect[1]+rect[3])/2)
time.sleep(0.03)
win32gui.SendMessage(appHWnd, win32con.WM_KEYDOWN, 49, 0)
#win32gui.SetForegroundWindow(appHWnd)
#win32gui.PostMessage(appHWnd, win32con.WM_KEYDOWN, win32con.VK_F1, 0)
time.sleep(0.03)
#win32gui.PostMessage(appHWnd, win32con.WM_KEYDOWN, win32con.VK_TAB, 0)
time.sleep(0.03)
win32gui.SendMessage(appHWnd, win32con.WM_KEYDOWN, win32con.VK_F1, 0)
win32gui.SendMessage(appHWnd, win32con.WM_KEYDOWN, win32con.VK_TAB, 0)
# Busy-wait loop used as a crude delay.
for i in range(100000000):
    x=i
win32gui.PostMessage(appHWnd, win32con.WM_KEYDOWN, ord('r'), 1<<29)
# Type a stock code one character at a time, then confirm.
code = '002081'
for c in code:
    win32gui.PostMessage(appHWnd, win32con.WM_KEYDOWN, ord(c), 0)
win32gui.PostMessage(appHWnd, win32con.WM_KEYDOWN, win32con.VK_RETURN, 0)
# Type a price one character at a time.
p = '123.45'
for c in p:
    win32gui.PostMessage(appHWnd, win32con.WM_KEYDOWN, ord(c), 0)
# NOTE(review): passes the string 'r' where a virtual-key code is expected.
win32gui.PostMessage(appHWnd, win32con.WM_KEYDOWN, 'r', 1<<29)
print appHWnd
print "---------------------------------"
time.sleep(1)
# UI element descriptors (window class/caption paths) for later lookup.
button = u'确定[&S]'
bbb = u'刷新 Firefox'
tree = [{'cptn':None, 'cls':'AfxMDIFrame42s'}, {'cptn':None, 'cls':'AfxWnd42s'}, {'cptn':'HexinScrollWnd', 'cls':'Afx:400000:0'}, {'cptn':'HexinScrollWnd2', 'cls':'AfxWnd42s'}, {'cptn':None, 'cls':'SysTreeView32'} ]
# NOTE(review): cncl is assigned three times in a row — only the last value
# survives; confirm which descriptor is intended.
cncl = [{'cptn':None, 'cls':'AfxMDIFrame42s'}, {'cptn':None, 'cls':'#32770 (Dialog)'}, {'cptn':'全部选中', 'cls':'Button'} ]
cncl = [{'cptn':None, 'cls':'AfxMDIFrame42s'}, {'cptn':None, 'cls':'#32770 (Dialog)'}, {'cptn':'撤单', 'cls':'Button'} ]
cncl = [{'cptn':None, 'cls':'AfxMDIFrame42s'}, {'cptn':None, 'cls':'#32770 (Dialog)'}, {'cptn':'撤最后一笔(V)', 'cls':'Button'} ]
cnclList = [{'cptn':None, 'cls':'AfxMDIFrame42s'}, {'cptn':None, 'cls':'#32770 (Dialog)'}, {'cptn':'HexinScrollWnd', 'cls':'Afx:400000:0'}, {'cptn':'HexinScrollWnd2', 'cls':'AfxWnd42s'}, {'cptn':'Custom1', 'cls':'CVirtualGridCtrl'} ]
|
999,577 | 732ef01804c5d108b7dff47cd32bf7172987576a | import serial
import time
# Open the Arduino's serial port and let the board finish its reset.
ser = serial.Serial('/dev/ttyACM0',115200,timeout=0.5)
time.sleep(3)
# Initial command frame (protocol framing is device-specific; format appears
# to be 'R<ddd>F<ddd>R<dddd>' — confirm against the firmware).
data = bytearray(b'R000F000R0000')
ser.write(data)
while 1:
    # Sweep the F-field value from 100 to 254, one frame every 0.1 s.
    for i in range(100, 255):
        # Rebinds the loop variable to its string form for frame assembly.
        i=str(i)
        temp = 'R' + '000' + 'F' + i + 'R0000'
        data = bytes(temp, encoding='ascii')
        ser.write(data)
        time.sleep(0.1)
        # Echo whatever the device sends back.
        print(ser.readline())
999,578 | 2f8a3dde6788650807debac98a936eed3f84280c | from django.contrib import admin
from shop.models import Product, Order, ProductInOrder
from accounts.models import UserManager, User
# Expose the shop and account models in the Django admin with default
# ModelAdmin options.
admin.site.register(Product)
admin.site.register(Order)
admin.site.register(ProductInOrder)
admin.site.register(User)
|
999,579 | fe576866ab884fa8466fbed7f28570e2abb80117 | __author__ = "Jason"
# DONE
def camel_case(each_word):
    """Return *each_word* with its first character upper-cased and the rest lower-cased."""
    head, tail = each_word[:1], each_word[1:]
    return head.upper() + tail.lower()
|
999,580 | c285b41bdd0eb30e8eecbbe7fa8ccf11c9dcb008 | """
data_curation_functions.py
Extract Kevin's functions for curation of public datasets
Modify them to match Jonathan's curation methods in notebook
01/30/2020
"""
import os
import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib_venn import venn3
import seaborn as sns
import pdb
from atomsci.ddm.utils.struct_utils import base_smiles_from_smiles
import atomsci.ddm.utils.datastore_functions as dsf
#from atomsci.ddm.utils import datastore_functions as dsf
from atomsci.ddm.utils import curate_data as curate
import atomsci.ddm.utils.struct_utils as struct_utils
import atomsci.ddm.utils.curate_data as curate_data, imp
def set_data_root(dir):
    """Point the module-level ``data_root`` at *dir* and derive the
    per-source directories in ``data_dirs`` (keyed by source database)."""
    global data_root, data_dirs
    data_root = dir
    data_dirs = {
        'DTC': '{}/dtc'.format(data_root),
        'Excape': '{}/excape'.format(data_root),
    }
# Maps raw measurement types to the name of the log-transformed response column.
log_var_map = {
    'IC50': 'pIC50',
    'AC50': 'pIC50',
    'Solubility': 'logSolubility',
    'CL': 'logCL'
}
# Public datasets handled by this module: target -> {measurement type: dataset name}.
pub_dsets = dict(
    CYP2D6 = dict(IC50='cyp2d6'),
    CYP3A4 = dict(IC50='cyp3a4'),
    JAK1 = dict(IC50="jak1"),
    JAK2 = dict(IC50="jak2"),
    JAK3 = dict(IC50="jak3"),
    )
# ----------------------------------------------------------------------------------------------------------------------
# Generic functions for all datasets
# ----------------------------------------------------------------------------------------------------------------------
# Note: Functions freq_table and labeled_freq_table have been moved to ddm.utils.curate_data module.
# ----------------------------------------------------------------------------------------------------------------------
def standardize_relations(dset_df, db='DTC'):
    """
    Standardize the censoring operators to =, < or >, and remove any rows whose operators
    don't map to a standard one.

    Parameters
    ----------
    dset_df : pandas.DataFrame
        Assay data whose relation column is named according to *db*.
    db : str
        Source database, 'DTC' or 'ChEMBL'; selects the relation column name.

    Returns
    -------
    pandas.DataFrame
        *dset_df* with relations mapped to {=, <, >}; unmappable rows dropped.
    """
    relation_cols = dict(ChEMBL='Standard Relation', DTC='standard_relation')
    rel_col = relation_cols[db]
    # Fix: assign the filled column back instead of fillna(inplace=True) on a
    # column selection — that is chained assignment, which pandas deprecates
    # and which is not guaranteed to modify dset_df.
    dset_df[rel_col] = dset_df[rel_col].fillna('=')
    ops = dset_df[rel_col].values
    if db == 'ChEMBL':
        # Remove annoying quotes around operators
        ops = [op.lstrip("'").rstrip("'") for op in ops]
    op_dict = {
        ">": ">",
        ">=": ">",
        "<": "<",
        "<=": "<",
        "=": "="
    }
    # Unknown operators map to the sentinel '@' and are filtered out below.
    ops = np.array([op_dict.get(op, "@") for op in ops])
    dset_df[rel_col] = ops
    dset_df = dset_df[dset_df[rel_col] != "@"]
    return dset_df
# ----------------------------------------------------------------------------------------------------------------------
# DTC-specific curation functions
# ----------------------------------------------------------------------------------------------------------------------
"""
Upload a raw dataset to the datastore from the given data frame.
Returns the datastore OID of the uploaded dataset.
"""
def upload_file_dtc_raw_data(dset_name, title, description, tags,
                             functional_area,
                             target, target_type, activity, assay_category,file_path,
                             data_origin='journal', species='human',
                             force_update=False):
    """Upload a raw DTC dataset file to the datastore and return its OID.

    Skips the upload (and retrieves existing metadata instead) when a dataset
    with the derived key already exists, unless *force_update* is set.

    Returns
    -------
    The datastore ``dataset_oid`` of the uploaded (or pre-existing) dataset.
    """
    bucket = 'public'
    filename = '%s.csv' % dset_name
    dataset_key = 'dskey_' + filename
    # Key/value metadata attached to the datastore entry.
    # NOTE(review): journal_doi is hardcoded for every dataset uploaded
    # through this function — confirm it applies to all callers.
    kv = { 'file_category': 'experimental',
           'activity': activity,
           'assay_category':assay_category,
           'assay_endpoint' : 'multiple values',
           'curation_level': 'raw',
           'data_origin' : data_origin,
           'functional_area' : functional_area,
           'matrix' : 'multiple values',
           'journal_doi' : 'https://doi.org/10.1016/j.chembiol.2017.11.009',
           'sample_type' : 'in_vitro',
           'species' : species,
           'target' : target,
           'target_type' : target_type,
           'id_col' : 'compound_id'
           }
    #uploaded_file = dsf.upload_file_to_DS(bucket=bucket, filepath=file_path, filename=filename, title = title, description=description, tags=tags, key_values=kv, client=None, dataset_key=dataset_key, override_check=False, return_metadata=True)
    ds_client = dsf.config_client()
    if force_update or not dsf.dataset_key_exists(dataset_key, bucket, ds_client):
        #uploaded_file = dsf.upload_df_to_DS(dset_df, bucket, filename=filename, title=title,
        #                                    description=description,
        #                                    tags=tags, key_values=kv, client=None, dataset_key=dataset_key,
        #                                    override_check=True, return_metadata=True)
        uploaded_file = dsf.upload_file_to_DS(bucket=bucket, filepath=file_path, filename=filename, title = title, description=description, tags=tags, key_values=kv, client=None, dataset_key=dataset_key, override_check=False, return_metadata=True)
        print("Uploaded raw dataset with key %s" % dataset_key)
    else:
        # Dataset already present: fetch its metadata instead of re-uploading.
        uploaded_file = dsf.retrieve_dataset_by_datasetkey(dataset_key, bucket, ds_client, return_metadata=True)
        print("Raw dataset %s is already in datastore, skipping upload." % dataset_key)
    raw_dset_oid = uploaded_file['dataset_oid']
    return raw_dset_oid
'''
# ----------------------------------------------------------------------------------------------------------------------
def get_dtc_jak_smiles():
"""
Use PubChem REST API to download SMILES strings for InChi strings in DTC JAK123 data table
"""
jak_file = "%s/jak123_dtc.csv" % data_dirs['DTC']
dset_df = pd.read_csv(jak_file, index_col=False)
jak_dtc_df = jak_dtc_df[~jak_dtc_df.standard_inchi_key.isna()]
inchi_keys = sorted(set(jak_dtc_df.standard_inchi_key.values))
smiles_df, fail_list, discard_list = pu.download_smiles(inchi_keys)
smiles_df.to_csv('%s/jak123_inchi_smiles.csv' % data_dirs['DTC'], index=False)
# ----------------------------------------------------------------------------------------------------------------------
'''
def filter_dtc_data(orig_df, geneNames):
    """Select usable Drug Target Commons rows for the given gene names.

    A row is kept when all of the following hold:
    its gene name is in *geneNames*; the InChI key, standard value and
    compound id are present; the measurement is an IC50 in NM units; and the
    protein is not annotated as mutated.
    """
    usable = (
        orig_df.gene_names.isin(geneNames)
        & orig_df.standard_inchi_key.notna()
        & (orig_df.standard_type == 'IC50')
        & (orig_df.standard_units == 'NM')
        & orig_df.standard_value.notna()
        & orig_df.compound_id.notna()
        & (orig_df.wildtype_or_mutant != 'mutated')
    )
    return orig_df[usable]
def ic50topic50(x):
    """Convert an IC50 in nM to pIC50 (-log10 of the molar concentration).

    Fix: removed the leftover per-value debug ``print(x)``, which flooded
    stdout when this function is applied row-wise via ``Series.apply``.
    """
    return -np.log10((x/1000000000.0))
def down_select(df, kv_lst):
    """Return the rows of *df* matching every (column, value) pair in *kv_lst*."""
    selected = df
    for column, value in kv_lst:
        selected = selected[selected[column] == value]
    return selected
def get_smiles_dtc_data(nm_df, targ_lst, save_smiles_df):
    """
    Build per-target DTC dataframes (censored and uncensored IC50 rows with
    positive values), compute pIC50 values, and merge in SMILES strings.

    :param nm_df: DTC activity dataframe with nM-unit IC50 values
    :param targ_lst: list of target gene names
    :param save_smiles_df: dataframe mapping standard_inchi_key to smiles
    :return: (smiles_lst, shared_inchi_keys) — one merged dataframe per
        target, plus the series of InChI keys shared by all targets
    """
    save_df = {}
    for targ in targ_lst:
        eq_df = down_select(nm_df, [('gene_names', targ), ('standard_type', 'IC50'), ('standard_relation', '=')])
        all_df = down_select(nm_df, [('gene_names', targ), ('standard_type', 'IC50')])
        print(targ, "distinct compounds = only", eq_df['standard_inchi_key'].nunique())
        print(targ, "distinct compounds <,>,=", all_df['standard_inchi_key'].nunique())
        # we convert to log values later, so make sure there are no 0 values
        save_df[targ] = all_df[all_df['standard_value'] > 0]
    # Intersect InChI keys across all targets
    shared_inchi_keys = save_df[targ_lst[0]]['standard_inchi_key']
    for curr_targ in targ_lst[1:]:
        df = save_df[curr_targ]
        shared_inchi_keys = df[df['standard_inchi_key'].isin(shared_inchi_keys)]['standard_inchi_key']
    print("num shared compounds", shared_inchi_keys.nunique())
    # NOTE(review): shared_df gets pIC50 values but is never used or returned
    # below; kept to preserve the original behavior.
    shared_df = pd.concat([save_df[targ][save_df[targ]['standard_inchi_key'].isin(shared_inchi_keys)]
                           for targ in targ_lst])
    # Add pIC50 values
    print('Add pIC50 values.')
    # Bug fix: removed debug dump print(shared_df['standard_value']) that
    # printed the entire value series.
    shared_df['PIC50'] = shared_df['standard_value'].apply(ic50topic50)
    # Merge in SMILES strings
    print('Merge in SMILES strings.')
    smiles_lst = []
    for targ in targ_lst:
        df = save_df[targ]
        df['PIC50'] = df['standard_value'].apply(ic50topic50)
        smiles_df = df.merge(save_smiles_df, on='standard_inchi_key', suffixes=('_'+targ, '_'))
        # The source file wraps SMILES strings in quotes; strip them once
        # before standardizing (the second identical replace was redundant).
        smiles_df['smiles'] = smiles_df['smiles'].str.replace('"', '')
        smiles_df['rdkit_smiles'] = smiles_df['smiles'].apply(struct_utils.base_smiles_from_smiles)
        print(smiles_df.shape)
        print(smiles_df['standard_inchi_key'].nunique())
        smiles_lst.append(smiles_df)
    return smiles_lst, shared_inchi_keys
def get_smiles_4dtc_data(nm_df, targ_lst, save_smiles_df):
    """
    Build per-target DTC dataframes (including censored rows), compute pIC50
    values, merge in SMILES strings, and accumulate a text description of
    compound counts per target.

    :param nm_df: DTC activity dataframe with nM-unit IC50 values
    :param targ_lst: list of target gene names
    :param save_smiles_df: dataframe mapping standard_inchi_key to smiles
    :return: (smiles_lst, shared_inchi_keys, description_str)
    """
    save_df = {}
    description_str = ""
    for targ in targ_lst:
        jak1_df = down_select(nm_df, [('gene_names', targ), ('standard_type', 'IC50'), ('standard_relation', '=')])
        jak1_df_tmp = down_select(nm_df, [('gene_names', targ), ('standard_type', 'IC50')])
        print(targ, "distinct compounds = only", jak1_df['standard_inchi_key'].nunique())
        print(targ, "distinct compounds <,>,=", jak1_df_tmp['standard_inchi_key'].nunique())
        description = '''
# '''+targ+" distinct compounds = only: "+str(jak1_df['standard_inchi_key'].nunique())+'''
# '''+targ+" distinct compounds <,>,=: "+str(jak1_df_tmp['standard_inchi_key'].nunique())
        description_str += description
        # Include censored data (use jak1_df instead to ignore it)
        save_df[targ] = jak1_df_tmp
    # Intersect InChI keys across all targets
    shared_inchi_keys = save_df[targ_lst[0]]['standard_inchi_key']
    for curr_targ in targ_lst[1:]:
        df = save_df[curr_targ]
        shared_inchi_keys = df[df['standard_inchi_key'].isin(shared_inchi_keys)]['standard_inchi_key']
    print("num shared compounds", shared_inchi_keys.nunique())
    # NOTE(review): shared_df gets pIC50 values but is never used or returned
    # below; kept to preserve the original behavior.
    shared_df = pd.concat([save_df[targ][save_df[targ]['standard_inchi_key'].isin(shared_inchi_keys)]
                           for targ in targ_lst])
    # Add pIC50 values
    print('Add pIC50 values.')
    shared_df['PIC50'] = shared_df['standard_value'].apply(ic50topic50)
    # Merge in SMILES strings
    print('Merge in SMILES strings.')
    smiles_lst = []
    for targ in targ_lst:
        df = save_df[targ]
        df['PIC50'] = df['standard_value'].apply(ic50topic50)
        smiles_df = df.merge(save_smiles_df, on='standard_inchi_key', suffixes=('_'+targ, '_'))
        # The source file wraps SMILES strings in quotes; strip them once
        # before standardizing (the second identical replace was redundant).
        smiles_df['smiles'] = smiles_df['smiles'].str.replace('"', '')
        smiles_df['rdkit_smiles'] = smiles_df['smiles'].apply(struct_utils.base_smiles_from_smiles)
        print("Shape of dataframe:", smiles_df.shape)
        print("Number of unique standard_inchi_key:", smiles_df['standard_inchi_key'].nunique())
        smiles_lst.append(smiles_df)
    return smiles_lst, shared_inchi_keys, description_str
'''
# Standardize the relational operators
dset_df = standardize_relations(dset_df, 'DTC')
# Map the InChi keys to SMILES strings. Remove rows that don't map.
smiles_file = "%s/jak123_inchi_smiles.csv" % data_dirs['DTC']
smiles_df = pd.read_csv(smiles_file, index_col=False)[['standard_inchi_key', 'smiles']]
smiles_df['smiles'] = [s.lstrip('"').rstrip('"') for s in smiles_df.smiles.values]
dset_df = dset_df.merge(smiles_df, how='left', on='standard_inchi_key')
dset_df = dset_df[~dset_df.smiles.isna()]
# Add standardized desalted RDKit SMILES strings
dset_df['rdkit_smiles'] = [base_smiles_from_smiles(s) for s in dset_df.smiles.values]
dset_df = dset_df[dset_df.rdkit_smiles != '']
# Add pIC50 values and filter on them
dset_df['pIC50'] = 9.0 - np.log10(dset_df.standard_value.values)
dset_df = dset_df[dset_df.pIC50 >= 3.0]
# Add censoring relations for pIC50 values
rels = dset_df['standard_relation'].values
log_rels = rels.copy()
log_rels[rels == '<'] = '>'
log_rels[rels == '>'] = '<'
dset_df['pIC50_relation'] = log_rels
# Split into separate datasets by gene name
curated_dir = "%s/curated" % data_dirs['DTC']
os.makedirs(curated_dir, exist_ok=True)
for gene in jak_genes:
gene_dset_df = dset_df[dset_df.gene_names == gene]
gene_dset_file = "%s/%s_DTC_curated.csv" % (curated_dir, gene)
gene_dset_df.to_csv(gene_dset_file, index=False)
print("Wrote file %s" % gene_dset_file)
'''
def upload_df_dtc_smiles(dset_name, title, description, tags,
                         functional_area,
                         target, target_type, activity, assay_category, smiles_df, orig_fileID,
                         data_origin='journal', species='human',
                         force_update=False):
    """
    Upload a raw DTC dataset with merged SMILES strings to the datastore,
    skipping the upload when the dataset key already exists (unless
    force_update is set).

    :return: datastore OID of the uploaded (or pre-existing) dataset
    """
    bucket = 'public'
    filename = '%s_dtc_smiles.csv' % dset_name
    dataset_key = 'dskey_' + filename
    # Metadata key/value pairs attached to the datastore entry
    kv = {
        'file_category': 'experimental',
        'activity': activity,
        'assay_category': assay_category,  # seems like this should be called 'kinase_activity'
        'assay_endpoint': 'pic50',
        'curation_level': 'raw',
        'data_origin': data_origin,
        'functional_area': functional_area,
        'matrix': 'multiple values',
        'journal_doi': 'https://doi.org/10.1016/j.chembiol.2017.11.009',
        'sample_type': 'in_vitro',
        'species': species,
        'target': target,
        'target_type': target_type,
        'id_col': 'compound_id',
        'source_file_id': orig_fileID,
    }
    ds_client = dsf.config_client()
    if not force_update and dsf.dataset_key_exists(dataset_key, bucket, ds_client):
        uploaded_file = dsf.retrieve_dataset_by_datasetkey(dataset_key, bucket, ds_client, return_metadata=True)
        print("Raw dataset %s is already in datastore, skipping upload." % dataset_key)
    else:
        uploaded_file = dsf.upload_df_to_DS(bucket=bucket, filename=filename, df=smiles_df, title=title,
                                            description=description, tags=tags, key_values=kv, client=None,
                                            dataset_key=dataset_key, override_check=False, return_metadata=True)
        print("Uploaded raw dataset with key %s" % dataset_key)
    return uploaded_file['dataset_oid']
# Apply ATOM standard 'curation' step to "shared_df": Average replicate assays, remove duplicates and drop cases with large variance between replicates.
# mleqonly
def atom_curation(targ_lst, smiles_lst, shared_inchi_keys):
    """
    Apply the ATOM standard curation step to each per-target dataframe:
    keep only uncensored ('=') measurements, average replicate assays,
    remove duplicates, and drop rows with inf values or missing RDKit SMILES.

    :param targ_lst: list of target gene names (parallel to smiles_lst)
    :param smiles_lst: list of per-target dataframes with PIC50 and
        rdkit_smiles columns
    :param shared_inchi_keys: series of InChI keys shared by all targets,
        used only to report how many compounds were dropped
    :return: (curated_lst, num_dropped_lst)
    """
    # Reload in case curate_data changed during an interactive session
    imp.reload(curate_data)
    value_col = 'PIC50'  # could also use 'standard_value'
    tolerance = 10
    max_std = 1
    list_bad_duplicates = 'No'
    curated_lst = []
    num_dropped_lst = []
    shared_cmpd_cnt = shared_inchi_keys.nunique()
    for idx in range(len(targ_lst)):
        data = smiles_lst[idx]
        # Keep only uncensored (exact '=') measurements
        data = data[data.standard_relation.str.strip() == '=']
        print("gene_names", data.gene_names.unique())
        print("standard_type", data.standard_type.unique())
        print("standard_relation", data.standard_relation.unique())
        print("before", data.shape)
        curated_df = curate_data.average_and_remove_duplicates(
            value_col, tolerance, list_bad_duplicates, data, max_std,
            compound_id='standard_inchi_key', smiles_col='rdkit_smiles')
        # (Yaru) Remove rows containing inf values
        curated_df = curated_df[~curated_df.isin([np.inf]).any(1)]
        # (Yaru) Remove rows with missing rdkit_smiles
        curated_df = curated_df.dropna(subset=['rdkit_smiles'])
        curated_lst.append(curated_df)
        num_dropped = shared_cmpd_cnt - curated_df.shape[0]
        num_dropped_lst.append(num_dropped)
        print("After", curated_df.shape, "# of dropped compounds", num_dropped)
    return curated_lst, num_dropped_lst
# Use Kevin's "aggregate_assay_data()" to remove duplicates and generate base rdkit smiles
def aggregate_assay(targ_lst, smiles_lst):
    """
    Run curate_data.aggregate_assay_data() on each per-target dataframe to
    collapse replicate measurements and generate base RDKit SMILES.

    NOTE(review): temp_df is overwritten on every iteration, so only the
    frame from the final loop pass is returned; earlier targets' results
    are discarded. Confirm whether a list of aggregated frames was intended.

    :param targ_lst: list of target gene names (parallel to smiles_lst)
    :param smiles_lst: list of per-target dataframes with PIC50,
        rdkit_smiles, standard_inchi_key and standard_relation columns
    :return: aggregated dataframe for the last target processed
    """
    # tolerance / list_bad_duplicates / max_std are currently unused here
    tolerance=10
    column='PIC50'; #'standard_value'
    list_bad_duplicates='No'
    max_std=1
    for it in range(len(targ_lst)) :
        data=smiles_lst[it]
        print("before",data.shape)
        temp_df=curate_data.aggregate_assay_data(data, value_col=column, output_value_col=None,
                             label_actives=True,
                             active_thresh=None,
                             id_col='standard_inchi_key', smiles_col='rdkit_smiles', relation_col='standard_relation')
        # (Yaru) Remove rows containing inf values
        temp_df = temp_df[~temp_df.isin([np.inf]).any(1)]
        #censored_curated_df = censored_curated_df[~censored_curated_df.isin([np.inf]).any(1)]
    return temp_df
def upload_df_dtc_mleqonly(dset_name, title, description, tags,
                           functional_area,
                           target, target_type, activity, assay_category, data_df, dtc_smiles_fileID,
                           data_origin='journal', species='human',
                           force_update=False):
    """
    Upload the ML-ready ('mleqonly') regression version of a DTC dataset to
    the datastore, skipping the upload when the dataset key already exists
    (unless force_update is set).

    :return: datastore OID of the uploaded (or pre-existing) dataset
    """
    bucket = 'public'
    filename = '%s_dtc_mleqonly.csv' % dset_name
    dataset_key = 'dskey_' + filename
    kv = {
        'file_category': 'experimental',
        'activity': activity,
        'assay_category': assay_category,  # seems like this should be called 'kinase_activity'
        'assay_endpoint': 'pic50',
        'curation_level': 'ml_ready',
        'data_origin': data_origin,
        'functional_area': functional_area,
        'matrix': 'multiple values',
        'journal_doi': 'https://doi.org/10.1016/j.chembiol.2017.11.009',
        'sample_type': 'in_vitro',
        'species': species,
        # Bug fix: this was hard-coded to 'CYP2D6', silently ignoring the
        # target argument passed by the caller.
        'target': target,
        'target_type': target_type,
        'id_col': 'compound_id',
        'response_col': 'VALUE_NUM_mean',
        'prediction_type': 'regression',
        'smiles_col': 'rdkit_smiles',
        'units': 'unitless',
        'source_file_id': dtc_smiles_fileID,
    }
    ds_client = dsf.config_client()
    if force_update or not dsf.dataset_key_exists(dataset_key, bucket, ds_client):
        uploaded_file = dsf.upload_df_to_DS(bucket=bucket, filename=filename, df=data_df, title=title,
                                            description=description, tags=tags, key_values=kv, client=None,
                                            dataset_key=dataset_key, override_check=False, return_metadata=True)
        print("Uploaded raw dataset with key %s" % dataset_key)
    else:
        uploaded_file = dsf.retrieve_dataset_by_datasetkey(dataset_key, bucket, ds_client, return_metadata=True)
        print("Raw dataset %s is already in datastore, skipping upload." % dataset_key)
    return uploaded_file['dataset_oid']
def upload_df_dtc_mleqonly_class(dset_name, title, description, tags,
                                 functional_area,
                                 target, target_type, activity, assay_category, data_df, dtc_mleqonly_fileID,
                                 data_origin='journal', species='human',
                                 force_update=False):
    """
    Upload the ML-ready binary-classification version of a DTC dataset to
    the datastore, skipping the upload when the dataset key already exists
    (unless force_update is set).

    :return: datastore OID of the uploaded (or pre-existing) dataset
    """
    bucket = 'public'
    filename = '%s_dtc_mleqonly_class.csv' % dset_name
    dataset_key = 'dskey_' + filename
    kv = {
        'file_category': 'experimental',
        'activity': activity,
        'assay_category': assay_category,  # seems like this should be called 'kinase_activity'
        'assay_endpoint': 'pic50',
        'curation_level': 'ml_ready',
        'data_origin': data_origin,
        'functional_area': functional_area,
        'matrix': 'multiple values',
        'journal_doi': 'https://doi.org/10.1016/j.chembiol.2017.11.009',
        'sample_type': 'in_vitro',
        'species': species,
        'target': target,
        'target_type': target_type,
        'id_col': 'compound_id',
        'response_col': 'binary_class',
        'prediction_type': 'classification',
        'num_classes': 2,
        'class_names': ['inactive', 'active'],
        'smiles_col': 'rdkit_smiles',
        'units': 'unitless',
        'source_file_id': dtc_mleqonly_fileID,
    }
    ds_client = dsf.config_client()
    if not force_update and dsf.dataset_key_exists(dataset_key, bucket, ds_client):
        uploaded_file = dsf.retrieve_dataset_by_datasetkey(dataset_key, bucket, ds_client, return_metadata=True)
        print("Raw dataset %s is already in datastore, skipping upload." % dataset_key)
    else:
        uploaded_file = dsf.upload_df_to_DS(bucket=bucket, filename=filename, df=data_df, title=title,
                                            description=description, tags=tags, key_values=kv, client=None,
                                            dataset_key=dataset_key, override_check=False, return_metadata=True)
        print("Uploaded raw dataset with key %s" % dataset_key)
    return uploaded_file['dataset_oid']
def upload_df_dtc_base_smiles_all(dset_name, title, description, tags,
                                  functional_area,
                                  target, target_type, activity, assay_category, data_df, dtc_mleqonly_fileID,
                                  data_origin='journal', species='human',
                                  force_update=False):
    """
    Upload the ML-ready DTC regression dataset keyed on base RDKit SMILES to
    the datastore, skipping the upload when the dataset key already exists
    (unless force_update is set).

    :return: datastore OID of the uploaded (or pre-existing) dataset
    """
    bucket = 'public'
    filename = '%s_dtc_base_smiles_all.csv' % dset_name
    dataset_key = 'dskey_' + filename
    kv = {
        'file_category': 'experimental',
        'activity': activity,
        'assay_category': assay_category,  # seems like this should be called 'kinase_activity'
        'assay_endpoint': 'pic50',
        'curation_level': 'ml_ready',
        'data_origin': data_origin,
        'functional_area': functional_area,
        'matrix': 'multiple values',
        'journal_doi': 'https://doi.org/10.1016/j.chembiol.2017.11.009',
        'sample_type': 'in_vitro',
        'species': species,
        'target': target,
        'target_type': target_type,
        'id_col': 'compound_id',
        'response_col': 'PIC50',
        'prediction_type': 'regression',
        'smiles_col': 'base_rdkit_smiles',
        'units': 'unitless',
        'source_file_id': dtc_mleqonly_fileID,
    }
    ds_client = dsf.config_client()
    if not force_update and dsf.dataset_key_exists(dataset_key, bucket, ds_client):
        uploaded_file = dsf.retrieve_dataset_by_datasetkey(dataset_key, bucket, ds_client, return_metadata=True)
        print("Raw dataset %s is already in datastore, skipping upload." % dataset_key)
    else:
        uploaded_file = dsf.upload_df_to_DS(bucket=bucket, filename=filename, df=data_df, title=title,
                                            description=description, tags=tags, key_values=kv, client=None,
                                            dataset_key=dataset_key, override_check=False, return_metadata=True)
        print("Uploaded raw dataset with key %s" % dataset_key)
    return uploaded_file['dataset_oid']
def upload_file_dtc_smiles_regr_all(dset_name, title, description, tags,
                                    functional_area,
                                    target, target_type, activity, assay_category, file_path, dtc_smiles_fileID,
                                    smiles_column, data_origin='journal', species='human',
                                    force_update=False):
    """
    Upload an ML-ready DTC regression dataset from a file on disk to the
    datastore, skipping the upload when the dataset key already exists
    (unless force_update is set).

    :param file_path: path of the CSV file to upload
    :param smiles_column: name of the SMILES column in the file
    :return: datastore OID of the uploaded (or pre-existing) dataset
    """
    bucket = 'public'
    filename = '%s_dtc_smiles_regr_all.csv' % dset_name
    dataset_key = 'dskey_' + filename
    kv = {
        'file_category': 'experimental',
        'activity': activity,
        'assay_category': assay_category,  # seems like this should be called 'kinase_activity'
        'assay_endpoint': 'pic50',
        'curation_level': 'ml_ready',
        'data_origin': data_origin,
        'functional_area': functional_area,
        'matrix': 'multiple values',
        'journal_doi': 'https://doi.org/10.1016/j.chembiol.2017.11.009',
        'sample_type': 'in_vitro',
        'species': species,
        'target': target,
        'target_type': target_type,
        'id_col': 'compound_id',
        'response_col': 'PIC50',
        'prediction_type': 'regression',
        'smiles_col': smiles_column,
        'units': 'unitless',
        'source_file_id': dtc_smiles_fileID,
    }
    ds_client = dsf.config_client()
    if not force_update and dsf.dataset_key_exists(dataset_key, bucket, ds_client):
        uploaded_file = dsf.retrieve_dataset_by_datasetkey(dataset_key, bucket, ds_client, return_metadata=True)
        print("Raw dataset %s is already in datastore, skipping upload." % dataset_key)
    else:
        # This variant uploads from a file path rather than a dataframe
        uploaded_file = dsf.upload_file_to_DS(bucket=bucket, filepath=file_path, filename=filename, title=title,
                                              description=description, tags=tags, key_values=kv, client=None,
                                              dataset_key=dataset_key, override_check=False, return_metadata=True)
        print("Uploaded raw dataset with key %s" % dataset_key)
    return uploaded_file['dataset_oid']
def upload_df_dtc_smiles_regr_all_class(dset_name, title, description, tags,
                                        functional_area,
                                        target, target_type, activity, assay_category, data_df, dtc_smiles_regr_all_fileID,
                                        smiles_column, data_origin='journal', species='human',
                                        force_update=False):
    """
    Upload the binary-classification version of the all-SMILES DTC dataset to
    the datastore, skipping the upload when the dataset key already exists
    (unless force_update is set).

    :param smiles_column: name of the SMILES column in the dataframe
    :return: datastore OID of the uploaded (or pre-existing) dataset
    """
    bucket = 'public'
    filename = '%s_dtc_smiles_regr_all_class.csv' % dset_name
    dataset_key = 'dskey_' + filename
    kv = {
        'file_category': 'experimental',
        'activity': activity,
        'assay_category': assay_category,  # seems like this should be called 'kinase_activity'
        'assay_endpoint': 'pic50',
        'curation_level': 'ml_ready',
        'data_origin': data_origin,
        'functional_area': functional_area,
        'matrix': 'multiple values',
        'journal_doi': 'https://doi.org/10.1016/j.chembiol.2017.11.009',
        'sample_type': 'in_vitro',
        'species': species,
        'target': target,
        'target_type': target_type,
        'id_col': 'compound_id',
        'response_col': 'PIC50',
        'prediction_type': 'classification',
        'num_classes': 2,
        'smiles_col': smiles_column,
        'class_names': ['inactive', 'active'],
        'units': 'unitless',
        'source_file_id': dtc_smiles_regr_all_fileID,
    }
    ds_client = dsf.config_client()
    if not force_update and dsf.dataset_key_exists(dataset_key, bucket, ds_client):
        uploaded_file = dsf.retrieve_dataset_by_datasetkey(dataset_key, bucket, ds_client, return_metadata=True)
        print("Raw dataset %s is already in datastore, skipping upload." % dataset_key)
    else:
        uploaded_file = dsf.upload_df_to_DS(bucket=bucket, filename=filename, df=data_df, title=title,
                                            description=description, tags=tags, key_values=kv, client=None,
                                            dataset_key=dataset_key, override_check=False, return_metadata=True)
        print("Uploaded raw dataset with key %s" % dataset_key)
    return uploaded_file['dataset_oid']
# ----------------------------------------------------------------------------------------------------------------------
# Excape-specific curation functions
# ----------------------------------------------------------------------------------------------------------------------
"""
Upload a raw dataset to the datastore from the given data frame.
Returns the datastore OID of the uploaded dataset.
"""
def upload_file_excape_raw_data(dset_name, title, description, tags,
                                functional_area,
                                target, target_type, activity, assay_category, file_path,
                                data_origin='journal', species='human',
                                force_update=False):
    """
    Upload a raw ExCAPE-DB dataset file to the datastore, skipping the upload
    when the dataset key already exists (unless force_update is set).

    :param file_path: path of the CSV file to upload
    :return: datastore OID of the uploaded (or pre-existing) dataset
    """
    bucket = 'public'
    filename = '%s_excape.csv' % dset_name
    dataset_key = 'dskey_' + filename
    kv = {
        'file_category': 'experimental',
        'activity': activity,
        'assay_category': assay_category,
        'assay_endpoint': 'multiple values',
        'curation_level': 'raw',
        'data_origin': data_origin,
        'functional_area': functional_area,
        'matrix': 'multiple values',
        'journal_doi': 'https://dx.doi.org/10.1186%2Fs13321-017-0203-5',  # ExCAPE-DB
        'sample_type': 'in_vitro',
        'species': species,
        'target': target,
        'target_type': target_type,
        'id_col': 'Original_Entry_ID',
    }
    ds_client = dsf.config_client()
    if not force_update and dsf.dataset_key_exists(dataset_key, bucket, ds_client):
        uploaded_file = dsf.retrieve_dataset_by_datasetkey(dataset_key, bucket, ds_client, return_metadata=True)
        print("Raw dataset %s is already in datastore, skipping upload." % dataset_key)
    else:
        uploaded_file = dsf.upload_file_to_DS(bucket=bucket, filepath=file_path, filename=filename, title=title,
                                              description=description, tags=tags, key_values=kv, client=None,
                                              dataset_key=dataset_key, override_check=False, return_metadata=True)
        print("Uploaded raw dataset with key %s" % dataset_key)
    return uploaded_file['dataset_oid']
def get_smiles_excape_data(nm_df, targ_lst):
    """
    Prepare ExCAPE-DB data for curation: drop rows without a pXC50 value and
    add standardized base RDKit SMILES. SMILES strings are already present in
    the ExCAPE file, so no merge against a separate SMILES table is needed,
    and there is no censored-data filtering.

    NOTE(review): only targ_lst[0] is stored in save_df, so a targ_lst with
    more than one entry would raise a KeyError in the loop below — unchanged
    from the original behavior; confirm whether multi-target support is needed.

    :param nm_df: ExCAPE dataframe with 'pXC50', 'SMILES', 'Ambit_InchiKey'
    :param targ_lst: list of target gene names
    :return: (smiles_lst, shared_inchi_keys)
    """
    # Drop rows with no activity value
    nm_df = nm_df.dropna(subset=['pXC50'])
    # Bug fix: the original assigned save_df[targ_lst[0]] = nm_df twice and
    # rebound 'targ' redundantly before the loop; consolidated here.
    save_df = {targ_lst[0]: nm_df}
    print(targ_lst[0], "distinct compounds = only", nm_df['Ambit_InchiKey'].nunique())
    shared_inchi_keys = nm_df['Ambit_InchiKey']
    # Add standardized desalted base SMILES per target
    smiles_lst = []
    for targ in targ_lst:
        smiles_df = save_df[targ]
        smiles_df['rdkit_smiles'] = smiles_df['SMILES'].apply(struct_utils.base_smiles_from_smiles)
        print(smiles_df.shape)
        print(smiles_df['Ambit_InchiKey'].nunique())
        smiles_lst.append(smiles_df)
    return smiles_lst, shared_inchi_keys
def upload_df_excape_smiles(dset_name, title, description, tags,
                            functional_area,
                            target, target_type, activity, assay_category, smiles_df, orig_fileID,
                            data_origin='journal', species='human',
                            force_update=False):
    """
    Upload a raw ExCAPE-DB dataset with standardized SMILES to the datastore,
    skipping the upload when the dataset key already exists (unless
    force_update is set).

    NOTE(review): the filename/dataset key uses the '_dtc_smiles.csv' suffix
    even though this is ExCAPE data — presumably a copy-paste leftover, but
    changing it would break existing datastore keys; confirm before renaming.

    :return: datastore OID of the uploaded (or pre-existing) dataset
    """
    bucket = 'public'
    filename = '%s_dtc_smiles.csv' % dset_name
    dataset_key = 'dskey_' + filename
    kv = {
        'file_category': 'experimental',
        'activity': activity,
        'assay_category': assay_category,  # seems like this should be called 'kinase_activity'
        'assay_endpoint': 'pic50',
        'curation_level': 'raw',
        'data_origin': data_origin,
        'functional_area': functional_area,
        'matrix': 'multiple values',
        'journal_doi': 'https://dx.doi.org/10.1186%2Fs13321-017-0203-5',  # ExCAPE-DB
        'sample_type': 'in_vitro',
        'species': species,
        'target': target,
        'target_type': target_type,
        'id_col': 'Original_Entry_ID',
        'source_file_id': orig_fileID,
    }
    ds_client = dsf.config_client()
    if not force_update and dsf.dataset_key_exists(dataset_key, bucket, ds_client):
        uploaded_file = dsf.retrieve_dataset_by_datasetkey(dataset_key, bucket, ds_client, return_metadata=True)
        print("Raw dataset %s is already in datastore, skipping upload." % dataset_key)
    else:
        uploaded_file = dsf.upload_df_to_DS(bucket=bucket, filename=filename, df=smiles_df, title=title,
                                            description=description, tags=tags, key_values=kv, client=None,
                                            dataset_key=dataset_key, override_check=False, return_metadata=True)
        print("Uploaded raw dataset with key %s" % dataset_key)
    return uploaded_file['dataset_oid']
# Apply ATOM standard 'curation' step to "shared_df": Average replicate assays, remove duplicates and drop cases with large variance between replicates.
# mleqonly
def atom_curation_excape(targ_lst, smiles_lst, shared_inchi_keys):
    """
    Apply the ATOM standard curation step (average replicate assays, remove
    duplicates, drop high-variance compounds) to each per-target ExCAPE
    dataframe. Unlike the DTC variant, no censoring ('=') filter is applied.

    :param targ_lst: list of target gene names (parallel to smiles_lst)
    :param smiles_lst: list of per-target dataframes with 'pXC50' and
        'rdkit_smiles' columns
    :param shared_inchi_keys: series of InChI keys shared by all targets,
        used only to report how many compounds were dropped
    :return: list of curated dataframes, one per target
    """
    # Reload in case curate_data changed during an interactive session
    imp.reload(curate_data)
    tolerance=10
    column='pXC50'; #'standard_value'
    list_bad_duplicates='No'
    max_std=1
    curated_lst=[]
    #print(targ_lst)
    #print(smiles_lst)
    for it in range(len(targ_lst)) :
        data=smiles_lst[it]
        # No censored-data filter for ExCAPE (all rows kept)
        #data = data[data.standard_relation.str.strip() == '=']
        #print("gene_names",data.gene_names.unique())
        #print("standard_type",data.standard_type.unique())
        #print("standard_relation",data.standard_relation.unique())
        print("before",data.shape)
        # NOTE(review): compound_id='standard_inchi_key' — the ExCAPE tables
        # seen elsewhere in this file use 'Ambit_InchiKey'; confirm the
        # expected ID column for average_and_remove_duplicates here.
        curated_df=curate_data.average_and_remove_duplicates (column, tolerance, list_bad_duplicates, data, max_std, compound_id='standard_inchi_key',smiles_col='rdkit_smiles')
        # (Yaru) Remove inf in curated_df
        curated_df = curated_df[~curated_df.isin([np.inf]).any(1)]
        # (Yaru) Remove nan on rdkit_smiles
        curated_df = curated_df.dropna(subset=['rdkit_smiles'])
        # Also drop rows missing the averaged value or the raw pXC50
        curated_df = curated_df.dropna(subset=['VALUE_NUM_mean'])
        curated_df = curated_df.dropna(subset=['pXC50'])
        # (Kevin)
        # Filter criteria:
        #   pXC50 not missing
        #   rdkit_smiles not blank
        #   pXC50 > 3
        #dset_df = dset_df[dset_df.pXC50 >= 3.0]
        curated_lst.append(curated_df)
        prev_cmpd_cnt=shared_inchi_keys.nunique()
        num_dropped=prev_cmpd_cnt-curated_df.shape[0]
        print("After",curated_df.shape, "# of dropped compounds",num_dropped)
    return curated_lst
def upload_df_excape_mleqonly(dset_name, title, description, tags,
                              functional_area,
                              target, target_type, activity, assay_category, data_df, smiles_fileID,
                              data_origin='journal', species='human',
                              force_update=False):
    """
    Upload the ML-ready ('mleqonly') regression version of an ExCAPE dataset
    to the datastore, skipping the upload when the dataset key already exists
    (unless force_update is set).

    NOTE(review): the filename uses the '_dtc_mleqonly.csv' suffix even for
    ExCAPE data — confirm before renaming, as it defines the dataset key.

    :return: datastore OID of the uploaded (or pre-existing) dataset
    """
    bucket = 'public'
    filename = '%s_dtc_mleqonly.csv' % dset_name
    dataset_key = 'dskey_' + filename
    kv = {
        'file_category': 'experimental',
        'activity': activity,
        'assay_category': assay_category,  # seems like this should be called 'kinase_activity'
        'assay_endpoint': 'pic50',
        'curation_level': 'ml_ready',
        'data_origin': data_origin,
        'functional_area': functional_area,
        'matrix': 'multiple values',
        'journal_doi': 'https://dx.doi.org/10.1186%2Fs13321-017-0203-5',  # ExCAPE-DB
        'sample_type': 'in_vitro',
        'species': species,
        # Bug fix: this was hard-coded to 'CYP2D6', silently ignoring the
        # target argument passed by the caller.
        'target': target,
        'target_type': target_type,
        'id_col': 'Original_Entry_ID',
        'response_col': 'VALUE_NUM_mean',
        'prediction_type': 'regression',
        'smiles_col': 'rdkit_smiles',
        'units': 'unitless',
        'source_file_id': smiles_fileID,
    }
    ds_client = dsf.config_client()
    if force_update or not dsf.dataset_key_exists(dataset_key, bucket, ds_client):
        uploaded_file = dsf.upload_df_to_DS(bucket=bucket, filename=filename, df=data_df, title=title,
                                            description=description, tags=tags, key_values=kv, client=None,
                                            dataset_key=dataset_key, override_check=False, return_metadata=True)
        print("Uploaded raw dataset with key %s" % dataset_key)
    else:
        uploaded_file = dsf.retrieve_dataset_by_datasetkey(dataset_key, bucket, ds_client, return_metadata=True)
        print("Raw dataset %s is already in datastore, skipping upload." % dataset_key)
    return uploaded_file['dataset_oid']
def upload_df_excape_mleqonly_class(dset_name, title, description, tags,
                                    functional_area,
                                    target, target_type, activity, assay_category, data_df, mleqonly_fileID,
                                    data_origin='journal', species='human',
                                    force_update=False):
    """
    Upload the ML-ready binary-classification version of an ExCAPE dataset to
    the datastore, skipping the upload when the dataset key already exists
    (unless force_update is set).

    :return: datastore OID of the uploaded (or pre-existing) dataset
    """
    bucket = 'public'
    filename = '%s_dtc_mleqonly_class.csv' % dset_name
    dataset_key = 'dskey_' + filename
    kv = {
        'file_category': 'experimental',
        'activity': activity,
        'assay_category': assay_category,  # seems like this should be called 'kinase_activity'
        'assay_endpoint': 'pic50',
        'curation_level': 'ml_ready',
        'data_origin': data_origin,
        'functional_area': functional_area,
        'matrix': 'multiple values',
        'journal_doi': 'https://dx.doi.org/10.1186%2Fs13321-017-0203-5',  # ExCAPE-DB
        'sample_type': 'in_vitro',
        'species': species,
        'target': target,
        'target_type': target_type,
        'id_col': 'compound_id',
        'response_col': 'binary_class',
        'prediction_type': 'classification',
        'num_classes': 2,
        'class_names': ['inactive', 'active'],
        'smiles_col': 'rdkit_smiles',
        'units': 'unitless',
        'source_file_id': mleqonly_fileID,
    }
    ds_client = dsf.config_client()
    if not force_update and dsf.dataset_key_exists(dataset_key, bucket, ds_client):
        uploaded_file = dsf.retrieve_dataset_by_datasetkey(dataset_key, bucket, ds_client, return_metadata=True)
        print("Raw dataset %s is already in datastore, skipping upload." % dataset_key)
    else:
        uploaded_file = dsf.upload_df_to_DS(bucket=bucket, filename=filename, df=data_df, title=title,
                                            description=description, tags=tags, key_values=kv, client=None,
                                            dataset_key=dataset_key, override_check=False, return_metadata=True)
        print("Uploaded raw dataset with key %s" % dataset_key)
    return uploaded_file['dataset_oid']
'''
# ----------------------------------------------------------------------------------------------------------------------
def curate_excape_jak_datasets():
"""
Extract JAK1, 2 and 3 datasets from Excape database, filtered for data usability.
"""
# Filter criteria:
# pXC50 not missing
# rdkit_smiles not blank
# pXC50 > 3
jak_file = "%s/jak123_excape_smiles.csv" % data_dirs['Excape']
dset_df = pd.read_csv(jak_file, index_col=False)
dset_df = dset_df[ ~dset_df.pXC50.isna() & ~dset_df.rdkit_smiles.isna() ]
dset_df = dset_df[dset_df.pXC50 >= 3.0]
jak_genes = ['JAK1', 'JAK2', 'JAK3']
# Split into separate datasets by gene name
curated_dir = "%s/curated" % data_dirs['Excape']
os.makedirs(curated_dir, exist_ok=True)
for gene in jak_genes:
gene_dset_df = dset_df[dset_df.Gene_Symbol == gene]
gene_dset_file = "%s/%s_Excape_curated.csv" % (curated_dir, gene)
gene_dset_df.to_csv(gene_dset_file, index=False)
print("Wrote file %s" % gene_dset_file)
'''
'''
# ----------------------------------------------------------------------------------------------------------------------
# ChEMBL-specific curation functions
# ----------------------------------------------------------------------------------------------------------------------
# Raw ChEMBL datasets as downloaded from the interactive ChEMBL web app are labeled as compressed CSV files,
# but they are actually semicolon-separated. After decompressing them, we change the extension to .txt so we
# can open them in Excel without it getting confused. These files can be placed in data_dirs['ChEMBL']/raw
# for initial processing.
# ----------------------------------------------------------------------------------------------------------------------
def filter_chembl_dset(dset_df):
    """
    Filter rows from a raw dataset downloaded from the ChEMBL website. Standardize censoring relational operators.
    Add columns for the log-transformed value, the censoring relation for the log value, and the base RDKit SMILES
    string. Returns a filtered data frame.

    Args:
        dset_df (DataFrame): Raw ChEMBL download. Expected to include at least the columns
            'Smiles', 'Standard Value', 'Standard Type', 'Standard Units', 'Standard Relation',
            'Potential Duplicate' and 'Data Validity Comment'.

    Returns:
        DataFrame: Filtered copy with added columns 'LogVarRelation', 'rdkit_smiles' and,
            depending on the dominant measurement type, 'pIC50', 'logSolubility' or 'logCL'.
    """
    # Filter out rows with no SMILES string or IC50 data
    dset_df = dset_df[~dset_df['Smiles'].isna()]
    dset_df = dset_df[~dset_df['Standard Value'].isna()]
    # Filter out rows flagged as likely duplicates
    dset_df = dset_df[dset_df['Potential Duplicate'] == False]
    # Filter out rows flagged with validity concerns (e.g., value out of typical range)
    dset_df = dset_df[dset_df['Data Validity Comment'].isna()]
    # Filter out rows with nonstandard measurement types. We assume here that the type appearing
    # most frequently is the standard one.
    type_df = curate.freq_table(dset_df, 'Standard Type')
    max_type = type_df['Standard Type'].values[0]
    if type_df.shape[0] > 1:
        print('Dataset has multiple measurement types')
        print(type_df)
    dset_df = dset_df[dset_df['Standard Type'] == max_type]
    # Filter out rows with nonstandard units. Again, we assume the unit appearing most frequently
    # is the standard one.
    unit_freq_df = curate.freq_table(dset_df, 'Standard Units')
    max_unit = unit_freq_df['Standard Units'].values[0]
    if unit_freq_df.shape[0] > 1:
        print('Dataset has multiple standard units')
        print(unit_freq_df)
    dset_df = dset_df[dset_df['Standard Units'] == max_unit]
    # Standardize the censoring operators to =, < or >, and remove any rows whose operators
    # don't map to a standard one.
    dset_df = standardize_relations(dset_df, db='ChEMBL')
    # Add a column for the pIC50 or log-transformed value, and a column for the associated censoring relation.
    # For pXC50 values, this will be the opposite of the original censoring relation.
    ops = dset_df['Standard Relation'].values
    log_ops = ops.copy()
    if (max_type in ['IC50', 'AC50']) and (max_unit == 'nM'):
        # pIC50 = -log10(IC50 in molar) = 9 - log10(IC50 in nM); negation inverts the
        # censoring direction, so '<' and '>' are swapped below.
        dset_df['pIC50'] = 9.0 - np.log10(dset_df['Standard Value'].values)
        log_ops[ops == '>'] = '<'
        log_ops[ops == '<'] = '>'
    elif (max_type == 'Solubility') and (max_unit == 'nM'):
        # log10 of the molar solubility; monotonic increasing, so relations are unchanged
        dset_df['logSolubility'] = np.log10(dset_df['Standard Value'].values) - 9.0
    elif max_type == 'CL':
        dset_df['logCL'] = np.log10(dset_df['Standard Value'].values)
    # NOTE(review): for measurement types other than the three handled above, no log-value
    # column is added, yet LogVarRelation is still assigned (equal to the unflipped
    # relations) — confirm downstream code only uses it for the handled types.
    dset_df['LogVarRelation'] = log_ops
    # Add a column for the standardized base SMILES string. Remove rows with SMILES strings
    # that RDKit wasn't able to parse.
    dset_df['rdkit_smiles'] = [base_smiles_from_smiles(s) for s in dset_df.Smiles.values]
    dset_df = dset_df[dset_df.rdkit_smiles != '']
    return dset_df
# ----------------------------------------------------------------------------------------------------------------------
def filter_all_chembl_dsets(force_update=False):
    """
    Generate filtered versions of all the raw datasets present in the
    data_dirs['ChEMBL']/raw directory. An existing filtered file is left
    in place unless force_update is True.
    """
    chembl_dir = data_dirs['ChEMBL']
    raw_dir = "%s/raw" % chembl_dir
    filt_dir = "%s/filtered" % chembl_dir
    os.makedirs(filt_dir, exist_ok=True)
    for fn in sorted(os.listdir(raw_dir)):
        # Raw ChEMBL downloads are stored as semicolon-separated .txt files
        if not fn.endswith('.txt'):
            continue
        dset_name = fn.replace('.txt', '')
        filt_path = "%s/%s_filt.csv" % (filt_dir, dset_name)
        if os.path.exists(filt_path) and not force_update:
            continue
        raw_df = pd.read_table('%s/%s' % (raw_dir, fn), sep=';', index_col=False)
        print("Filtering dataset %s" % dset_name)
        filt_df = filter_chembl_dset(raw_df)
        filt_df.to_csv(filt_path, index=False)
        print("Wrote filtered data to %s" % filt_path)
# ----------------------------------------------------------------------------------------------------------------------
def summarize_chembl_dsets():
    """
    Generate a summary table describing the data in the filtered ChEMBL datasets.

    For each *_filt.csv file in data_dirs['ChEMBL']/filtered, tabulates dataset size,
    censoring counts, compound replication statistics and assay statistics. Writes
    per-dataset replicate-compound and top-assay tables to the stats directory, plus
    one overall summary CSV covering all datasets.
    """
    chembl_dir = data_dirs['ChEMBL']
    filt_dir = "%s/filtered" % chembl_dir
    stats_dir = "%s/stats" % chembl_dir
    os.makedirs(stats_dir, exist_ok=True)
    chembl_files = sorted(os.listdir(filt_dir))
    # Parallel accumulator lists, one entry per dataset; they become the columns
    # of the summary table built at the end, so every dataset must append to all of them.
    dset_names = []
    mtype_list = []
    log_var_list = []
    units_list = []
    dset_sizes = []
    num_left = []
    num_eq = []
    num_right = []
    cmpd_counts = []
    cmpd_rep_counts = []
    max_cmpd_reps = []
    assay_counts = []
    max_assay_pts = []
    max_assay_list = []
    max_fmt_list = []
    for fn in chembl_files:
        if fn.endswith('.csv'):
            fpath = '%s/%s' % (filt_dir, fn)
            dset_df = pd.read_csv(fpath, index_col=False)
            dset_name = fn.replace('_filt.csv', '')
            dset_names.append(dset_name)
            print("Summarizing %s" % dset_name)
            dset_sizes.append(dset_df.shape[0])
            # Dominant measurement type and its log-transformed variable name
            # (freq_table is assumed to sort by descending frequency — values[0] = most common)
            type_df = curate.freq_table(dset_df, 'Standard Type')
            max_type = type_df['Standard Type'].values[0]
            mtype_list.append(max_type)
            log_var = log_var_map[max_type]
            log_var_list.append(log_var)
            unit_freq_df = curate.freq_table(dset_df, 'Standard Units')
            max_unit = unit_freq_df['Standard Units'].values[0]
            units_list.append(max_unit)
            # Count censoring relations on the log-transformed scale
            log_ops = dset_df.LogVarRelation.values
            uniq_ops, op_counts = np.unique(log_ops, return_counts=True)
            op_count = dict(zip(uniq_ops, op_counts))
            num_left.append(op_count.get('<', 0))
            num_eq.append(op_count.get('=', 0))
            num_right.append(op_count.get('>', 0))
            # Compound replication: restrict to SMILES appearing more than once
            smiles_df = curate.freq_table(dset_df, 'rdkit_smiles')
            cmpd_counts.append(smiles_df.shape[0])
            smiles_df = smiles_df[smiles_df.Count > 1]
            cmpd_rep_counts.append(smiles_df.shape[0])
            if smiles_df.shape[0] > 0:
                max_cmpd_reps.append(smiles_df.Count.values[0])
            else:
                max_cmpd_reps.append(1)
            # Per-replicated-compound statistics: mean/SD of the log value and
            # the number of distinct assays contributing measurements
            mean_values = []
            stds = []
            cmpd_assays = []
            for smiles in smiles_df.rdkit_smiles.values:
                sset_df = dset_df[dset_df.rdkit_smiles == smiles]
                vals = sset_df[log_var].values
                mean_values.append(np.mean(vals))
                stds.append(np.std(vals))
                cmpd_assays.append(len(set(sset_df['Assay ChEMBL ID'].values)))
            smiles_df['Mean_value'] = mean_values
            smiles_df['Std_dev'] = stds
            smiles_df['Num_assays'] = cmpd_assays
            smiles_file = "%s/%s_replicate_cmpd_stats.csv" % (stats_dir, dset_name)
            smiles_df.to_csv(smiles_file, index=False)
            # Assay-level statistics; values[0] is the assay with the most data points
            assay_df = curate.labeled_freq_table(dset_df, ['Assay ChEMBL ID', 'Assay Description', 'BAO Label'])
            assay_counts.append(assay_df.shape[0])
            max_assay_pts.append(assay_df.Count.values[0])
            max_assay_list.append(assay_df['Assay Description'].values[0])
            max_fmt_list.append(assay_df['BAO Label'].values[0])
            # Only report assays contributing at least 20 data points
            assay_df = assay_df[assay_df.Count >= 20]
            assay_file = "%s/%s_top_assay_summary.csv" % (stats_dir, dset_name)
            assay_df.to_csv(assay_file, index=False)
    # Assemble the one-row-per-dataset summary table from the accumulators
    summary_df = pd.DataFrame(dict(
        Dataset=dset_names,
        MeasuredValue=mtype_list,
        LogValue=log_var_list,
        Units=units_list,
        NumPoints=dset_sizes,
        NumUncensored=num_eq,
        NumLeftCensored=num_left,
        NumRightCensored=num_right,
        NumCmpds=cmpd_counts,
        NumReplicatedCmpds=cmpd_rep_counts,
        MaxCmpdReps=max_cmpd_reps,
        NumAssays=assay_counts,
        MaxAssayPoints=max_assay_pts,
        MaxAssay=max_assay_list,
        MaxAssayFormat=max_fmt_list
        ))
    summary_file = "%s/chembl_public_dataset_summary.csv" % stats_dir
    summary_df.to_csv(summary_file, index=False, columns=['Dataset', 'NumPoints',
                      'NumUncensored', 'NumLeftCensored', 'NumRightCensored',
                      'NumCmpds', 'NumReplicatedCmpds', 'MaxCmpdReps',
                      'MeasuredValue', 'LogValue', 'Units',
                      'NumAssays', 'MaxAssayPoints', 'MaxAssayFormat', 'MaxAssay'])
    print("Wrote summary table to %s" % summary_file)
# ----------------------------------------------------------------------------------------------------------------------
def plot_chembl_log_distrs():
    """
    Plot distributions of the log-transformed values for each of the ChEMBL datasets.

    The paired CYP AC50/IC50 datasets are plotted together (with the IC50 data split by
    censoring relation); every other dataset gets its own plot split by censoring relation.
    Requires the summary table written by summarize_chembl_dsets to exist.
    """
    chembl_dir = data_dirs['ChEMBL']
    filt_dir = "%s/filtered" % chembl_dir
    summary_file = "%s/stats/chembl_public_dataset_summary.csv" % chembl_dir
    summary_df = pd.read_csv(summary_file, index_col=False)
    dset_names = set(summary_df.Dataset.values)
    # Plot distributions for the pairs of CYP datasets together
    cyp_dsets = dict(
        CYP2C9 = dict(AC50='CHEMBL25-CYP2C9_human_AC50_26Nov2019', IC50='CHEMBL25-CYP2C9_human_IC50_26Nov2019'),
        CYP2D6 = dict(AC50='CHEMBL25-CYP2D6_human_AC50_26Nov2019', IC50='CHEMBL25-CYP2D6_human_IC50_26Nov2019'),
        CYP3A4 = dict(AC50='CHEMBL25_CHEMBL25-CYP3A4_human_AC50_26Nov2019', IC50='CHEMBL25-CYP3A4_human_IC50_26Nov2019')
    )
    cyp_dset_names = []
    for cyp in sorted(cyp_dsets.keys()):
        ds_dict = cyp_dsets[cyp]
        cyp_dset_names.append(ds_dict['AC50'])
        cyp_dset_names.append(ds_dict['IC50'])
        ac50_path = "%s/%s_filt.csv" % (filt_dir, ds_dict['AC50'])
        ic50_path = "%s/%s_filt.csv" % (filt_dir, ds_dict['IC50'])
        ac50_df = pd.read_csv(ac50_path, index_col=False)
        ic50_df = pd.read_csv(ic50_path, index_col=False)
        # Report the overlap in compounds between the AC50 and IC50 datasets
        ac50_smiles = set(ac50_df.Smiles.values)
        ic50_smiles = set(ic50_df.Smiles.values)
        cmn_smiles = ac50_smiles & ic50_smiles
        print("For %s: %d SMILES strings in both datasets" % (cyp, len(cmn_smiles)))
        fig, ax = plt.subplots(figsize=(10,8))
        # NOTE(review): the AC50 curve is labeled 'PubChem' even though the data comes from a
        # ChEMBL AC50 file — looks like a leftover label from a PubChem comparison; confirm.
        ax = sns.distplot(ac50_df.pIC50.values, hist=False, kde_kws=dict(shade=True, bw=0.05), color='b', ax=ax, label='PubChem')
        # Split the IC50 data by censoring relation so each group gets its own curve
        ic50_lc_df = ic50_df[ic50_df.LogVarRelation == '<']
        ic50_rc_df = ic50_df[ic50_df.LogVarRelation == '>']
        ic50_uc_df = ic50_df[ic50_df.LogVarRelation == '=']
        ax = sns.distplot(ic50_uc_df.pIC50.values, hist=False, kde_kws=dict(shade=True, bw=0.05), color='g', ax=ax, label='Uncens')
        ax = sns.distplot(ic50_lc_df.pIC50.values, hist=False, kde_kws=dict(shade=False, bw=0.05), color='r', ax=ax, label='LeftCens')
        ax = sns.distplot(ic50_rc_df.pIC50.values, hist=False, kde_kws=dict(shade=False, bw=0.05), color='m', ax=ax, label='RightCens')
        ax.set_xlabel('pIC50')
        ax.set_title('Distributions of %s dataset values' % cyp)
        plt.show()
    # All remaining (non-CYP) datasets: one plot each, split by censoring relation
    other_dset_names = sorted(dset_names - set(cyp_dset_names))
    for dset_name in other_dset_names:
        # Look up the name of the log-transformed value column from the summary table
        log_var = summary_df.LogValue.values[summary_df.Dataset == dset_name][0]
        filt_path = "%s/%s_filt.csv" % (filt_dir, dset_name)
        dset_df = pd.read_csv(filt_path, index_col=False)
        uc_df = dset_df[dset_df.LogVarRelation == '=']
        lc_df = dset_df[dset_df.LogVarRelation == '<']
        rc_df = dset_df[dset_df.LogVarRelation == '>']
        log_uc_values = uc_df[log_var].values
        log_lc_values = lc_df[log_var].values
        log_rc_values = rc_df[log_var].values
        fig, ax = plt.subplots(figsize=(10,8))
        ax = sns.distplot(log_uc_values, hist=False, kde_kws=dict(shade=True, bw=0.05), color='b', ax=ax, label='Uncens')
        ax = sns.distplot(log_lc_values, hist=False, kde_kws=dict(shade=False, bw=0.05), color='r', ax=ax, label='LeftCens')
        ax = sns.distplot(log_rc_values, hist=False, kde_kws=dict(shade=False, bw=0.05), color='m', ax=ax, label='RightCens')
        ax.set_xlabel(log_var)
        ax.set_title('Distribution of log transformed values for %s' % dset_name)
        plt.show()
# ----------------------------------------------------------------------------------------------------------------------
def curate_chembl_xc50_assay(dset_df, target, endpoint, database='ChEMBL25'):
    """
    Examine data from individual ChEMBL assays in the given dataset to look for suspicious patterns of XC50
    values and censoring relations. Add relations where they appear to be needed, and filter out data from
    assays that seem to have only one-shot categorical data.

    Args:
        dset_df (DataFrame): Filtered ChEMBL dataset, with 'Assay ChEMBL ID', 'Standard Value',
            'Standard Relation' and 'LogVarRelation' columns.
        target (str): Target name, used in the stats output file name.
        endpoint (str): Endpoint name (e.g. IC50, AC50), used in the stats output file name.
        database (str): Source database label, used in the stats output file name.

    Returns:
        DataFrame: Curated dataset with one-shot assays removed and missing censoring
            relations filled in.
    """
    chembl_root = data_dirs['ChEMBL']
    assay_df = curate.freq_table(dset_df, 'Assay ChEMBL ID')
    assays = assay_df['Assay ChEMBL ID'].values
    counts = assay_df['Count'].values
    num_eq = []
    num_lt = []
    num_gt = []
    max_xc50s = []
    num_max_xc50 = []
    min_xc50s = []
    num_min_xc50 = []
    # For each assay ID, tabulate the number of occurrences of each relation, the min and max XC50
    # and the number of values reported as the max or min XC50
    for assay in assays:
        assay_dset_df = dset_df[dset_df['Assay ChEMBL ID'] == assay]
        xc50s = assay_dset_df['Standard Value'].values
        max_xc50 = max(xc50s)
        max_xc50s.append(max_xc50)
        min_xc50 = min(xc50s)
        min_xc50s.append(min_xc50)
        relations = assay_dset_df['Standard Relation'].values
        num_eq.append(sum(relations == '='))
        num_lt.append(sum(relations == '<'))
        num_gt.append(sum(relations == '>'))
        # NOTE: exact float equality is intentional here — values censored at a reporting
        # threshold are recorded verbatim, so identical floats indicate the same threshold
        num_max_xc50.append(sum(xc50s == max_xc50))
        num_min_xc50.append(sum(xc50s == min_xc50))
    assay_df['num_eq'] = num_eq
    assay_df['num_lt'] = num_lt
    assay_df['num_gt'] = num_gt
    assay_df['max_xc50'] = max_xc50s
    assay_df['num_max_xc50'] = num_max_xc50
    assay_df['min_xc50'] = min_xc50s
    assay_df['num_min_xc50'] = num_min_xc50
    # Flag assays that appear to report one-shot screening results only (because all values are left or
    # right censored at the same threshold)
    num_eq = np.array(num_eq)
    num_lt = np.array(num_lt)
    num_gt = np.array(num_gt)
    max_xc50s = np.array(max_xc50s)
    min_xc50s = np.array(min_xc50s)
    num_max_xc50 = np.array(num_max_xc50)
    num_min_xc50 = np.array(num_min_xc50)
    one_shot = (num_eq == 0) & (num_lt > 0) & (num_gt > 0)
    assay_df['one_shot'] = one_shot
    # Flag assays that appear not to report left-censoring correctly (because no values are censored
    # and there are multiple values at highest XC50)
    no_left_censoring = (counts == num_eq) & (num_max_xc50 >= 5)
    assay_df['no_left_censoring'] = no_left_censoring
    # Flag assays that appear not to report right-censoring correctly (because no values are censored
    # and there are multiple values at lowest XC50)
    no_right_censoring = (counts == num_eq) & (num_min_xc50 >= 5)
    assay_df['no_right_censoring'] = no_right_censoring
    assay_file = "%s/stats/%s_%s_%s_assay_stats.csv" % (chembl_root, database, target, endpoint)
    assay_df.to_csv(assay_file, index=False)
    print("Wrote %s %s assay censoring statistics to %s" % (target, endpoint, assay_file))
    # Now generate a "curated" version of the dataset
    assay_dsets = []
    for assay, is_one_shot, has_no_left_cens, has_no_right_cens in zip(assays, one_shot, no_left_censoring,
                                                                       no_right_censoring):
        # Skip over assays that appear to contain one-shot data
        if is_one_shot:
            print("Skipping apparent one-shot data from assay %s" % assay)
        else:
            assay_dset_df = dset_df[dset_df['Assay ChEMBL ID'] == assay].copy()
            xc50s = assay_dset_df['Standard Value'].values
            max_xc50 = max(xc50s)
            min_xc50 = min(xc50s)
            # Add censoring relations for rows that seem to need them. Note that the
            # log-variable relation is the opposite of the raw-value relation (pXC50 negates
            # the log of the value).
            relations = assay_dset_df['Standard Relation'].values
            log_relations = assay_dset_df['LogVarRelation'].values
            if has_no_left_cens:
                relations[xc50s == max_xc50] = '>'
                log_relations[xc50s == max_xc50] = '<'
                print("Adding missing left-censoring relations for assay %s" % assay)
            if has_no_right_cens:
                relations[xc50s == min_xc50] = '<'
                log_relations[xc50s == min_xc50] = '>'
                print("Adding missing right-censoring relations for assay %s" % assay)
            assay_dset_df['Standard Relation'] = relations
            assay_dset_df['LogVarRelation'] = log_relations
            assay_dsets.append(assay_dset_df)
    # NOTE(review): if every assay is flagged as one-shot, assay_dsets is empty and
    # pd.concat raises ValueError — confirm whether that case can occur in practice.
    curated_df = pd.concat(assay_dsets, ignore_index=True)
    return curated_df
# ----------------------------------------------------------------------------------------------------------------------
def curate_chembl_xc50_assays(database='ChEMBL25', species='human', force_update=False):
    """
    Examine data from individual ChEMBL assays in each dataset to look for suspicious patterns of XC50
    values and censoring relations. Add relations where they appear to be needed, and filter out data from
    assays that seem to have only one-shot categorical data.

    Args:
        database (str): Source database label, used in curated file names.
        species (str): Species label, used in curated file names.
        force_update (bool): If True, regenerate curated files even when they already exist.
    """
    chembl_root = data_dirs['ChEMBL']
    filtered_dir = '%s/filtered' % chembl_root
    curated_dir = '%s/curated' % chembl_root
    os.makedirs(curated_dir, exist_ok=True)
    for target in sorted(chembl_dsets.keys()):
        # Only bioactivity targets map to {endpoint: dataset_name} dicts; skip any
        # other entry types. (Was `type(...) == dict`; isinstance is the correct idiom.)
        if not isinstance(chembl_dsets[target], dict):
            continue
        for endpoint, dset_name in chembl_dsets[target].items():
            curated_file = "%s/%s_%s_%s_%s_curated.csv" % (curated_dir, database, target, endpoint, species)
            if os.path.exists(curated_file) and not force_update:
                print("\nCurated dataset %s already exists, skipping" % curated_file)
                continue
            print("\n\nCurating %s data for %s" % (endpoint, target))
            dset_file = "%s/%s_filt.csv" % (filtered_dir, dset_name)
            dset_df = pd.read_csv(dset_file, index_col=False)
            curated_df = curate_chembl_xc50_assay(dset_df, target, endpoint)
            curated_df.to_csv(curated_file, index=False)
            print("Wrote %s" % curated_file)
# ----------------------------------------------------------------------------------------------------------------------
def upload_chembl_raw_data(dset_name, endpoint, title, description, tags,
                       assay_category, functional_area, target_type,
                       target='', database='ChEMBL25', activity='inhibition', species='human',
                       force_update=False):
    """
    Read the named raw ChEMBL dataset from disk and upload it to the datastore,
    unless a dataset with the same key is already present (and force_update is False).

    Returns:
        The datastore OID of the raw dataset.
    """
    raw_dir = '%s/raw' % data_dirs['ChEMBL']
    # Raw ChEMBL downloads are semicolon-separated despite the .txt extension
    raw_df = pd.read_table("%s/%s.txt" % (raw_dir, dset_name), sep=';', index_col=False)
    bucket = 'public'
    filename = '%s.csv' % dset_name
    dataset_key = 'dskey_' + filename
    # Metadata key-value pairs attached to the uploaded dataset
    meta_kv = {
        'activity': activity,
        'assay_category': assay_category,
        'assay_endpoint': endpoint,
        'target_type': target_type,
        'functional_area': functional_area,
        'data_origin': database,
        'species': species,
        'file_category': 'experimental',
        'curation_level': 'raw',
        'matrix': 'in vitro',
        'sample_type': 'in_vitro',
        'id_col': 'Molecule ChEMBL ID',
        'smiles_col': 'Smiles',
        'response_col': 'Standard Value'}
    # PK datasets have no target; only tag bioactivity datasets with one
    if target != '':
        meta_kv['target'] = target
    ds_client = dsf.config_client()
    if force_update or not dsf.dataset_key_exists(dataset_key, bucket, ds_client):
        raw_meta = dsf.upload_df_to_DS(raw_df, bucket, filename=filename, title=title,
                                       description=description,
                                       tags=tags, key_values=meta_kv, client=None, dataset_key=dataset_key,
                                       override_check=True, return_metadata=True)
        print("Uploaded raw dataset with key %s" % dataset_key)
    else:
        raw_meta = dsf.retrieve_dataset_by_datasetkey(dataset_key, bucket, ds_client, return_metadata=True)
        print("Raw dataset %s is already in datastore, skipping upload." % dataset_key)
    return raw_meta['dataset_oid']
# ----------------------------------------------------------------------------------------------------------------------
def upload_chembl_curated_data(dset_name, endpoint, title, description, tags,
                       assay_category, functional_area, target_type,
                       target='', database='ChEMBL25', activity='inhibition', species='human',
                       raw_dset_oid=None, force_update=False):
    """
    Upload the curated version of a dataset to the datastore, skipping the upload
    if a dataset with the same key already exists (unless force_update is True).

    Returns:
        The datastore OID of the curated dataset.
    """
    curated_dir = '%s/curated' % data_dirs['ChEMBL']
    filtered_dir = '%s/filtered' % data_dirs['ChEMBL']
    if target == '':
        # This is a PK dataset, for which curation consists only of the initial filtering
        filename = '%s_curated.csv' % dset_name
        curated_file = "%s/%s_filt.csv" % (filtered_dir, dset_name)
    else:
        # This is a bioactivity dataset
        filename = "%s_%s_%s_%s_curated.csv" % (database, target, endpoint, species)
        curated_file = "%s/%s" % (curated_dir, filename)
    curated_df = pd.read_csv(curated_file, index_col=False)
    bucket = 'public'
    dataset_key = 'dskey_' + filename
    # Metadata key-value pairs attached to the uploaded dataset
    meta_kv = {
        'activity': activity,
        'assay_category': assay_category,
        'assay_endpoint': endpoint,
        'target_type': target_type,
        'functional_area': functional_area,
        'data_origin': database,
        'species': species,
        'file_category': 'experimental',
        'curation_level': 'curated',
        'matrix': 'in vitro',
        'sample_type': 'in_vitro',
        'id_col': 'Molecule ChEMBL ID',
        'smiles_col': 'rdkit_smiles',
        'response_col': log_var_map[endpoint] }
    if target != '':
        meta_kv['target'] = target
    # Link the curated dataset back to the raw dataset it was derived from
    if raw_dset_oid is not None:
        meta_kv['source_file_id'] = raw_dset_oid
    ds_client = dsf.config_client()
    if force_update or not dsf.dataset_key_exists(dataset_key, bucket, ds_client):
        curated_meta = dsf.upload_df_to_DS(curated_df, bucket, filename=filename, title=title,
                                           description=description,
                                           tags=tags, key_values=meta_kv, client=None, dataset_key=dataset_key,
                                           override_check=True, return_metadata=True)
        print("Uploaded curated dataset with key %s" % dataset_key)
    else:
        print("Curated dataset %s is already in datastore, skipping upload." % dataset_key)
        curated_meta = dsf.retrieve_dataset_by_datasetkey(dataset_key, bucket, ds_client, return_metadata=True)
    return curated_meta['dataset_oid']
# ----------------------------------------------------------------------------------------------------------------------
def create_ml_ready_chembl_dataset(dset_name, endpoint, target='', species='human', active_thresh=None,
                                   database='ChEMBL25', force_update=False):
    """
    Average replicate values from the curated version of the given dataset to give one value
    per unique compound. Select and rename columns to include only the ones we need for building
    ML models. Save the resulting dataset to disk.

    Args:
        dset_name (str): Base dataset name; used for file naming when target is '' (PK data).
        endpoint (str): Endpoint type: IC50, AC50, CL or Solubility.
        target (str): Target name for bioactivity datasets; '' indicates a PK dataset.
        species (str): Species label used in bioactivity file names.
        active_thresh (float or None): Activity threshold passed through to
            curate.aggregate_assay_data.
        database (str): Source database label used in bioactivity file names.
        force_update (bool): If True, rebuild the ML-ready file even if it already exists.

    Returns:
        DataFrame: The ML-ready dataset with one row per unique compound.
    """
    curated_dir = '%s/curated' % data_dirs['ChEMBL']
    filtered_dir = '%s/filtered' % data_dirs['ChEMBL']
    ml_ready_dir = '%s/ml_ready' % data_dirs['ChEMBL']
    os.makedirs(ml_ready_dir, exist_ok=True)
    if target == '':
        # This is a PK dataset, for which curation consists only of the initial filtering
        curated_file = "%s/%s_filt.csv" % (filtered_dir, dset_name)
        ml_ready_file = "%s/%s_ml_ready.csv" % (ml_ready_dir, dset_name)
    else:
        # This is a bioactivity dataset
        curated_file = "%s/%s_%s_%s_%s_curated.csv" % (curated_dir, database, target, endpoint, species)
        ml_ready_file = "%s/%s_%s_%s_%s_ml_ready.csv" % (ml_ready_dir, database, target, endpoint, species)
    if os.path.exists(ml_ready_file) and not force_update:
        # Bug fix: this path previously returned None, while the path below returns a
        # DataFrame. Load and return the existing file so callers always get data back.
        return pd.read_csv(ml_ready_file, index_col=False)
    dset_df = pd.read_csv(curated_file, index_col=False)
    # Rename and select the columns we want from the curated dataset
    param = log_var_map[endpoint]
    agg_cols = ['compound_id', 'rdkit_smiles', 'relation', param]
    colmap = {
        'Molecule ChEMBL ID': 'compound_id',
        'LogVarRelation': 'relation'
    }
    assay_df = dset_df.rename(columns=colmap)[agg_cols]
    # Compute a single value and relational flag for each compound
    ml_ready_df = curate.aggregate_assay_data(assay_df, value_col=param, active_thresh=active_thresh,
                                              id_col='compound_id', smiles_col='rdkit_smiles',
                                              relation_col='relation')
    ml_ready_df.to_csv(ml_ready_file, index=False)
    print("Wrote ML-ready data to %s" % ml_ready_file)
    return ml_ready_df
# ----------------------------------------------------------------------------------------------------------------------
def upload_chembl_ml_ready_data(dset_name, endpoint, title, description, tags,
                       assay_category, functional_area, target_type,
                       target='', database='ChEMBL25', activity='inhibition', species='human',
                       curated_dset_oid=None, force_update=False):
    """
    Upload an ML-ready dataset (previously written by create_ml_ready_chembl_dataset)
    to the datastore, skipping the upload when the key already exists unless
    force_update is True.

    Returns:
        The datastore OID of the ML-ready dataset.
    """
    ml_ready_dir = '%s/ml_ready' % data_dirs['ChEMBL']
    if target == '':
        # This is a PK dataset
        filename = '%s_ml_ready.csv' % dset_name
    else:
        # This is a bioactivity dataset
        filename = "%s_%s_%s_%s_ml_ready.csv" % (database, target, endpoint, species)
    ml_ready_df = pd.read_csv("%s/%s" % (ml_ready_dir, filename), index_col=False)
    bucket = 'public'
    dataset_key = 'dskey_' + filename
    # Metadata key-value pairs attached to the uploaded dataset
    meta_kv = {
        'activity': activity,
        'assay_category': assay_category,
        'assay_endpoint': endpoint,
        'target_type': target_type,
        'functional_area': functional_area,
        'data_origin': database,
        'species': species,
        'file_category': 'experimental',
        'curation_level': 'ml_ready',
        'matrix': 'in vitro',
        'sample_type': 'in_vitro',
        'id_col': 'compound_id',
        'smiles_col': 'base_rdkit_smiles',
        'response_col': log_var_map[endpoint]
    }
    if target != '':
        meta_kv['target'] = target
    # Link the ML-ready dataset back to the curated dataset it was derived from
    if curated_dset_oid is not None:
        meta_kv['source_file_id'] = curated_dset_oid
    ds_client = dsf.config_client()
    if force_update or not dsf.dataset_key_exists(dataset_key, bucket, ds_client):
        ml_ready_meta = dsf.upload_df_to_DS(ml_ready_df, bucket, filename=filename, title=title,
                                            description=description,
                                            tags=tags, key_values=meta_kv, client=None, dataset_key=dataset_key,
                                            override_check=True, return_metadata=True)
        print("Uploaded ML-ready dataset with key %s" % dataset_key)
    else:
        print("ML-ready dataset %s is already in datastore, skipping upload." % dataset_key)
        ml_ready_meta = dsf.retrieve_dataset_by_datasetkey(dataset_key, bucket, ds_client, return_metadata=True)
    return ml_ready_meta['dataset_oid']
# ----------------------------------------------------------------------------------------------------------------------
def chembl_dataset_curation_pipeline(dset_table_file, force_update=False):
    """
    Run a series of ChEMBL datasets through the process of filtering, curation, and aggregation for use in
    building machine learning models. Upload the raw, curated and ML-ready datasets to the datastore.
    The datasets are described in the CSV file dset_table_file, which tabulates the attributes of each dataset
    and the metadata to be included with the uploaded datasets.

    Args:
        dset_table_file (str): Path to a CSV with one row per dataset, providing the columns
            Dataset, endpoint, target, assay_category, functional_area, target_type,
            activity, species, database, title and description.
        force_update (bool): If True, regenerate/reupload each stage even when its output
            already exists.
    """
    chembl_dir = data_dirs['ChEMBL']
    raw_dir = "%s/raw" % chembl_dir
    filt_dir = "%s/filtered" % chembl_dir
    curated_dir = "%s/curated" % chembl_dir
    ml_ready_dir = "%s/ml_ready" % chembl_dir
    # NOTE(review): only filt_dir is created here; curated_dir is written to below without
    # an explicit makedirs — confirm it's guaranteed to exist (e.g. created elsewhere).
    os.makedirs(filt_dir, exist_ok=True)
    table_df = pd.read_csv(dset_table_file, index_col=False)
    # Blank out NaNs so string tests like `target != ''` work uniformly
    table_df = table_df.fillna('')
    for i, dset_name in enumerate(table_df.Dataset.values):
        # Per-dataset attributes and upload metadata from the driver table
        endpoint = table_df.endpoint.values[i]
        target = table_df.target.values[i]
        assay_category = table_df.assay_category.values[i]
        functional_area = table_df.functional_area.values[i]
        target_type = table_df.target_type.values[i]
        activity = table_df.activity.values[i]
        species = table_df.species.values[i]
        database = table_df.database.values[i]
        title = table_df.title.values[i]
        description = table_df.description.values[i]
        tags = ['public', 'raw']
        # Upload the raw dataset as-is
        raw_dset_oid = upload_chembl_raw_data(dset_name, endpoint, title, description, tags,
                            assay_category, functional_area, target_type,
                            target=target, database=database, activity=activity, species=species,
                            force_update=force_update)
        # First curation step: Filter dataset to remove rows with missing IDs, SMILES, values, etc.
        raw_path = "%s/%s.txt" % (raw_dir, dset_name)
        filt_path = "%s/%s_filt.csv" % (filt_dir, dset_name)
        if not os.path.exists(filt_path) or force_update:
            dset_df = pd.read_table(raw_path, sep=';', index_col=False)
            print("Filtering dataset %s" % dset_name)
            filt_df = filter_chembl_dset(dset_df)
            filt_df.to_csv(filt_path, index=False)
        else:
            filt_df = pd.read_csv(filt_path, index_col=False)
            print("Filtered dataset file %s already exists" % filt_path)
        # Second curation step: Fix or remove anomalous data. Currently this is only done for
        # bioactivity data.
        if target != '':
            curated_file = "%s/%s_%s_%s_%s_curated.csv" % (curated_dir, database, target, endpoint, species)
            if not os.path.exists(curated_file) or force_update:
                print("Curating %s data for %s" % (endpoint, target))
                curated_df = curate_chembl_xc50_assay(filt_df, target, endpoint, database=database)
                curated_df.to_csv(curated_file, index=False)
            else:
                curated_df = pd.read_csv(curated_file, index_col=False)
                print("Curated %s dataset file for %s already exists" % (endpoint, target))
            description += "\nCurated using public_data_curation functions filter_chembl_dset and curate_chembl_xc50_assay."
        else:
            curated_df = filt_df
            description += "\nCurated using public_data_curation function filter_chembl_dset."
        # title and description are mutated cumulatively across stages, so the curated upload
        # gets the filtering note and the ML-ready upload gets both notes
        title = title.replace('Raw', 'Curated')
        tags = ['public', 'curated']
        # Upload curated data to datastore
        curated_dset_oid = upload_chembl_curated_data(dset_name, endpoint, title, description, tags,
                            assay_category, functional_area, target_type,
                            target=target, database=database, activity=activity, species=species,
                            raw_dset_oid=raw_dset_oid, force_update=force_update)
        # Prepare ML-ready dataset
        if target == '':
            ml_ready_file = "%s/%s_ml_ready.csv" % (ml_ready_dir, dset_name)
        else:
            ml_ready_file = "%s/%s_%s_%s_%s_ml_ready.csv" % (ml_ready_dir, database, target, endpoint, species)
        if not os.path.exists(ml_ready_file) or force_update:
            print("Creating ML-ready dataset file %s" % ml_ready_file)
            ml_ready_df = create_ml_ready_chembl_dataset(dset_name, endpoint, target=target, species=species, active_thresh=None,
                                                         database=database, force_update=force_update)
        else:
            ml_ready_df = pd.read_csv(ml_ready_file, index_col=False)
            print("ML-ready dataset file %s already exists" % ml_ready_file)
        title = title.replace('Curated', 'ML-ready')
        description += "\nAveraged for ML model building using public_data_curation.create_ml_ready_dataset."
        tags = ['public', 'ML-ready']
        # Upload ML-ready data to the datastore
        # NOTE(review): ml_ready_dset_oid is assigned but never used afterwards
        ml_ready_dset_oid = upload_chembl_ml_ready_data(dset_name, endpoint, title, description, tags,
                            assay_category, functional_area, target_type,
                            target=target, database=database, activity=activity, species=species,
                            curated_dset_oid=curated_dset_oid, force_update=force_update)
        print("Done with dataset %s\n" % dset_name)
# ----------------------------------------------------------------------------------------------------------------------
def chembl_replicate_variation(dset_df, value_col='pIC50', dset_label='', min_freq=3, num_assays=4):
    """
    Plot the variation among measurements in a ChEMBL dataset for compounds with multiple measurements
    from the same or different ChEMBL assay IDs.

    Args:
        dset_df (DataFrame): Curated ChEMBL dataset with 'rdkit_smiles', 'Assay ChEMBL ID',
            'Assay Description' and the value_col columns.
        value_col (str): Name of the measurement column to plot.
        dset_label (str): Title for the plot.
        min_freq (int): Minimum number of measurements a compound must have to be included.
        num_assays (int): Number of most-frequent assays to label individually; the rest
            are grouped as 'Other'.

    Returns:
        DataFrame: Table of replicated compounds with mean/SD of the measured value,
            sorted by mean value, with a synthetic 'cmpd_id' column.
    """
    # Compounds measured at least min_freq times
    rep_df = curate.freq_table(dset_df, 'rdkit_smiles', min_freq=min_freq)
    # Per-compound mean and SD of the measured value across all its records
    rep_df['mean_value'] = [np.mean(dset_df[dset_df.rdkit_smiles == s][value_col].values)
                            for s in rep_df.rdkit_smiles.values]
    rep_df['std_value'] = [np.std(dset_df[dset_df.rdkit_smiles == s][value_col].values)
                           for s in rep_df.rdkit_smiles.values]
    # Order compounds by mean value so the strip plot shows a smooth progression
    rep_df = rep_df.sort_values(by='mean_value')
    nrep = rep_df.shape[0]
    rep_df['cmpd_id'] = ['C%05d' % i for i in range(nrep)]
    rep_dset_df = dset_df[dset_df.rdkit_smiles.isin(rep_df.rdkit_smiles.values)].merge(
                    rep_df, how='left', on='rdkit_smiles')
    # Label the records coming from the num_assays most common assays with the first part of
    # their assay descriptions; label the others as 'Other'.
    assay_df = curate.freq_table(rep_dset_df, 'Assay ChEMBL ID')
    other_ids = assay_df['Assay ChEMBL ID'].values[num_assays:]
    assay_labels = np.array([desc[:30]+'...' for desc in rep_dset_df['Assay Description'].values])
    assay_labels[rep_dset_df['Assay ChEMBL ID'].isin(other_ids)] = 'Other'
    rep_dset_df['Assay'] = assay_labels
    # One row of points per compound, colored by assay
    fig, ax = plt.subplots(figsize=(10,15))
    sns.stripplot(x=value_col, y='cmpd_id', hue='Assay', data=rep_dset_df,
                  order=rep_df.cmpd_id.values)
    ax.set_title(dset_label)
    return rep_df
# ----------------------------------------------------------------------------------------------------------------------
# Filename templates for curated bioactivity datasets, with a %s field to plug in the target or property name. Probably
# we should just rename the files from all data sources to follow the standard template:
# (database)_(target)_(endpoint)_(species)_curated.csv.
# Maps each data source name to a curated-dataset filename template; the single %s
# slot is filled with the target (gene) name.
curated_dset_file_templ = {
    'ChEMBL': "ChEMBL25_%s_IC50_human_curated.csv",
    'DTC': "%s_DTC_curated.csv",
    'Excape': "%s_Excape_curated.csv",
}
# ----------------------------------------------------------------------------------------------------------------------
def chembl_jak_replicate_variation(min_freq=2, num_assays=4):
    """
    Plot replicate-measurement variation for each of the curated JAK1/2/3 ChEMBL
    datasets.

    Returns:
        dict: Maps each gene name to the replicate summary table produced by
            chembl_replicate_variation.
    """
    db = 'ChEMBL'
    curated_dir = "%s/curated" % data_dirs[db]
    file_templ = curated_dset_file_templ[db]
    replicate_tables = {}
    for gene in ['JAK1', 'JAK2', 'JAK3']:
        gene_df = pd.read_csv("%s/%s" % (curated_dir, file_templ % gene), index_col=False)
        replicate_tables[gene] = chembl_replicate_variation(
            gene_df, value_col='pIC50', min_freq=min_freq, dset_label=gene, num_assays=num_assays)
    return replicate_tables
# ----------------------------------------------------------------------------------------------------------------------
def chembl_assay_bias(target, endpoint, database='ChEMBL25', species='human', min_cmp_assays=5, min_cmp_cmpds=10):
    """
    Investigate systematic biases among assays for target, by selecting data for compounds with data from
    multiple assays and computing deviations from mean for each compound; then reporting mean deviation for
    each assay.

    Args:
        target: gene/target name used in the curated file name.
        endpoint: endpoint name (e.g. IC50) used in the curated file name.
        database: database label in the curated file name.
        species: species label in the curated file name.
        min_cmp_assays: minimum number of other assays an assay must be compared against to be flagged.
        min_cmp_cmpds: minimum number of multi-assay compounds an assay must have to be flagged.

    Returns:
        DataFrame of per-assay statistics sorted by mean deviation, or None if the
        dataset only contains one assay. Also writes the table to a CSV under the
        ChEMBL stats directory.
    """
    curated_dir = '%s/curated' % data_dirs['ChEMBL']
    curated_file = "%s/%s_%s_%s_%s_curated.csv" % (curated_dir, database, target, endpoint, species)
    dset_df = pd.read_csv(curated_file, index_col=False)
    # Frequency table of assays (labeled with description and BAO format), min 2 rows each.
    assay_df = curate.labeled_freq_table(dset_df, ['Assay ChEMBL ID', 'Assay Description', 'BAO Label'], min_freq=2)
    assays = assay_df['Assay ChEMBL ID'].values
    print("\nChecking bias for ChEMBL %s %s dataset:" % (target, endpoint))
    if assay_df.shape[0] == 1:
        print("Dataset %s has data for one assay only; skipping." % curated_file)
        return None
    # Restrict to data from assays with at least 2 rows of data
    dset_df = dset_df[dset_df['Assay ChEMBL ID'].isin(assay_df['Assay ChEMBL ID'].values.tolist())]
    # Tabulate overall mean and SD and compound count for each assay
    log_var = log_var_map[dset_df['Standard Type'].values[0]]
    mean_values = [np.mean(dset_df[dset_df['Assay ChEMBL ID'] == assay][log_var].values) for assay in assays]
    stds = [np.std(dset_df[dset_df['Assay ChEMBL ID'] == assay][log_var].values) for assay in assays]
    ncmpds = [len(set(dset_df[dset_df['Assay ChEMBL ID'] == assay]['rdkit_smiles'].values)) for assay in assays]
    assay_df['num_cmpds'] = ncmpds
    assay_df['mean_%s' % log_var] = mean_values
    assay_df['std_%s' % log_var] = stds
    assay_df = assay_df.rename(columns={'Count': 'num_rows'})
    # Select compounds with data from multiple assays. Compute mean values for each compound.
    # Then compute deviations from mean for each assay for each compound.
    assay_devs = {assay : [] for assay in assays}
    cmp_assays = {assay : set() for assay in assays}  # other assays each assay shares compounds with
    cmp_cmpds = {assay : 0 for assay in assays}       # count of multi-assay compounds per assay
    rep_df = curate.freq_table(dset_df, 'rdkit_smiles', min_freq=2)
    for smiles in rep_df.rdkit_smiles.values:
        sset_df = dset_df[dset_df.rdkit_smiles == smiles]
        sset_assays = sset_df['Assay ChEMBL ID'].values
        sset_assay_set = set(sset_assays)
        num_assays = len(set(sset_assays))
        if num_assays > 1:
            vals = sset_df[log_var].values
            mean_val = np.mean(vals)
            deviations = vals - mean_val
            for assay, dev in zip(sset_assays, deviations):
                assay_devs[assay].append(dev)
                cmp_assays[assay] |= (sset_assay_set - set([assay]))
                cmp_cmpds[assay] += 1
    assay_df['num_cmp_assays'] = [len(cmp_assays[assay]) for assay in assays]
    assay_df['num_cmp_cmpds'] = [cmp_cmpds[assay] for assay in assays]
    mean_deviations = [np.mean(assay_devs[assay]) for assay in assays]
    assay_df['mean_deviation'] = mean_deviations
    assay_df = assay_df.sort_values(by='mean_deviation', ascending=False)
    assay_file = "%s/stats/%s_%s_assay_bias.csv" % (data_dirs['ChEMBL'], target, endpoint)
    assay_df.to_csv(assay_file, index=False)
    # Flag assays compared against at least min_cmp_assays other assays over min_cmp_cmpds compounds
    flag_df = assay_df[(assay_df.num_cmp_assays >= min_cmp_assays) & (assay_df.num_cmp_cmpds >= min_cmp_cmpds)]
    print("For %s %s data: %d assays with robust bias data:" % (target, endpoint, flag_df.shape[0]))
    if flag_df.shape[0] > 0:
        print(flag_df)
    print("Wrote assay bias statistics to %s" % assay_file)
    return assay_df
# ----------------------------------------------------------------------------------------------------------------------
def chembl_xc50_assay_bias():
    """
    Tabulate systematic assay biases for all the ChEMBL XC50 datasets.

    Runs chembl_assay_bias() over every (target, endpoint) pair found in the
    module-level chembl_dsets table; entries that are not per-endpoint dicts
    are skipped.
    """
    for target in sorted(chembl_dsets.keys()):
        endpoints = chembl_dsets[target]
        if type(endpoints) == dict:
            for endpoint in endpoints.keys():
                chembl_assay_bias(target, endpoint)
# ----------------------------------------------------------------------------------------------------------------------
# Functions for comparing datasets from different sources
# ----------------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------------
def compare_jak_dset_compounds():
    """
    Plot a Venn diagram for each JAK gene showing the numbers of compounds the
    public datasets (one per database) have in common.
    """
    databases = sorted(data_dirs.keys())
    for gene in ('JAK1', 'JAK2', 'JAK3'):
        smiles_sets = []
        for database in databases:
            path = "%s/curated/%s" % (data_dirs[database], curated_dset_file_templ[database] % gene)
            gene_df = pd.read_csv(path, index_col=False)
            smiles_sets.append(set(gene_df.rdkit_smiles.values))
        fig, ax = plt.subplots(figsize=(8,8))
        venn3(smiles_sets, databases)
        plt.title(gene)
        plt.show()
# ----------------------------------------------------------------------------------------------------------------------
def find_jak_dset_duplicates():
    """
    Check for potential duplication of records within and between datasets. A record is a potential
    duplicate if it has the same base SMILES string, IC50 value and standard relation.

    For each JAK gene, prints within-dataset duplicate counts, then for every
    pair of databases the number of shared SMILES and identical
    (Relation, pIC50, SMILES) records.
    """
    # Per-database renaming of source columns onto the common (Relation, pIC50) schema.
    colmap = dict(
        ChEMBL={'pIC50': 'pIC50',
                'LogVarRelation': 'Relation'
                },
        DTC={'pIC50': 'pIC50',
             'pIC50_relation': 'Relation'
             },
        Excape={'pXC50': 'pIC50'
                } )
    jak_genes = ['JAK1', 'JAK2', 'JAK3']
    dbs = sorted(data_dirs.keys())
    for gene in jak_genes:
        dedup = {}
        smiles_set = {}
        for db in dbs:
            dset_file = "%s/curated/%s" % (data_dirs[db], curated_dset_file_templ[db] % gene)
            dset_df = pd.read_csv(dset_file, index_col=False)
            dset_df = dset_df.rename(columns=colmap[db])
            if db == 'Excape':
                # Excape provides no relation column; treat its values as exact.
                dset_df['Relation'] = "="
            dset_df = dset_df[['Relation', 'pIC50', 'rdkit_smiles']]
            is_dup = dset_df.duplicated().values
            print("Within %s %s dataset, %d/%d rows are potential duplicates" % (db, gene, sum(is_dup), dset_df.shape[0]))
            dedup[db] = dset_df.drop_duplicates()
            smiles_set[db] = set(dset_df.rdkit_smiles.values)
        print('\n')
        # Compare every pair of databases. (Was dbs[:2], which is only correct
        # when there are exactly three databases; dbs[:-1] generalizes it.)
        for i, db1 in enumerate(dbs[:-1]):
            for db2 in dbs[i+1:]:
                combined_df = pd.concat([dedup[db1], dedup[db2]], ignore_index=True)
                is_dup = combined_df.duplicated().values
                n_cmn_smiles = len(smiles_set[db1] & smiles_set[db2])
                print("Between %s and %s %s datasets, %d common SMILES, %d identical responses" % (db1, db2, gene,
                                                                                                   n_cmn_smiles,
                                                                                                   sum(is_dup)))
        print('\n')
'''
|
999,581 | 036af910011091bcc2ef0ebb9acfc81886a7bf8c | #!/usr/bin/env python
import os
import sys
import argparse
import collections
import numpy as np
import cv2
import math
import random
import time
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
sys.path.insert(0, "../../")
import models
from VideoTemporalPrediction import VideoTemporalPrediction
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]="0"
# Command-line interface for the flow-stream evaluation script.
parser = argparse.ArgumentParser(description='PyTorch Two-Stream Action Recognition - Test')
# Fix: help text previously claimed "(default: 1)" while the actual default is 2.
parser.add_argument('-s', '--split', default=2, type=int, metavar='S',
                    help='which split of data to work on (default: 2)')
def softmax(x):
    """Return the softmax of x as a list of probabilities summing to 1.

    Subtracts the maximum before exponentiating so large scores do not
    overflow math.exp (the original raised OverflowError for inputs > ~709).
    """
    m = max(x)
    y = [math.exp(k - m) for k in x]
    sum_y = math.fsum(y)
    return [k / sum_y for k in y]
def main():
    """Evaluate the temporal (optical-flow) stream of a two-stream action
    recognition model on one UCF101 validation split, print accuracy, and save
    the per-video averaged score vectors to an .npy file.
    """
    args = parser.parse_args()
    model_path = '../../checkpoints/flow_s'+str(args.split)+'.pth.tar'
    data_path = '../../datasets/ucf101_frames'
    #data_dir = "~/UCF101/ucf101_flow_img_tvl1_gpu"
    start_frame = 0
    num_categories = 101
    # Load the pretrained flow ResNet-152 checkpoint and switch to eval mode.
    model_start_time = time.time()
    params = torch.load(model_path)
    temporal_net = models.flow_resnet152(pretrained=False, num_classes=101)
    temporal_net.load_state_dict(params['state_dict'])
    temporal_net.cuda()
    temporal_net.eval()
    model_end_time = time.time()
    model_time = model_end_time - model_start_time
    print("Action recognition temporal model is loaded in %4.4f seconds." % (model_time))
    # Validation list: each line is "<clip_path> <num_frames> <label>".
    val_file = "./splits/val_split%d.txt"%(args.split)
    f_val = open(val_file, "r")
    val_list = f_val.readlines()
    print("we got %d test videos" % len(val_list))
    line_id = 1
    match_count = 0
    result_list = []
    for line in val_list:
        line_info = line.split(" ")
        clip_path = os.path.join(data_path,line_info[0])
        num_frames = int(line_info[1])
        input_video_label = int(line_info[2])
        # Per-frame class scores for the clip. NOTE(review): despite the
        # variable name, this is the temporal-stream prediction.
        spatial_prediction = VideoTemporalPrediction(
            clip_path,
            temporal_net,
            num_categories,
            start_frame,
            num_frames)
        # Average the per-frame scores into one clip-level score vector.
        avg_spatial_pred_fc8 = np.mean(spatial_prediction, axis=1)
        # print(avg_spatial_pred_fc8.shape)
        result_list.append(avg_spatial_pred_fc8)
        # avg_spatial_pred = softmax(avg_spatial_pred_fc8)
        pred_index = np.argmax(avg_spatial_pred_fc8)
        print("flow split "+str(args.split)+", sample %d/%d: GT: %d, Prediction: %d" % (line_id, len(val_list), input_video_label, pred_index))
        if pred_index == input_video_label:
            match_count += 1
        line_id += 1
    print(match_count)
    print(len(val_list))
    print("Accuracy is %4.4f" % (float(match_count)/len(val_list)))
    np.save("ucf101_flow_resnet152_s"+str(args.split)+".npy", np.array(result_list))
if __name__ == "__main__":
main()
|
999,582 | 0c5490cb2a9c5920a00a05c8c78a2e6fa8cf12d7 | import gin
import tensorflow as tf
import pandas as pd
@gin.configurable
def preprocess(image):
    """
    PURPOSE: Dataset preprocessing: crop to the informative region, then resize.

    Args:
        image: image tensor to be preprocessed

    Returns:
        The image cropped to a 256x209 box offset by 15 columns, resized to 256x256.
    """
    cropped = tf.image.crop_to_bounding_box(image, 0, 15, 256, 209)
    return tf.image.resize(cropped, (256, 256))
def resampling(df_imbalanced, frac=1):
    """Balance a binary Retinopathy-grade dataframe by oversampling the minority class.

    Args:
        df_imbalanced: imbalanced data frame of paths and labels; must contain a
            'Retinopathy grade' column with values 0 (minority) and 1 (majority).
        frac: frac argument passed to DataFrame.sample() for the final shuffle.

    Returns:
        Shuffled dataframe with equal class counts; 'Retinopathy grade' is cast
        back to str so it is accepted by flow_from_dataframe.
    """
    df_imbalanced = df_imbalanced.astype({'Retinopathy grade': int})
    df_minority = df_imbalanced[df_imbalanced['Retinopathy grade'] == 0]
    df_majority = df_imbalanced[df_imbalanced['Retinopathy grade'] == 1]
    # Imbalance of the data: majority class count minus minority class count.
    difference = len(df_majority) - len(df_minority)
    # replace=True is required: the gap usually exceeds the minority size, and
    # sampling without replacement raises ValueError in that case.
    df_sampled_from_minority = df_minority.sample(n=difference, replace=True)
    # Concatenate the minority class, majority class and newly sampled minority rows.
    df_balanced_data = pd.concat([df_minority, df_majority, df_sampled_from_minority], axis=0)
    # Shuffle the resampled data.
    df_balanced_data = df_balanced_data.sample(frac=frac)
    # Convert the labels to strings to be accepted by flow from dataframe.
    df_balanced_data = df_balanced_data.astype({'Retinopathy grade': str})
    return df_balanced_data
|
999,583 | 5824a665eaed22aa7131c756db5711fc2e7c07ad | from Logic.CRUD import add
from Tests.test_ALL import run_all_tests
from UI.console import run_console
def main():
    """Seed the booking list with demo entries, run the console UI, then the test suite."""
    bookings = []
    undo_stack = []
    redo_stack = []
    seed = [
        ('1', 'Carmen', 'economy', 1230, 'da'),
        ('2', 'Marius', 'economy plus', 250, 'nu'),
        ('3', 'Marius', 'economy', 250, 'nu'),
        ('4', 'Claudiu', 'business', 1023, 'da'),
    ]
    for ident, name, seat_class, price, checkin in seed:
        bookings = add(ident, name, seat_class, price, checkin, bookings, undo_stack, redo_stack)
    run_console(bookings, undo_stack, redo_stack)
    run_all_tests()
main()
999,584 | a184b8063cf98b1a11d63eeb2cf3883c655777c1 | import requests
import json
def getUpdates(offset, token):
    """Long-poll the Telegram Bot API getUpdates endpoint.

    Args:
        offset: update id to start from (acknowledges earlier updates).
        token: bot API token.

    Returns:
        The list of update objects from the 'result' field of the response.
    """
    endpoint = 'https://api.telegram.org/bot' + token + '/getUpdates'
    response = requests.get(endpoint + "?offset={}&timeout=60".format(offset), timeout=120)
    return response.json()['result']
class Sender(object):
    """Sends text message or video when text or youtube link received respectively"""

    def __init__(self, token):
        self.token = token

    def keyboard(self, formats_list):
        """Build a two-row inline keyboard JSON string from a list of format labels."""
        half = int(len(formats_list) / 2)
        rows = [
            [{'text': fmt, 'callback_data': fmt} for fmt in formats_list[:half]],
            [{'text': fmt, 'callback_data': fmt} for fmt in formats_list[half:]],
        ]
        return "{}".format(json.dumps({'inline_keyboard': rows}))

    def sendMessage(self, chat_id=None, text=None, reply_markup=None):
        """POST a text message (optionally with an inline keyboard) to the chat."""
        endpoint = 'https://api.telegram.org/bot' + self.token + '/sendMessage'
        return requests.post(endpoint, params={'chat_id': chat_id, 'text': text, 'reply_markup': reply_markup})

    def sendVideo (self, chat_id=None, video=None):
        """POST a video file to the chat."""
        endpoint = 'https://api.telegram.org/bot' + self.token + '/sendVideo'
        return requests.post(endpoint, params={'chat_id': chat_id}, files={'video': video})
999,585 | 77ce1b8d16d9822f770f035b8c54179e102e6f47 | from event_queue import *
from population_graph import *
from graph_node import *
import numpy as np
import matplotlib.pyplot as plt
def build_tree(infect_rate, cure_rate, degree, gens):
    """Construct a regular tree population graph plus a detached sentinel node.

    Args:
        infect_rate: infection rate passed to PopulationGraph.
        cure_rate: cure rate passed to PopulationGraph.
        degree: number of children per node.
        gens: depth (number of generations) of the tree.

    Returns:
        (tree, root, temp_node) where temp_node is an unconnected node used as
        a sentinel endpoint for cure events.
    """
    tree = PopulationGraph(infect_rate, cure_rate)
    root = tree.add_node(GraphNode(None))
    tree_helper(tree, root, degree, gens, 0)
    sentinel = tree.add_node(GraphNode(None))
    return tree, root, sentinel
def tree_helper(tree, node, degree, gens, counter):
    """Recursively attach `degree` children to `node` until depth `gens` is reached."""
    if counter >= gens:
        return
    for _ in range(degree):
        child = tree.add_node(GraphNode(None))
        tree.add_connection(node, child)
        tree_helper(tree, child, degree, gens, counter + 1)
def run_simulation(tree, root, temp_node, queue, max_iterations):
    """Run the event-driven SIS/SIR-style epidemic simulation on the tree.

    Event.state: 1 = infection attempt (incoming is the target, outgoing the
    source), 0 = recovery. Runs until the queue empties (infection dies out)
    or max_iterations events are processed.

    Returns:
        The simulation time of the last processed event.
    """
    current_time = 0
    num_iterations = 0
    # Seed the epidemic: infect the root via the sentinel temp_node at time 0.
    queue.insert(current_time, Event(1,temp_node,root))
    while(not(queue.is_empty()) and num_iterations <= max_iterations):
        num_iterations += 1
        current_time, event = queue.pop_event()
        if event.state == 0:
            # Recovery event for the node held in `outgoing`.
            tree.cure(event.outgoing)
            event.outgoing.add_cure_time(current_time)
        if event.state == 1:
            if event.outgoing != temp_node:
                # Re-sample the source's next infection attempt toward the same
                # target, but only while the source is still infected.
                temp_infect = tree.infect_distribution_sample(None,'exponential',use_default=True)
                if temp_infect + current_time < event.outgoing.recover_time:
                    queue.insert(current_time + temp_infect, Event(1,event.incoming,event.outgoing))
            if event.incoming.infected_state == 0:
                # Susceptible target: infect it, schedule its recovery, then
                # schedule infection attempts toward each of its neighbors that
                # fall before its recovery time.
                tree.infect(event.incoming)
                event.incoming.add_infection_time(current_time)
                temp_recover = tree.cure_distribution_sample(None,'exponential',use_default=True)
                event.incoming.recover_time = current_time + temp_recover
                queue.insert(event.incoming.recover_time, Event(0,event.incoming,temp_node))
                for node in event.incoming.get_neighbors():
                    temp_infect = tree.infect_distribution_sample(None,'exponential',use_default=True)
                    if temp_infect + current_time < event.incoming.recover_time:
                        queue.insert(temp_infect+current_time,Event(1,event.incoming,node))
        # Progress trace: time | currently infected count | events processed.
        print(str(current_time) + " | " + str(len(tree.infected_nodes)) + " | " + str(num_iterations))
    return current_time
# Simulation parameters.
infect_param = 1.25 #aka lambda
cure_rate = 1
degree = 4
gens = 7
max_iterations = 5e6
num_simulations=25
finish_times = []
root_hits = []
for i in range(num_simulations):
    # Build a fresh tree and event queue for each independent run.
    tree, root, temp_node = build_tree(infect_param,cure_rate,degree,gens)
    queue = EventQueue(tree)
    finish_time = run_simulation(tree,root,temp_node,queue,max_iterations)
    finish_times.append(finish_time)
    # Number of times the root was (re)infected during the run.
    root_hits.append(len(root.infection_times))
print(finish_times)
print(root_hits)
# Histogram of epidemic survival times across the runs.
plt.figure(1)
plt.hist(finish_times)
plt.title("Infection Survival Times")
plt.plot()
# Histogram of root reinfection counts across the runs.
plt.figure(2)
plt.hist(root_hits)
plt.title("Number of Root Hits")
plt.plot()
999,586 | 8bbd5498368f5ed0368b4d04edc3c41239bb94aa | # Generated by Django 2.1.3 on 2019-04-26 16:52
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: change customers.customer_id to an IntegerField with yes/no choices."""

    dependencies = [
        ('mana', '0007_auto_20190426_2103'),
    ]

    operations = [
        migrations.AlterField(
            model_name='customers',
            name='customer_id',
            field=models.IntegerField(choices=[(1, 'yes'), (2, 'no')]),
        ),
    ]
|
999,587 | 20f1f65792b983bc7ebb70266a0888c3a839c27e | # -*- coding: utf-8 -*-
"""
Provide a wrapper and helpers for OPEN_STACK library.
See https://api-explorer.scalr.com/ for API details
"""
from __future__ import absolute_import
import json
import logging
import re
import requests
import toil.provider.base
import toil
import toil.util.decorator
logger = logging.getLogger(__name__)
class OpenStackLib(toil.provider.base.BaseProvider):
    """
    Provider wrapper for OpenStack functionality.

    Properties :
        config: dict with configuration data, keyed by profile name
    """

    def __init__(self, toil, config):
        super(OpenStackLib, self).__init__(toil, config)

    def session(self, profile='default'):
        """
        Create an OpenStack API session for a configured profile.

        Args:
            profile (): the profile defined in config to use

        Returns:
            OpenStackApiSession

        Raises:
            toil.CloudException: if the profile is not present in the config.
        """
        if profile not in self.config:
            raise toil.CloudException(
                "profile '{profile}' not defined in config {config}".format(profile=profile, config=self.config))
        self.configure_proxy()
        return OpenStackApiSession(self._toil, profile, self.config[profile])
class OpenStackApiSession(requests.Session):
    """
    HTTP session for the OpenStack APIs, built on requests.Session.

    Authenticates against Keystone v3 on construction and caches the service
    catalog so later calls can be routed by service name and interface type.
    """

    def __init__(self, toil, profile, config):
        self._profile = profile
        self._config = config
        self._toil = toil
        self._token = None
        self._token_x_subject = None
        # While True, prepare_request() skips the X-Auth-Token header — there
        # is no token yet during the initial authentication call.
        self._authenticating = False
        self.URL_AUTH_TOKEN = "{end_point}/auth/tokens".format(end_point=self._config['auth_url'])
        self._services = {}
        super(OpenStackApiSession, self).__init__()
        self.get_auth_token()

    def prepare_request(self, request):
        """
        Attach the auth token and JSON content type to every outgoing request
        (except while authenticating).

        Args:
            request (str):

        Returns: request
        """
        request = super(OpenStackApiSession, self).prepare_request(request)
        if not self._authenticating:
            request.headers.update({
                "X-Auth-Token": self._token_x_subject,
                "Content-Type": 'application/json'
            })
        logger.debug("URL: %s", request.url)
        return request

    def get_auth_token(self):
        """
        Get an auth token from OpenStack (Keystone v3 password auth, scoped to
        the configured project) and cache the service catalog.

        Returns: None; sets self._token, self._token_x_subject, self._services.
        """
        self._authenticating = True
        auth_data = {
            "auth": {
                "identity": {
                    "methods": [
                        "password"
                    ],
                    "password": {
                        "user": {
                            "domain": {
                                # 'user_domain' overrides 'domain' when present.
                                "name": self._config['user_domain'] if 'user_domain' in self._config else self._config[
                                    'domain']
                            },
                            "name": self._config['user'],
                            "password": self._config['password']
                        }
                    }
                },
                "scope": {
                    "project": {
                        "domain": {
                            "name": self._config['domain']
                        },
                        "name": self._config['project'],
                    }
                }
            }
        }
        response = self.post(None, self.URL_AUTH_TOKEN, data=json.dumps(auth_data))
        self._authenticating = False
        json_response = response.json()
        self._token = json_response['token']
        # Keystone returns the actual token string in the X-Subject-Token header.
        self._token_x_subject = response.headers['x-subject-token']
        catalog = json_response['token']['catalog']
        for service in catalog:
            self._services[service['name']] = service

    def get_services(self):
        """
        Returns: Dictionary of catalog entries keyed by service name
        """
        return self._services

    def list(self, service_name, url, service_type='public', **kwargs):
        """
        GET a collection endpoint, following pagination until all data is
        collected.

        Args:
            service_name (str): catalog service to route through (None = raw url)
            url (str): collection path, e.g. /servers?limit=10
            service_type (str): endpoint interface (public/internal/admin)
            **kwargs (): passed through to requests

        Returns: List of all items across pages
        """
        data = []
        body = self.get(service_name, url, service_type, **kwargs).json()
        # Used to match body keys against the url ignoring punctuation, e.g.
        # nova returns 'security_groups' for /v2.0/security-groups.
        regex = re.compile('[^a-zA-Z0-9]')
        while body is not None:
            next_url = None
            for body_key in body.keys():
                # looking for a key to match url. for example: servers in /servers?limit=10
                if body_key in url:
                    page_data = body[body_key]
                    data.extend(page_data)
                elif regex.sub('', body_key) in regex.sub('', url):
                    # nova /v2.0/security-groups returns security_groups as body key
                    page_data = body[body_key]
                    data.extend(page_data)
                elif 'next' in body_key:
                    # glance style: body carries a 'next' link when paginated
                    next_url = self.get_service_url(service_name, body['next'], service_type)
                elif 'link' in body_key:
                    # nova style: list of {'rel': 'next', 'href': ...} links
                    links = body[body_key]
                    for link in links:
                        if 'rel' in link and link['rel'] == 'next':
                            next_url = link['href']
                            if 'https' not in next_url:
                                next_url = next_url.replace('http', 'https')
            if next_url is not None:
                body = self.get(None, next_url).json()
            else:
                body = None
        return data

    def create(self, service_name, url, service_type='public', **kwargs):
        """Alias for post()."""
        return self.post(service_name, url, service_type, **kwargs)

    def fetch(self, service_name, url, service_type='public', **kwargs):
        """Alias for get()."""
        return self.get(service_name, url, service_type, **kwargs)

    def delete(self, service_name, url, service_type='public', **kwargs):
        """DELETE, routed through the service catalog when service_name is given."""
        if service_name is not None:
            service_url = self.get_service_url(service_name, url, service_type)
            return super(OpenStackApiSession, self).delete(service_url, **kwargs)
        else:
            return super(OpenStackApiSession, self).delete(str(url), **kwargs)

    def post(self, service_name, url, service_type='public', **kwargs):
        """POST, routed through the service catalog when service_name is given."""
        if service_name is not None:
            service_url = self.get_service_url(service_name, url, service_type)
            return super(OpenStackApiSession, self).post(service_url, **kwargs)
        else:
            return super(OpenStackApiSession, self).post(url, **kwargs)

    def put(self, service_name, url, service_type='public', **kwargs):
        """PUT, routed through the service catalog when service_name is given."""
        if service_name is not None:
            service_url = self.get_service_url(service_name, url, service_type)
            return super(OpenStackApiSession, self).put(service_url, **kwargs)
        else:
            return super(OpenStackApiSession, self).put(url, **kwargs)

    def patch(self, service_name, url, service_type='public', **kwargs):
        """PATCH, routed through the service catalog when service_name is given."""
        if service_name is not None:
            service_url = self.get_service_url(service_name, url, service_type)
            return super(OpenStackApiSession, self).patch(service_url, **kwargs)
        else:
            return super(OpenStackApiSession, self).patch(str(url), **kwargs)

    @toil.util.decorator.retry(3, requests.exceptions.RequestException)
    def get(self, service_name, url, service_type='public', **kwargs):
        """GET with retry, routed through the service catalog when service_name is given."""
        if service_name is not None:
            service_url = self.get_service_url(service_name, url, service_type)
            return super(OpenStackApiSession, self).get(service_url, **kwargs)
        else:
            return super(OpenStackApiSession, self).get(str(url), **kwargs)

    @toil.util.decorator.retry(3, requests.exceptions.RequestException)
    def head(self, service_name, url, service_type='public', **kwargs):
        """HEAD with retry, routed through the service catalog when service_name is given."""
        if service_name is not None:
            service_url = self.get_service_url(service_name, url, service_type)
            return super(OpenStackApiSession, self).head(service_url, **kwargs)
        else:
            return super(OpenStackApiSession, self).head(str(url), **kwargs)

    def get_service_url(self, service_name, url, service_type='public'):
        """Resolve a catalog service + interface to a full URL; falls back to
        the auth endpoint when the service or interface is unknown."""
        service_endpoint = None
        if service_name not in self._services:
            logger.debug("service '{service_name}' not defined in available services {services}".format(
                service_name=service_name, services=self._services.keys()))
        else:
            for endpoint in self._services[service_name]['endpoints']:
                if endpoint['interface'] == service_type:
                    service_endpoint = endpoint
                    break
        if service_endpoint is not None:
            logger.debug(
                "service:{service_name} service_type:{service_type} url:{url})".format(service_name=service_name,
                                                                                       service_type=service_type,
                                                                                       url=service_endpoint['url']))
            return service_endpoint['url'] + url
        else:
            # Fix: the original passed an unformatted (and misspelled) template
            # string straight to the logger; format it properly.
            logger.debug("service '{service_type}' interface not defined in '{service_name}'".format(
                service_type=service_type, service_name=service_name))
            url = "{end_point}{url}".format(end_point=self._config['auth_url'], url=url)
            return url
|
999,588 | 8da58c8dce2a62e536f524a1217cb70394912781 | Enonce = """
Longest Collatz sequence
Problem 14
The following iterative sequence is defined for the set of positive integers:
n -> n/2 (n is even)
n -> 3n + 1 (n is odd)
Using the rule above and starting with 13, we generate the following sequence:
13 -> 40 -> 20 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1
It can be seen that this sequence (starting at 13 and finishing at 1) contains 10 terms. Although it has not been proved yet (Collatz Problem), it is thought that all starting numbers finish at 1.
Which starting number, under one million, produces the longest chain?
NOTE: Once the chain starts the terms are allowed to go above one million.
"""
# Iterator style
class CollatzSequence:
    """Iterator over the Collatz sequence starting at `start`.

    Yields each term, ending after 1 is produced. Uses integer (floor)
    division: the original used int(n/2), whose float division loses
    precision for n >= 2**53.
    """

    def __init__(self, start=1):
        self.current = start  # next term to yield; None once exhausted

    def __iter__(self):
        return self

    def __next__(self):
        # `is None` instead of `== None` (identity test for the sentinel).
        if self.current is None:
            raise StopIteration
        return self.value()

    def value(self):
        """Return the current term and advance to the next (None after 1)."""
        save = self.current
        if self.current == 1:
            self.current = None
        elif self.current % 2 == 0:
            self.current = self.current // 2
        else:
            self.current = 3 * self.current + 1
        return save
def main():
    """Solve Project Euler 14: the start < 1,000,000 with the longest Collatz chain.

    Scans starting numbers from high to low, keeping the longest sequence seen
    so far (Solution_Sequence) and splicing in its tail as soon as a new chain
    reaches a term already contained in it.
    """
    print(40*"=")
    print(Enonce)
    print(40*"-")
    import time
    # Longest chain found so far and its starting number.
    Solution_Sequence = list([1])
    Solution = 1
    # 1261.26 secondes
    max = 999_999 #100_000 #13
    sequence = list()
    start = time.perf_counter()
    for index in range(max, 0, -1):
        sequence.clear()
        for collatz in CollatzSequence(index):
            sequence.append(collatz)
            if collatz in Solution_Sequence: # End of Collatz sequence already known
                if Solution_Sequence.index(collatz) < sequence.index(collatz):
                    # Current chain is longer: splice in the known tail and record it.
                    sequence = sequence + Solution_Sequence[Solution_Sequence.index(collatz)+1:]
                    Solution_Sequence = sequence.copy()
                    Solution = index
                    print(f"{round(time.perf_counter()-start,1)}s : New max with {Solution}")
                    print(f"""Length Collatz sequence for {index} = {len(sequence)} :
{sequence}""")
                else:
                    sequence.append(f"See Collatz({Solution})")
                break
        #print(f"""Length Collatz sequence for {index} = {len(sequence)} :
        #{sequence}""")
    end = time.perf_counter()
    print(f"{Solution} en {round(end-start,2)} secondes")
    print(f"Length of Collatz sequence with {Solution} = {len(Solution_Sequence)}")
    print(40*"-")
    print("Solution = {}".format(Solution))
    print(40*"=")
if __name__ == "__main__":
# execute only if run as a script
main()
|
999,589 | fd531f9e88120d4aec5fb81e05c9ce98f61c57e2 | from django.db import models
class data(models.Model):
    """Record of a subscription with its payment schedule and an optional image.

    NOTE(review): class name should be PascalCase and the 'Subscriptions'
    field lowercase per Django naming conventions.
    """
    name = models.CharField(max_length = 40)
    description = models.TextField(max_length = 200)
    Subscriptions = models.TextField(max_length = 200)
    payment_due = models.DateField()
    # Stored as free text — presumably a currency amount; verify against callers.
    payment_amount = models.TextField()
    image = models.ImageField(blank=True, null=True)
|
999,590 | 894979c2691f4d32d77f5d47f106d0de759de1ac | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
import asyncio
import pytest
import logging
import json
import sys
import traceback
from dev_utils import get_random_dict
logger = logging.getLogger(__name__)
logger.setLevel(level=logging.INFO)
# TODO: add tests for various application properties
# TODO: is there a way to call send_c2d so it arrives as an object rather than a JSON string?
@pytest.mark.describe("Client C2d")
class TestReceiveC2d(object):
    """End-to-end tests for receiving cloud-to-device (C2D) messages."""

    @pytest.mark.it("Can receive C2D")
    @pytest.mark.quicktest_suite
    async def test_receive_c2d(self, session, service_helper, event_loop, leak_tracker):
        leak_tracker.set_initial_object_list()
        message = json.dumps(get_random_dict())
        queue = asyncio.Queue()

        # Background consumer: forwards every received message into the queue.
        async def listener(sess):
            try:
                async with sess.messages() as messages:
                    async for message in messages:
                        await queue.put(message)
            except asyncio.CancelledError:
                # In python3.7, asyncio.CancelledError is an Exception. We don't
                # log this since it's part of the shutdown process. After 3.7,
                # it's a BaseException, so it just gets caught somewhere else.
                raise
            except Exception as e:
                # Without this line, exceptions get silently ignored until
                # we await the listener task.
                logger.error("Exception")
                logger.error(traceback.format_exception(e))
                raise

        async with session:
            listener_task = asyncio.create_task(listener(session))
            await service_helper.send_c2d(message, {})
            received_message = await queue.get()
        # Leaving the session context disconnects and cancels the listener.
        assert session.connected is False
        with pytest.raises(asyncio.CancelledError):
            await listener_task
        listener_task = None
        assert received_message.payload == message
        # Drop the reference so the leak tracker does not see it as live.
        del received_message
        leak_tracker.check_for_leaks()

    @pytest.mark.it("Can receive C2D using anext")
    @pytest.mark.skip("leaks")
    @pytest.mark.quicktest_suite
    @pytest.mark.skipif(
        sys.version_info.major == 3 and sys.version_info.minor < 10,
        reason="anext was not introduced until 3.10",
    )
    async def test_receive_c2d_using_anext(self, session, service_helper, event_loop, leak_tracker):
        leak_tracker.set_initial_object_list()
        message = json.dumps(get_random_dict())
        async with session:
            async with session.messages() as messages:
                await service_helper.send_c2d(message, {})
                # Pull exactly one message with the built-in anext().
                received_message = await anext(messages)
        assert session.connected is False
        assert received_message.payload == message
        del received_message
        leak_tracker.check_for_leaks()
|
999,591 | 37db8877267adcd30bd0ba76075fe0b11f2d47ed | from processdata import CSVData
from utils.normalize import NormalizeWord
def main():
    """Load the dictionary CSV and strip whitespace from every column.

    NOTE(review): uses the Python 2 print statement — this module is Python 2 only.
    """
    # path to data"
    path_data = "data/dictionary.csv"
    # Load data from csv
    csv = CSVData()
    english_words = csv.load_data(path_data)
    # strip() for all column in dict
    nm = NormalizeWord()
    english_words = nm.trim_all_column(english_words)
    print english_words.head(3)
|
999,592 | 8f23613cbc9503ebbd467a7d65e6ed1187ec6b86 | import pdb
# Module-level accumulator shared between findprime() and addprime().
primeseries = []

def findprime(n):
    """Append the primes found in list n to primeseries, then print running sums.

    NOTE(review): Python 2 only. Trial division never runs for values < 3, so
    1 and 2 are both accepted — 1 is not prime; confirm intent.
    """
    i=0
    while i<len(n):
        j =2
        flag=0
        # Trial division: flag=1 means n[i] has a divisor, i.e. is composite.
        while j < n[i]:
            if n[i]%j==0:
                flag=1
                break
            j += 1
        if flag == 0:
            primeseries.append(n[i])
        i += 1
    # print "prime numbers series is",primeseries
    addprime(primeseries)
def addprime(n):
    """Print the running (cumulative) sum of the numbers in n.

    NOTE(review): `i` is never incremented, so this loops forever printing the
    same partial sum whenever n is non-empty. The bare `pdb` expression is a
    no-op — presumably pdb.set_trace() was intended.
    """
    pdb
    i=0
    adprime = 0
    while i< len(n):
        adprime+=n[i]
        print adprime
def Main():
    """Entry point: run the prime search over a fixed demo series."""
    # NOTE(review): bare `pdb` is a no-op — presumably pdb.set_trace() was intended.
    pdb
    series = [1, 2, 4, 56, 78, 79, 23]
    findprime(series)
|
999,593 | bde8eb41fe47084657ce1d37cf6d3eddc2b85f96 | from abc import ABC, abstractmethod
class BaseRetriever(ABC):
    """Abstract interface every document retriever must implement."""

    @abstractmethod
    def retrieve(self, query, candidate_doc_ids=None, top_k=1):
        """Return the top_k documents for `query`, optionally restricted to candidate_doc_ids."""
        pass
|
"""Brute-force Virtual Network Embedding (VNE) simulator.

Reads a substrate network and a list of virtual network requests (VNRs)
from text files. For every time slot it first releases resources whose
lease expires, then tries to embed each arriving VNR by exhaustively
testing all node permutations and all path combinations, keeping the
cheapest feasible mapping. Revenue, cost, acceptance ratio, R/C ratio
and CPU/BW utilization are tracked per time slot.
"""
import VNE_func as fc
import itertools
import numpy as np
from copy import copy, deepcopy

filename_sub = 'substrate1209_1.txt'
filename_vir = 'virtual1212.txt'
# BUGFIX: VNE_func is imported as ``fc`` but the helpers were called
# unqualified, which raises NameError -- all calls are now qualified.
cpu_substrate, bw_substrate = fc.read_substrate(filename_sub)
VNR_list = fc.read_virtual(filename_vir)
num_of_timeslots = 5000
total_cpu = sum(cpu_substrate)
total_bw = sum(sum(bw_substrate))/2
print("total cpu = ",total_cpu)
print("total bw = ",total_bw)
acceptance = 0
total_cost = 0
total_revenue = 0
total_VNR = 0
# node_resource_table[i] holds flat (release_time, amount) pairs for node i.
node_resource_table = [[] for i in range(len(cpu_substrate))]
revenue_array = []
cost_array = []
acceptance_array = []
RC_array = []
cpu_utilization_array = []
bw_utilization_array = []
# First enumerate every link pair present in the original substrate network.
link_pairs = list(fc.find_all_links(bw_substrate))
# bw_resource_table[i] holds flat (release_time, amount) pairs for link i.
bw_resource_table = [[] for i in range(len(link_pairs))]
#print("all pairs: ", link_pairs)
for time in range(num_of_timeslots):
    print("time = ", time)
    if VNR_list!=[]:
        arr_time = VNR_list[0]
    # First release any leased resources that expire at this time slot.
    for i in range(len(cpu_substrate)): #node resource
        for j in range(0,len(node_resource_table[i]),2):
            if node_resource_table[i][j]==time:
                cpu_substrate[i] = cpu_substrate[i]+ node_resource_table[i][j+1]
    for i in range(len(link_pairs)): #link resource
        for j in range(0,len(bw_resource_table[i]),2):
            if bw_resource_table[i][j]==time:
                which_link = link_pairs[i]
                bw_substrate[which_link[0]][which_link[1]] = bw_substrate[which_link[0]][which_link[1]] + bw_resource_table[i][j+1]
                bw_substrate[which_link[1]][which_link[0]] = bw_substrate[which_link[0]][which_link[1]]
    #print("node_resource_table: ", node_resource_table)
    #print("link_resource_table: ", bw_resource_table)
    #print("cpu substrate: ", cpu_substrate)
    #print("bw substrate: ", bw_substrate)
    while arr_time==time:
        #print("take a VNR!")
        [arr_time, life_time, node_num, node_cap, bw_cap] = fc.take_a_VNR(VNR_list, time) #pop one VNR from the VNR_list
        #print("arr time: ", arr_time)
        #print("life_time: ", life_time)
        #print("node_num: ", node_num)
        #print("node_cap: ", node_cap)
        #print("bw_cap: ", bw_cap)
        total_VNR = total_VNR+1
        del VNR_list[0:2*node_num+2]
        #print("VNR_list: ",VNR_list)
        subnodes = list(range(0,len(cpu_substrate)))
        #virlinks = list(range(0,len(node_cap)-1))
        # test all permutations
        all_combinations = list(itertools.permutations(subnodes, node_num))
        #print(all_combinations)
        # Theoretical optimum: every virtual link mapped onto a one-hop path.
        opt_cost = sum(bw_cap)
        temp_best_cost = 100000000
        temp_opt = -1 # index of the best node-mapping combination
        opt_link_index = -1 # index of the best link-path combination
        #print("cpu substrate = ", cpu_substrate)
        #print("bw_substrate = ", bw_substrate)
        for i in range(len(all_combinations)):
            #print("i = ",i)
            cost = 100000000
            node_check = 1
            bw_check = 1
            path_num = np.zeros(len(node_cap)-1)
            for k in range(len(node_cap)):
                if node_cap[k]>cpu_substrate[all_combinations[i][k]]: # first check this combination satisfies all node constraints
                    node_check = 0
                    break
            if node_check==1: # nodes passed; now test the links
                #print("pass node check, for the ",i, "combinations")
                bw_substrate_copy = deepcopy(bw_substrate)
                graph = fc.build_graph(bw_substrate_copy,1)
                path_comb = []
                for s in range(len(bw_cap)):
                    path = fc.depthFirst(graph, all_combinations[i][s], [], [])
                    path = fc.find_all_path(all_combinations[i][s], all_combinations[i][s+1], path) # keep only paths from source to destination
                    path_comb.append(path)
                #print("path_comb = ", path_comb)
                total_path_comb = []
                for s in range(len(bw_cap)):
                    each_path_num = list(range(0,len(path_comb[s])))
                    total_path_comb.append(each_path_num)
                #print("total path comb = ", total_path_comb)
                for k in range(len(bw_cap)):
                    if total_path_comb[k]==[]:
                        print("link mapping fail!")
                        bw_check = 0
                        break
            if node_check==1 and bw_check==1:
                all_comb = [[]]
                possible_path_comb = fc.list_all_comb(total_path_comb, all_comb)
                #print("possible path comb = ", possible_path_comb)
                # check whether each path candidate satisfies bw constraints, if yes, calculate the cost and compare with temp_best_cost.
                for s in range(len(possible_path_comb)):
                    cost = 0
                    bw_copy = deepcopy(bw_substrate)
                    #print("bw_substrate_copy = ", bw_copy)
                    #print("turn to the ,", s, "possible_path_comb")
                    flag = 0
                    for k in range(len(bw_cap)):
                        if flag ==-1:
                            break
                        else:
                            current_path = path_comb[k][possible_path_comb[s][k]]
                            #print("path for virtual link ",k, "is ", current_path)
                            for h in range(len(current_path)-1):
                                bw_copy[current_path[h]][current_path[h+1]] = bw_copy[current_path[h]][current_path[h+1]] - bw_cap[k]
                                bw_copy[current_path[h+1]][current_path[h]] = bw_copy[current_path[h]][current_path[h+1]]
                                if bw_copy[current_path[h]][current_path[h+1]]>=0:
                                    cost = cost + bw_cap[k]
                                    #print("bw_copy = ",bw_copy)
                                    #print("cost = ", cost)
                                else:
                                    cost = 100000000
                                    #print("link mapping fail!")
                                    flag = -1
                                    break
                    if cost<temp_best_cost:
                        #print("update current best!")
                        temp_best_cost = cost
                        temp_opt = i
                        opt_link_index = s
                        link_opt = []
                        for x in range(len(bw_cap)):
                            link_opt.append(path_comb[x][possible_path_comb[opt_link_index][x]])
                        #print("current link opt = ", link_opt)
            if temp_best_cost == opt_cost: # stop once the theoretical optimum is reached
                print("Find the opt solution ", i, ", and the best path is ", link_opt )
                break
        if temp_opt!=-1 and opt_link_index!=-1:
            #print("temp opt = ", temp_opt)
            #print("link opt = ", link_opt)
            print("the solution is ", all_combinations[temp_opt])
        else:
            print("reject this VNR!")
        if temp_opt!=-1:
            acceptance = acceptance+1
            total_cost = total_cost + temp_best_cost
            total_revenue = total_revenue + opt_cost
            for i in range(len(node_cap)): # record when node resources can be released, and deduct them
                cpu_substrate[all_combinations[temp_opt][i]] = cpu_substrate[all_combinations[temp_opt][i]]-node_cap[i] # deduct substrate node CPU
                node_resource_table[all_combinations[temp_opt][i]].append(arr_time+life_time)
                node_resource_table[all_combinations[temp_opt][i]].append(node_cap[i])
            # record when link resources can be released, and deduct them
            for i in range(len(node_cap)-1):
                for j in range(len(link_opt[i])-1):
                    bw_substrate[link_opt[i][j]][link_opt[i][j+1]] = bw_substrate[link_opt[i][j]][link_opt[i][j+1]] - bw_cap[i] # deduct bandwidth
                    bw_substrate[link_opt[i][j+1]][link_opt[i][j]] = bw_substrate[link_opt[i][j]][link_opt[i][j+1]]
                    a = link_opt[i][j]
                    b = link_opt[i][j+1]
                    # link_pairs stores each undirected link once, as [min, max]
                    if a<b:
                        bw_resource_table[link_pairs.index([a,b])].append(arr_time+life_time)
                        bw_resource_table[link_pairs.index([a,b])].append(bw_cap[i])
                    else:
                        bw_resource_table[link_pairs.index([b,a])].append(arr_time+life_time)
                        bw_resource_table[link_pairs.index([b,a])].append(bw_cap[i])
        if VNR_list!=[]:
            arr_time = VNR_list[0]
        else:
            break
    # Per-time-slot bookkeeping of utilization and cumulative metrics.
    cpu_utilization_array.append(1 - sum(cpu_substrate)/total_cpu)
    bw_utilization_array.append(1 - sum(sum(bw_substrate))/total_bw/2)
    revenue_array.append(total_revenue)
    cost_array.append(total_cost)
    if total_cost>0:
        RC_array.append(total_revenue/total_cost)
    else:
        RC_array.append(0)
    if total_VNR>0:
        acceptance_array.append(acceptance/total_VNR)
    else:
        acceptance_array.append(0)
#print all information
print("revenue =", revenue_array)
print("cost =", cost_array)
print("acceptance ratio =", acceptance_array)
print("RC ratio = ", RC_array)
print("CPU utilization =", cpu_utilization_array)
print("BW utilization =", bw_utilization_array)
|
999,595 | 6ec9be52fdb459d9d22e97f9eca5fb36fbb598cb | #!/usr/bin/env python
"""
Plot distribution of size increases
"""
import csv
import config
import matplotlib.pyplot as plt
import numpy as np
import math
config.setup()
# load data
data = []
with open(config.data_path + '/strata-simplication-increase.csv', 'rb') as csvfile:
spamreader = csv.reader(csvfile, delimiter=' ', quotechar='|')
for row in spamreader:
data.append(float(row[0]))
low = 2.
high = 2500.
print high
print max(data)
print low
print min(data)
assert(min(data) > 1/2)
assert(max(data) < high)
nbins = 40
fig, ax = plt.subplots()
ax.set_xscale('log')
rects = ax.hist(data, bins=10 ** np.linspace(math.log10(1./low), math.log10(high), 40))
plt.gca().set_xlim([1./low,high])
plt.xticks([ 1, 10, 100 ,1000], ["equal", "10x smaller", "100x smaller", "1000x smaller"], rotation=0)
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(12)
# c = 0
# for rect in rects:
# height = rect.get_height()
# ax.text(rect.get_x() + rect.get_width()/2., height, '%d' % int(height*total),
# ha='center', va='bottom', size=10)
# ax.text(rect.get_x() + rect.get_width()/2., -0.004, str(c),
# ha='center', va='top', size=14)
# c += 1
plt.xlabel('Relative size decrease simplified formulas')
plt.ylabel('Number of formulas')
plt.subplots_adjust(left=0.13, bottom=0.18)
plt.savefig(config.output + '/size2.pdf')
|
999,596 | 04e845f918f89ff03a673e999e2008df2c827cf3 | from django.conf.urls.defaults import patterns, include, url
from django.conf import settings

# Delegate every application URL to the core app's URLconf.
urlpatterns = patterns('', url(r'^', include('core.urls')))

# Well-known files and static assets served directly from STATIC_ROOT.
static_files = [
    r'^static/(?P<path>.*)$',
    r'^(?P<path>favicon.ico)$',
    r'^(?P<path>robots.txt)$',
    r'^(?P<path>humans.txt)$',
    r'^(?P<path>crossdomain.xml)$',
    r'^(?P<path>apple-touch-icon(-\d*x\d*)?(-precomposed)?.png)$',
]

# Register one static-serve route per pattern above.
for pattern in static_files:
    urlpatterns += patterns(
        '',
        url(pattern, 'django.views.static.serve',
            {'document_root': settings.STATIC_ROOT}),
    )
|
999,597 | 0599c0c50233f0a3c8192e4adfccac4f3d936c58 | # Copyright (c) Alibaba, Inc. and its affiliates.
from easycv.datasets.segmentation import SegDataset as _SegDataset
from modelscope.metainfo import Datasets
from modelscope.msdatasets.cv.easycv_base import EasyCVBaseDataset
from modelscope.msdatasets.task_datasets.builder import TASK_DATASETS
from modelscope.utils.constant import Tasks
@TASK_DATASETS.register_module(
    group_key=Tasks.image_segmentation, module_name=Datasets.SegDataset)
class SegDataset(EasyCVBaseDataset, _SegDataset):
    """EasyCV-backed dataset for semantic segmentation.

    Thin adapter that registers EasyCV's ``SegDataset`` in the ModelScope
    task-dataset registry. For the underlying implementation see:
    https://github.com/alibaba/EasyCV/blob/master/easycv/datasets/segmentation/raw.py .

    Args:
        split_config (dict): Dataset root path from MSDataset, e.g.
            ``{"train": "local cache path"}`` or
            ``{"evaluation": "local cache path"}``.
        preprocessor (Preprocessor): Optional preprocessor instance; make
            sure it fits the model if supplied. Not supported yet.
        mode: Training or Evaluation.
        data_source: Data source config used to parse the input data.
        pipeline: Sequence of transform objects or config dicts to compose.
        ignore_index (int): Label index to be ignored.
        profiling: If set True, will print transform time.
    """

    def __init__(self,
                 split_config=None,
                 preprocessor=None,
                 mode=None,
                 *args,
                 **kwargs) -> None:
        # Initialise the ModelScope wrapper first, then hand the remaining
        # positional/keyword arguments to the EasyCV dataset.  Note that the
        # base class expects the extras bundled as ``args=``/``kwargs=``.
        EasyCVBaseDataset.__init__(
            self,
            split_config=split_config,
            preprocessor=preprocessor,
            mode=mode,
            args=args,
            kwargs=kwargs)
        _SegDataset.__init__(self, *args, **kwargs)
|
try:
    # Python 2: both str and unicode count as strings.
    _STRING_TYPES = (basestring,)
except NameError:
    # Python 3: basestring is gone; plain str covers all text.
    _STRING_TYPES = (str,)


def by_example(obj):
    """Build a JSON-schema-like fragment describing ``obj`` by example.

    Recursively maps a sample value to a schema dict with ``type`` and
    ``default`` keys (objects additionally get ``properties``, non-empty
    arrays get ``items`` inferred from the first element).

    :param obj: sample value (string, None, list/tuple, dict, bool,
        int or float).
    :returns: dict describing the value; ``{}`` for ``None``.
    :raises TypeError: for values of any other class.
    """
    if isinstance(obj, _STRING_TYPES):
        return {'type': 'string', 'default': obj}
    if obj is None:
        # None carries no type information.
        return {}
    if isinstance(obj, (list, tuple)):
        ret = {'type': 'array', 'default': obj}
        # Infer the item schema from the first element, if it is typed.
        if obj and obj[0] is not None:
            item = by_example(obj[0])
            item.pop('default')
            ret['items'] = item
        return ret
    if isinstance(obj, dict):
        ret = {'type': 'object', 'default': obj, 'properties': {}}
        for k, v in obj.items():
            ret['properties'][k] = by_example(v)
        return ret
    # bool must be tested before int: isinstance(True, int) is True.
    if isinstance(obj, bool):
        return {'type': 'boolean', 'default': obj}
    if isinstance(obj, int):
        return {'type': 'integer', 'default': obj}
    if isinstance(obj, float):
        return {'type': 'number', 'default': obj}
    raise TypeError('Invalid item {0!r} of class {1}'.format(obj, type(obj)))
if __name__ == '__main__':
    import sys
    import json
    import pprint

    # Load the JSON example file named on the command line and pretty-print
    # the inferred schema.  ``with`` closes the file deterministically
    # (the original leaked the handle).
    with open(sys.argv[1]) as json_file:
        schema = by_example(json.load(json_file))
    pprint.pprint(schema)
999,599 | 12a5694106673a25d1e5f07a5518c76bab5e411d | from django.urls import path
from . import views

# URL namespace for reversing, e.g. {% url 'home:index' %}.
app_name = 'home'

urlpatterns = [
    path('', views.index, name='index'),
    # member: create and list library members
    path('add-member/', views.add_member, name='add-member'),
    path('member-list/', views.member_list, name='member-list'),
    # book: create and list books
    path('add-book/', views.add_book, name='add-book'),
    path('book-list/', views.book_list, name='book-list'),
    # borrow: record borrowings (optionally pre-selecting a member)
    path('add-borrow/', views.add_borrow, name='add-borrow'),
    path('add-borrow/<int:member_id>/', views.add_borrow2, name='add-borrow2'),
    path('borrow-list/', views.borrow_list, name='borrow-list'),
    path('user-book-borrow/<int:member_id>/', views.book_borrow_by_member, name='user-book-borrow'),
    # return: two-step return flow (pick book, then confirm by id)
    path('return/', views.return_book_input, name='return-1'),
    path('return/<int:book_id>/', views.return_book, name='return-2'),
    # error / success landing pages
    path('404/', views.not_found, name='not_found'),
    path('success/', views.success, name='success'),
    # history per member and per book
    path('history-member/<int:member_id>/', views.history_member, name='history-member'),
    path('history-book/<int:book_id>/', views.history_book, name='history-book'),
    # generic delete: model name and primary key in the URL
    path('delete/<str:model>/<int:id>/', views.delete, name='delete'),
    # book lost: two-step flow like returns
    path('book-lost/', views.book_lost_input, name="book-lost-input"),
    path('book-lost/<int:book_id>/', views.book_lost, name='book-lost'),
]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.