index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
989,600 | 739063a13cfe1e8c6577e31f5a09735a1a064224 | # Program will draw a shape using nested loops
# 10/28/2018
# P4LAB - Nested Loops
# Jacob White
#
# Import Turtle
def main():
    """Draw a ten-pointed figure by rotating a parallelogram with turtle.

    Side effect: opens a turtle graphics window (black background, cyan pen).
    """
    # Imported lazily so the module can be loaded without a display.
    import turtle
    # Assign variable t to a turtle pen
    t = turtle.Turtle()
    # Make the turtle program background black
    turtle.Screen().bgcolor('black')
    # Pen color cyan
    t.color('cyan')
    # Ten rotated copies of the shape: 10 * 36 degrees = full 360-degree turn.
    # BUG FIX: the inner loop reused `i`, shadowing the outer loop variable;
    # both are unused, so `_` makes the intent explicit.
    for _ in range(10):
        # Two (forward 100, right 60, forward 100, right 120) passes trace
        # one closed parallelogram.
        for _ in range(2):
            t.forward(100)
            t.right(60)
            t.forward(100)
            t.right(120)
        t.right(36)

main()
|
989,601 | 2ba209ef9f74c88e2ebd7fad205cd1b2eb66fe77 | from sqlalchemy import Column, Date, Float, ForeignKey, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, sessionmaker
from sqlalchemy import create_engine
Base = declarative_base()
#TABLE INFORMATION ARE PLACED HERE.
class THE_TABLE_NAME(Base):
    """Template declarative ORM model; rename the class, table and columns
    before real use (placeholder names are intentional)."""
    __tablename__ = 'THE_TABLE_NAME'
    # Surrogate integer primary key.
    id = Column(Integer, primary_key=True)
    # Three placeholder VARCHAR(60) columns.
    firstcol = Column(String(60))
    secondcol = Column(String(60))
    thirdcol = Column(String(60))
989,602 | 3e9ed357a8407641fed08a14b84fc9ec97297dab | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
"""Utility functions for the Tools Service"""
import ossdbtoolsservice.utils.cancellation
import ossdbtoolsservice.utils.constants
import ossdbtoolsservice.utils.log
import ossdbtoolsservice.utils.serialization
import ossdbtoolsservice.utils.thread
import ossdbtoolsservice.utils.time
import ossdbtoolsservice.utils.validate # noqa
# Public API of the utils package: mirrors the submodules imported above so
# `from ossdbtoolsservice.utils import *` exposes exactly these names.
__all__ = [
    'cancellation',
    'constants',
    'log',
    'serialization',
    'thread',
    'time',
    'validate'
]
|
989,603 | 441f4e0122fbb4210574f5185204bb67b1c99e40 | from .general import _pkg_root
from .dress import dress
from .dress_element import dress_element, BeamElement
from .beam_elements import *
from .line import Line
from .particles import Particles
from .particles import pyparticles_to_xtrack_dict
from .tracker import Tracker
from .monitors import generate_monitor_class
# Concrete turn-by-turn monitor class specialized for the Particles type.
ParticlesMonitor = generate_monitor_class(Particles)

def enable_pyheadtail_interface():
    """Replace xtrack.Particles with the PyHEADTAIL-compatible subclass.

    Module-level side effect: after this call, code reading
    ``xtrack.Particles`` gets ``PyHtXtParticles`` instead.
    """
    import xtrack.pyheadtail_interface.pyhtxtparticles as pp
    import xtrack as xt
    xt.Particles = pp.PyHtXtParticles
|
989,604 | f17cd01350ea28981ac9edac87139fdaf14129c6 | # 11_07 Sending a Letter
# Student records: each maps score categories to raw score lists.
# get_average() below weights them homework 10%, quizzes 30%, tests 60%.
lloyd = {
    "name": "Lloyd",
    "homework": [90.0, 97.0, 75.0, 92.0],
    "quizzes": [88.0, 40.0, 94.0],
    "tests": [75.0, 90.0]
}
alice = {
    "name": "Alice",
    "homework": [100.0, 92.0, 98.0, 100.0],
    "quizzes": [82.0, 83.0, 91.0],
    "tests": [89.0, 97.0]
}
tyler = {
    "name": "Tyler",
    "homework": [0.0, 87.0, 75.0, 22.0],
    "quizzes": [0.0, 75.0, 78.0],
    "tests": [100.0, 100.0]
}
def average(list_val):
    """Return the arithmetic mean of a non-empty list of numbers."""
    return sum(list_val) / len(list_val)
def get_average(dic_val):
    """Return the weighted course grade for one student record.

    Categories are weighted: homework 10%, quizzes 30%, tests 60%.
    Missing or empty categories contribute nothing (matching the
    original's skip-on-absent behavior).
    """
    weights = {"homework": 0.1, "quizzes": 0.3, "tests": 0.6}
    total = 0
    for key, weight in weights.items():
        scores = dic_val.get(key)
        if scores:
            total += (sum(scores) / len(scores)) * weight
    return total
def get_letter_grade(score):
    """Map a numeric score to a letter grade (A >= 90, B >= 80, C >= 70,
    D >= 60, otherwise F).

    The elif chain already guarantees the upper bound of each band, so the
    original's redundant `and score < N` checks are removed.
    """
    if score >= 90:
        return "A"
    elif score >= 80:
        return "B"
    elif score >= 70:
        return "C"
    elif score >= 60:
        return "D"
    else:
        return "F"
# Compute Lloyd's weighted average and print his letter grade.
# BUG FIX: `print get_letter_grade(score)` was Python-2-only syntax.
score = get_average(lloyd)
print(get_letter_grade(score))
|
989,605 | 6ff971985c4b55d883efe603ac190f6d697d8e20 | #import pandas as pd
#import scipy.io
#from pandas import Series,DataFrame
#from PIL import Image
#from os import listdir
#from os.path import isfile, join
import numpy as np
import cv2
import os
#from tempfile import TemporaryFile
#image_names = []
# Load labeled images from ./small_1, shuffle, split 80/10/10 and save.
path = "./small_1"
valid_images = [".jpg", ".jpeg", ".png", ".tga", ".bmp"]
# NOTE(review): num_images counts every directory entry, but non-image
# extensions are skipped below, so the tail of `images`/`labels` can stay
# zero-filled — confirm the folder holds only images.
num_images = len(os.listdir(path))
target_height = 212
target_width = 398
images = np.empty((num_images, target_height, target_width, 3), dtype='uint8')
labels = np.zeros(shape=(num_images,))
n = 0
for f in os.listdir(path):
    ext = os.path.splitext(f)[1]
    if ext.lower() not in valid_images:
        continue
    # The leading filename character encodes the 1-based class id.
    labels[n] = int(f[0]) - 1
    image = cv2.imread(os.path.join(path, f))
    # Crop 40 px top/bottom and 75 px left/right before storing.
    image = image[40:-40, 75:-75]
    images[n, :, :, :] = image
    n += 1

# Shuffle, then slice into train (80%), validation (10%) and test (10%).
sel = np.random.permutation(num_images)
y = labels[sel]
x = images[sel]
ind = len(y)
x_train = x[:int(0.8 * ind)]
y_train = y[:int(0.8 * ind)]
# read validation data
x_val = x[int(0.8 * ind):int(0.9 * ind)]
y_val = y[int(0.8 * ind):int(0.9 * ind)]
# read test data
x_test = x[int(0.9 * ind):]
y_test = y[int(0.9 * ind):]
# checks
print('train shape:', '\nData: ', x_train.shape, '\nlabels: ', y_train.shape)
print('val shape:', '\nData: ', x_val.shape, '\nlabels: ', y_val.shape)
print('test shape:', '\nData: ', x_test.shape, '\nlabels: ', y_test.shape)
print('saving data......')
# for compressive saving
# BUG FIX: the originals referenced undefined names y1/y2/y3 (NameError);
# save the split label arrays that were actually built above.
np.savez_compressed('/home/ti1080/GP_Team#/drive-download-20170709T124015Z-001/Trian_data', y_train=y_train, x_train=x_train)
np.savez_compressed('/home/ti1080/GP_Team#/drive-download-20170709T124015Z-001/Val_data', y_val=y_val, x_val=x_val)
np.savez_compressed('/home/ti1080/GP_Team#/drive-download-20170709T124015Z-001/Test_data', y_test=y_test, x_test=x_test)
## for loading:
# loaded = np.load('.../data.npz'); y = loaded['y']; x = loaded['x']
|
989,606 | bee756799705f31aa80bded1dba1063c41829233 |
# coding: utf-8
# In[ ]:
# Reads space-separated integers from input.txt, then writes two numbers to
# output.txt: the sum of all positive values, and the product of the values
# strictly between the (last) occurrence of the minimum and the (last)
# occurrence of the maximum.
s, m = 0,1
num = []
with open("input.txt",'r') as file_in:
    for line in file_in:
        a = line.split(' ')
        for i in range(len(a)):
            num.append(int(a[i]))
            # Only positive values contribute to the sum s.
            if (((int(a[i])) > 0)):
                s = s + int(a[i])
# index1/index2 end up at the LAST occurrence of min/max respectively.
# NOTE(review): both are undefined if the input file is empty — min([])
# would already have raised by then; verify inputs are non-empty.
mn = min(num)
mx = max(num)
for i in range(len(num)):
    if (num[i] == mn):
        index1 = i
    if (num[i] == mx):
        index2 = i
# Multiply the elements strictly between the two indices, whichever order
# they appear in.
if (index1 <= index2):
    for i in range(index1+1,index2):
        m = m * num[i]
else:
    for i in range(index2+1,index1):
        m = m * num[i]
with open('output.txt','w') as file_out:
    file_out.write(str(s))
    file_out.write(' ')
    file_out.write(str(m))
|
989,607 | 422179fde6e44668b2499cb9ba4f1857adc938a6 | import os
class CollectFood:
    """Collect the set of food-category strings from a tab-separated file.

    Columns 2 and 3 of each row are accumulated into a set, which is then
    written to the output file one value per line (order unspecified).
    """

    def __init__(self, input_file, output_file):
        self.input_file = input_file    # path of the tab-separated source file
        self.output_file = output_file  # path the unique categories are written to
        self.food_set = set()           # accumulates unique category strings

    def __loadData(self, input_file, food_set):
        """Add columns 2 and 3 of every parseable row of input_file to food_set."""
        with open(input_file, "r") as file_in:
            # NOTE(review): mutating os.linesep does not change how Python
            # splits lines when reading; this looks like a leftover hack for
            # classic-Mac '\r' files — confirm whether it can be dropped.
            os.linesep = '\r'
            for line in file_in:
                elements = line.rstrip().split("\t")
                try:
                    # A 3-column row still contributes elements[2] before the
                    # IndexError on elements[3] — original behavior preserved.
                    food_cat = elements[2]
                    food_set.add(food_cat)
                    food_cat = elements[3]
                    food_set.add(food_cat)
                    # BUG FIX: `print elements` was Python-2-only syntax.
                    print(elements)
                except IndexError:
                    continue

    def __outputFile(self, output_file, food_set):
        """Write each collected category to output_file on its own line."""
        with open(output_file, "w") as file_out:
            for food in food_set:
                file_out.write("{0}\n".format(food))

    def runMe(self):
        """Run the load-then-write pipeline using the configured paths."""
        self.__loadData(self.input_file, self.food_set)
        self.__outputFile(self.output_file, self.food_set)
if __name__ == '__main__':
    # NOTE(review): hard-coded desktop paths — parameterize before reuse.
    input_file = '/Users/hideto/Desktop/temp_result1.txt'
    output_file = '/Users/hideto/Desktop/food_set'
    job = CollectFood(input_file, output_file)
    job.runMe()
|
989,608 | 0200c18c1783f8dfba343c8de24640e367929772 | from Shared.API.infrastructure import ResourceManager
from Utils.guid import guid
import logging
import pytest
logger = logging.getLogger("test")
@pytest.mark.pasapi
@pytest.mark.bhavna
@pytest.mark.pas
def test_delete_password_profile(core_session):
    """
    TC: C281495 - Delete Password Complex Profile
    :param core_session: Authenticates API session
    """
    # Create a uniquely-named password profile.
    profile_name_cps = f'Profile {guid()}'
    create_result, create_ok = ResourceManager.add_password_profile(
        core_session, profile_name_cps, min_pwd_len=12, max_pwd_len=24)
    assert create_ok, f"Profile: {profile_name_cps} failed to create due to {create_result}"
    logger.info(f"Profile {profile_name_cps} successfully created and result is {create_result}")

    # Fetch every profile and pick out the one just created.
    all_profiles_result, all_profiles_success = ResourceManager.get_profiles(core_session, type='All', rr_format=True)
    assert all_profiles_success, f"Failed to get profiles list {all_profiles_result}"
    logger.info(f"Successfully get the list of all the profiles {all_profiles_result}")
    matching_profile_ids = [
        entry['Row']['ID']
        for entry in all_profiles_result['Results']
        if entry['Row']['Name'] == profile_name_cps
    ]

    # Delete it again and confirm the API reports success.
    delete_ok = ResourceManager.delete_password_profile(core_session, matching_profile_ids[0])
    assert delete_ok, f"Failed to delete profile {profile_name_cps}"
    logger.info(f"Successfully deleted password profile {profile_name_cps}")
|
989,609 | ca76eb6a30404078579c1300349af8cc840a397c | from functools import wraps
from flask import (
Blueprint,
flash,
g,
redirect,
render_template,
request,
session,
url_for
)
from werkzeug.security import (
check_password_hash,
generate_password_hash
)
from serafim.model import db_session_required
from serafim.model import User
auth_blueprint = Blueprint('auth', __name__, url_prefix='/auth')
@auth_blueprint.route('/login_admin', methods=['GET', 'POST'])
@db_session_required
def login_admin():
    """Render the admin login form (GET) or authenticate a submitted login (POST)."""
    if request.method == 'GET':
        return render_template("auth/login_admin.html")
    submitted_username = request.form['username']
    submitted_password = request.form['password']
    db = g.get('db_session')
    account = db.query(User).filter(User.username == submitted_username).first()
    # Determine the failure reason, if any.
    if account is None:
        failure = 'Incorrect username'
    elif not check_password_hash(account.password, submitted_password):
        failure = 'Incorrect password'
    else:
        failure = None
    if failure is None:
        # Fresh session holding the authenticated identity.
        session.clear()
        session['user_id'] = account.id
        session['username'] = account.username
        session['role'] = account.role
        return redirect(url_for('admin.admin_list_dataset'))
    flash(failure)
    return redirect(url_for('auth.login_admin'))
@auth_blueprint.route('/signup_user', methods=['GET', 'POST'])
@db_session_required
def signup_user():
    """Show the signup form (GET) or create a new 'user' account (POST)."""
    if request.method == 'GET':
        return render_template('user/signup.html')
    submitted = request.form
    # `nama` mirrors the username, matching the existing account shape.
    new_account = User(
        username=submitted['username'],
        nama=submitted['username'],
        password=generate_password_hash(submitted['password']),
        role='user',
    )
    db = g.get('db_session')
    db.add(new_account)
    db.commit()
    return redirect(url_for('auth.login_user'))
@auth_blueprint.route('/login_user', methods=['GET', 'POST'])
@db_session_required
def login_user():
    """Render the user login form (GET) or authenticate a 'user'-role login (POST)."""
    if request.method == 'GET':
        return render_template("user/login.html")
    submitted_username = request.form['username']
    submitted_password = request.form['password']
    db = g.get('db_session')
    # Only accounts with the 'user' role may log in here.
    account = (db.query(User)
               .filter(User.username == submitted_username)
               .filter(User.role == 'user')
               .first())
    if account is None:
        failure = 'Incorrect username'
    elif not check_password_hash(account.password, submitted_password):
        failure = 'Incorrect password'
    else:
        failure = None
    if failure is None:
        session.clear()
        session['user_id'] = account.id
        session['username'] = account.username
        session['role'] = account.role
        return redirect(url_for('user.user_prediksi_form'))
    flash(failure)
    return redirect(url_for('auth.login_user'))
@auth_blueprint.route('/logout')
def logout():
    """Drop all session state and send the visitor back to the site root."""
    session.clear()
    return redirect('/')
def auth_required(f):
    """Decorator: redirect to the admin login page unless a user is logged in.

    BUG FIX: the original never returned `decorated_function`, so the
    decorator returned None and every route it wrapped became uncallable.
    The sibling decorators admin_required/user_required already return it.
    """
    @wraps(f)
    def decorated_function(*args, **kwargs):
        if 'user_id' not in session:
            return redirect(url_for('auth.login_admin'))
        return f(*args, **kwargs)
    return decorated_function
def admin_required(f):
    """Decorator: allow only a logged-in session whose role is 'admin';
    anything else is redirected to the admin login page."""
    @wraps(f)
    def guarded(*args, **kwargs):
        logged_in = 'user_id' in session
        is_admin = session.get('role') == 'admin' if 'role' in session else False
        if not logged_in:
            return redirect(url_for('auth.login_admin'))
        if not is_admin:
            return redirect(url_for('auth.login_admin'))
        return f(*args, **kwargs)
    return guarded
def user_required(f):
    """Decorator: allow only a logged-in session whose role is 'user';
    anything else is redirected to the user login page."""
    @wraps(f)
    def guarded(*args, **kwargs):
        if 'user_id' not in session:
            return redirect(url_for('auth.login_user'))
        if session.get('role', None) != 'user' or 'role' not in session:
            return redirect(url_for('auth.login_user'))
        return f(*args, **kwargs)
    return guarded
989,610 | 621d7b9e4db536c52cf46fac5df23501bb7b8acb | #
# This SCons Tool is executed after transpSConsSetup and is meant to modify
# the final environment variables used in the builders. This is probably
# only useful for debugging.
#
# The environment variable TRANSP_SCONS_DIR can be used to add one or
# more directories to the tool path which is used to find all of the tools.
#
def generate(env):
    """SCons tool hook: modify *env* in place; intentionally a no-op here
    (this tool exists only as a post-setup debugging hook, per the header)."""
    pass
def exists(env):
    """SCons tool hook: report tool availability; always available (truthy)."""
    return 1
|
989,611 | 2e37205a6c6fe44c0633f79fdee13b50ddfc0304 | count = 0
num_lines = 0
#with codecs.open('alice.txt','r',encoding='utf-8') as f:
myfile = open('alice.txt', 'r')
for line in myfile:
tmplist=line.split()
num_lines += 1
print tmplist
#print num_lines
print tmplist
# count = count + len(tmplist)
#print count
#for line in f.readlines():
# lines = len(line.strip())
# num_lines += len(lines)
#print num_words |
989,612 | d776a4fc56c7cf502597ac031db2ea245b7ee62d | # Copyright 2018 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import zaza.charm_lifecycle.prepare as lc_prepare
import unit_tests.utils as ut_utils
class TestCharmLifecyclePrepare(ut_utils.BaseTestCase):
    """Unit tests for zaza.charm_lifecycle.prepare."""

    def test_prepare(self):
        """prepare() should add the model with the settings/constraints from
        deployment_env and then apply the constraints to that model."""
        # Patch every collaborator prepare() touches; patch_object comes from
        # the project BaseTestCase and binds the mock as an attribute.
        self.patch_object(lc_prepare.zaza.controller, 'add_model')
        self.patch_object(lc_prepare.deployment_env, 'get_model_settings')
        self.patch_object(lc_prepare.deployment_env, 'get_model_constraints')
        self.patch_object(lc_prepare.deployment_env, 'get_cloud_region')
        self.patch_object(lc_prepare.deployment_env, 'get_cloud_name')
        self.patch_object(lc_prepare.zaza.model, 'set_model_constraints')
        self.get_model_settings.return_value = {'default-series': 'hardy'}
        self.get_model_constraints.return_value = {'image-stream': 'released'}
        lc_prepare.prepare('newmodel')
        # cloud_name/region are None because their (mock) getters return Mocks
        # that prepare passes straight through — the patched getters default
        # to returning a MagicMock, so None here reflects prepare's own
        # defaults path.
        self.add_model.assert_called_once_with(
            'newmodel',
            config={
                'default-series': 'hardy'},
            cloud_name=None,
            region=None)
        self.set_model_constraints.assert_called_once_with(
            constraints={'image-stream': 'released'},
            model_name='newmodel')

    def test_parser(self):
        # Default model name is auto-generated with a 'zaza-' prefix.
        args = lc_prepare.parse_args([])
        self.assertTrue(args.model_name.startswith('zaza-'))

    def test_parser_model(self):
        # -m overrides the generated model name.
        args = lc_prepare.parse_args(['-m', 'newmodel'])
        self.assertEqual(args.model_name, 'newmodel')

    def test_parser_logging(self):
        # Using defaults
        args = lc_prepare.parse_args(['-m', 'model'])
        self.assertEqual(args.loglevel, 'INFO')
        # Using args
        args = lc_prepare.parse_args(['-m', 'model', '--log', 'DEBUG'])
        self.assertEqual(args.loglevel, 'DEBUG')
|
989,613 | 66341a1f5d376ccfdb52fd33f5fb76d673788ee0 | ##THIS CLASS HANDLES THE CALL TO SIZE THE WINDOW##
##It returns a rectangle an points that describe the window##
from ctypes import *
from ctypes.wintypes import *
def WM_SIZE(cs,hwnd):
    """Refresh cs.variables.Client_window with hwnd's client area in
    SCREEN coordinates.

    Mutates rcClient plus the ptClientUL/ptClientLR corner points in place;
    returns nothing.
    """
    ##Get the client-area rectangle of the selected window (window coords)
    windll.user32.GetClientRect(hwnd,pointer(cs.variables.Client_window.rcClient))
    ##Translate that rectangle into usable POINT corners (UL = upper-left,
    ##LR = lower-right)
    cs.variables.Client_window.ptClientUL.x=cs.variables.Client_window.rcClient.left
    cs.variables.Client_window.ptClientUL.y=cs.variables.Client_window.rcClient.top
    cs.variables.Client_window.ptClientLR.x=cs.variables.Client_window.rcClient.right
    cs.variables.Client_window.ptClientLR.y=cs.variables.Client_window.rcClient.bottom
    ##Convert both points from window-relative to screen coordinates
    windll.user32.ClientToScreen(hwnd, pointer(cs.variables.Client_window.ptClientUL))
    windll.user32.ClientToScreen(hwnd, pointer(cs.variables.Client_window.ptClientLR))
    ##Rebuild the rectangle from the converted corners so it too references
    ##the screen instead of the window
    windll.user32.SetRect(pointer(cs.variables.Client_window.rcClient),cs.variables.Client_window.ptClientUL,
    cs.variables.Client_window.ptClientLR)
    return
|
989,614 | ac846c76b06d9e1faab962ff8c81930aea08d2c1 | # Generated by Django 3.0.5 on 2020-07-05 02:28
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add Profile.instr_transpose_shift: the instrument transposition the
    user plays, stored as the semitone-shift string ('0', '-3', '2')."""

    dependencies = [
        ('users', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='profile',
            name='instr_transpose_shift',
            # Choices map instrument key (concert/Eb/Bb) to its stored shift.
            field=models.CharField(choices=[('concert', '0'), ('Eb', '-3'), ('Bb', '2')], default='0', max_length=10),
        ),
    ]
|
989,615 | 991e2c3449e7052e577f4efd3aa444c32b202836 | # coding:utf-8
'''
plsi_helper.py
'''
import os, sys
def main(argv):
    """Placeholder CLI entry point; currently unimplemented and unused."""
    pass
def combine_corpora(folder):
    """Print each non-hidden file in *folder* as one space-joined line.

    Every file's stripped lines are concatenated with single spaces and
    printed; files whose name starts with '.' are skipped.

    :param folder: directory to scan (trailing slash no longer required)
    """
    for fname in os.listdir(folder):
        if fname[0] != '.':
            # os.path.join replaces the fragile `folder+file` concatenation,
            # and `with` closes the handle the original leaked.
            with open(os.path.join(folder, fname)) as handle:
                lines = [line.strip() for line in handle]
            print(' '.join(lines))
if __name__ == '__main__':
    # main(sys.argv)  # disabled: the ad-hoc corpus merge below runs instead
    # NOTE(review): relative path assumes the script runs from its own dir.
    combine_corpora(folder='./reuters/training/')
989,616 | 1dcf75ab84a04d085cf0893b2e8db90b33a6044e | from travels.views import Views
# Route table consumed by the framework: (route name, handler) pairs, all
# bound to a single shared Views instance.
r = Views()
urls = [
    ('routes', r.routes),
    ('new_route', r.new_route),
    ('get_route', r.get_route)
]
|
989,617 | 1d7ed44abc855de118920955a0da5b07147340f5 | ## ScrollingMemory class
##
## Attributes are:
## 1. an internal list holding the cinematic data:
## self.memory_list
## 2. the maximum size of that list:
## self.max_size
##
## Only the method
## continuation(self, pattern_list, horizon)
## produces an output, i.e. the continuation of a matching pattern_list.
##
## The methods
## record(self, item)
## and
## move_item(self, item, displacement, discard_flag)
## simply update the self.memory_list, according to the input data.
##
import random
import mlm_util # bounded_add, bidx, bitem, list_unify
#############################################################################
class ScrollingMemory(object):
    '''A Finite Sized List of Frames or Scenes.

    The most recent item sits at index 0 of self.memory_list; the list is
    trimmed at the bottom so it never exceeds self.max_size items.
    '''
    def __init__(self, max_size):
        # a non-positive max_size means a practically unbound memory
        if max_size > 0:
            self.max_size = max_size
        else:
            self.max_size = 9999
        self.memory_list = []

    # records a new item on top of self.memory_list
    def record(self, item):
        """Push *item* on top of the memory, trimming the bottom to max_size."""
        # insert on top (slower than append!)
        self.memory_list.insert(0,item)
        # trim the bottom of the list
        if len(self.memory_list) >= self.max_size:
            self.memory_list = self.memory_list[0:self.max_size]

    # move an item found in the memory_list
    # discard flag set discards items sent to the very bottom of self.memory_list
    def move_item(self, item, displacement, discard_flag):
        """Shift *item* by *displacement* positions (clamped to the list);
        when discard_flag >= 1, an item pushed to/past max_size is dropped."""
        if item in self.memory_list:
            # remove item (first occurrence only)
            item_index = self.memory_list.index(item)
            self.memory_list.remove(item)
            # calculate displaced location within bounds
            new_item_index = mlm_util.bounded_add(item_index,displacement,
            0,len(self.memory_list))
            # if anchor_index is smaller than max_size, do reinsert item
            if new_item_index + 1 < self.max_size:
                self.memory_list.insert(new_item_index,item)
            # if anchor_index is equal or greater than max_size, check discard_flag
            elif discard_flag < 1:
                self.memory_list.insert(new_item_index,item)

    # locates a continuation for a pattern in memory_list, up to horizon
    # right now its time oriented to the left
    # i.e. self.memory_list[0] is the most recent item
    def continuation(self, pattern_list, horizon):
        """Search memory for a span unifying with pattern_list and return
        [continuation-to-the-left, matched span, searched list], or three
        empty lists when nothing matches."""
        len_pattern = len(pattern_list)
        memory_list = self.memory_list
        # ... just for speed sake, use this local variable in the while loop
        len_memory_list = len(self.memory_list)
        found = False
        # start at `horizon` so there are enough items to the left of the
        # match to serve as the returned continuation
        anchor_index = horizon
        search_result = []
        while (not(found) and anchor_index <= (len_memory_list - len_pattern)
        and len_pattern > 0 and len_memory_list > 0):
            bottom_anchor_index = anchor_index + len_pattern
            # here we use a simple unification criterion
            search_result = (mlm_util.list_unify
            (pattern_list,
            memory_list[anchor_index:bottom_anchor_index])
            )
            if search_result[0]: # Holds unification Success: True or False
                found = True
                return [memory_list[(anchor_index - horizon):anchor_index],
                # ... this is the continuation to the left
                memory_list[anchor_index:bottom_anchor_index],
                # ... this is the located pattern that satisfied pattern_list
                memory_list]
                # ... this is the list that was successfully searched
            # NOTE(review): the anchor advances by 2, so odd offsets are never
            # tried — confirm this stride is intentional and not a typo for 1.
            anchor_index += 2
        return [[],[],[]]
|
989,618 | cdb32d797d5bebdfcef21a9e7b188b9e62d26dd8 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
from sklearn import neighbors
from pandas import DataFrame
# Train a 3-NN classifier on Kaggle digit-recognizer data and write a
# submission CSV.
df_train = pd.read_csv("/Users/gaojie/Kaggle/data/digit-recognizer/train.csv")
df_test = pd.read_csv("/Users/gaojie/Kaggle/data/digit-recognizer/test.csv")
train_labels = df_train.label
# kd-tree backed 3-nearest-neighbour classifier.
knn = neighbors.KNeighborsClassifier(algorithm='kd_tree', n_neighbors=3)
print("training")
train_data = df_train.drop(columns='label')
knn.fit(train_data, train_labels)
print('get test data')
predictions = knn.predict(df_test)
# BUG FIX: the original read the non-existent attribute `df_test.indx`
# (AttributeError) under a Titanic-leftover 'PassengerId' column, and built
# an unused, off-by-one ImageId frame. Digit-recognizer submissions use
# ImageId running 1..len(df_test).
result = pd.DataFrame({'ImageId': list(range(1, len(df_test) + 1)),
                       'Label': predictions.astype(np.int32)})
result.to_csv("/Users/gaojie/Kaggle/data/digit-recognizer/nn_predictions1.csv", index=False)
print('finished!')
989,619 | 1d3ba074276e1ab82653a45d995da208f7eec8a3 | # Sophie Johnson
# 11/24/18
import cherrypy
import json
from _song_database import _song_database
# Recommendations class
# Recommendations class
class RecommendationController(object):
    """CherryPy-style controller serving JSON song/artist recommendations."""

    # Constructor
    def __init__(self, sdb=None):
        """Store the song database, building a default one when none is given.

        BUG FIX: the original called `_movie_database()`, a name that does not
        exist in this module — the import at the top of the file is
        `_song_database`.
        """
        if sdb is None:
            self.sdb = _song_database()
        else:
            self.sdb = sdb

    # Retrieve song recommendations given a title
    def GET_RECOMMENDATION_SONGS_TITLE(self, title):
        """Return a JSON string: {'result': 'success', 'song_recommendations':
        [...]} or an error payload when the song is unknown or lookup fails."""
        output = {'result' : 'success'}
        title = str(title)
        try:
            songs = self.sdb.recommend_similar_songs(title)
            if songs:
                output['song_recommendations'] = list(songs)
            else:
                output['result'] = 'error'
                output['message'] = "Song not found in database."
        # Surface any lookup failure as an error payload rather than a 500.
        except Exception as e:
            output['result'] = 'error'
            output['message'] = str(e)
        return json.dumps(output)

    # Retrieve artist recommendations given a name
    def GET_RECOMMENDATION_ARTISTS_NAME(self, name):
        """Return a JSON string: {'result': 'success', 'artist_recommendations':
        [...]} or an error payload when the artist is unknown or lookup fails."""
        output = {'result' : 'success'}
        name = str(name)
        try:
            artists = self.sdb.recommend_similar_artists(name)
            if artists:
                output['artist_recommendations'] = list(artists)
            else:
                output['result'] = 'error'
                output['message'] = "Artist not found in database."
        # Surface any lookup failure as an error payload rather than a 500.
        except Exception as e:
            output['result'] = 'error'
            output['message'] = str(e)
        return json.dumps(output)
|
989,620 | f9bdef88a9ce3452aef54bba5082e0f506898e8e | # MDP from the example by Sebastian Thrun in: https://www.youtube.com/watch?v=glHKJ359Cnc&t=44s
mdp = [[-3, -3, -3, 100], [-3, None, -3, -100], [-3, -3, -3, -3]]
# Initial utilities
utilities = [[0, 0, 0, 100], [0, None, 0, -100], [0, 0, 0, 0]]
# Actions
# These are the actions the agent can perform,
# they are defined as lists which mark positional changes.
# See function transition(s, direction)
up = [-1, 0]
down = [1, 0]
left = [0, -1]
right = [0, 1]
def hit_wall(s):
    """Checks if the agent hits a wall.

    A wall is either the None-valued cell at [1, 1] or any position outside
    the 3x4 grid bounds.

    :param s: The position/state of the agent as a list of two ints
    :return: True if the agent would hit a wall by entering s
    """
    if s == [1, 1]:  # the None-field is impassable
        return True
    row, col = s
    out_of_bounds = not (0 <= row <= 2 and 0 <= col <= 3)
    return out_of_bounds
def transition(s, direction):
    """Move the agent one step from state *s* in *direction*.

    The action offset is added elementwise to the position, e.g.
    [0, 0] + right([0, 1]) -> [0, 1]. A move into a wall leaves the agent
    where it was.

    :param s: The position/state of the agent as a list of two ints
    :param direction: One of the action offset lists (up/down/left/right)
    :return: The new position (== s when the move would hit a wall)
    """
    candidate = [axis + delta for axis, delta in zip(s, direction)]
    return s if hit_wall(candidate) else candidate
def get_utility(s, direction):
    """Utility of the state reached by taking *direction* from state *s*.

    :param s: The position/state of the agent as a list of two ints
    :param direction: The direction in which the agent moves
    :return: The utility of the resulting state
    """
    row, col = transition(s, direction)
    return utilities[row][col]
def value(s):
    """Update utilities[s] with the simplified Bellman value of state *s*.

    U(s) = R(s) + max over actions of the expected neighbour utility, where
    the intended move succeeds with probability 0.8 and slips to each
    perpendicular direction with probability 0.1.

    :param s: The position/state of the agent as a list of two ints
    :return: None (utilities is mutated in place)
    """
    # (intended move, perpendicular slip a, perpendicular slip b)
    action_triples = (
        (up, left, right),
        (down, left, right),
        (right, up, down),
        (left, up, down),
    )
    best_expected = max(
        0.8 * get_utility(s, main_move)
        + 0.1 * get_utility(s, slip_a)
        + 0.1 * get_utility(s, slip_b)
        for main_move, slip_a, slip_b in action_triples
    )
    utilities[s[0]][s[1]] = mdp[s[0]][s[1]] + best_expected
def value_iteration(iterations):
    """Run the value update over every non-terminal grid cell repeatedly.

    :param iterations: Number of sweeps; 100 is sufficient for this problem
    :return: None (prints the final utilities)
    """
    # Start/terminal/unreachable cells keep their fixed utilities.
    skipped = ([1, 1], [0, 3], [1, 3])
    for _ in range(iterations):
        for row in range(3):
            for col in range(4):
                if [row, col] not in skipped:
                    value([row, col])
    print(utilities)

value_iteration(100)
|
989,621 | 23348513c90e193e570d79951a63d6f2939c9059 | croatia = ['c=', 'c-', 'dz=', 'd-', 'lj', 'nj', 's=', 'z=']
string = input()
for i in croatia:
string = string.replace(i, '#')
print(len(string))
|
989,622 | 16e5e0de74d8bc42b45d4610cf1de8510d164996 | # Copyright 2015 The Cobalt Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# GYP build description for the Cobalt WebAudio 'audio' static library.
{
  'variables': {
    # Opt this target into the stricter Starboard warning set.
    'sb_pedantic_warnings': 1,
  },
  'targets': [
    {
      'target_name': 'audio',
      'type': 'static_library',
      'sources': [
        'async_audio_decoder.cc',
        'async_audio_decoder.h',
        'audio_buffer.cc',
        'audio_buffer.h',
        'audio_buffer_source_node.cc',
        'audio_buffer_source_node.h',
        'audio_context.cc',
        'audio_context.h',
        'audio_destination_node.cc',
        'audio_destination_node.h',
        'audio_device.cc',
        'audio_device.h',
        'audio_file_reader.cc',
        'audio_file_reader.h',
        'audio_file_reader_wav.cc',
        'audio_file_reader_wav.h',
        'audio_helpers.h',
        'audio_node.cc',
        'audio_node.h',
        'audio_node_input.cc',
        'audio_node_input.h',
        'audio_node_output.cc',
        'audio_node_output.h',
      ],
      'dependencies': [
        '<(DEPTH)/cobalt/base/base.gyp:base',
        '<(DEPTH)/cobalt/browser/browser_bindings_gen.gyp:generated_types',
      ],
      'export_dependent_settings': [
        # Additionally, ensure that the include directories for generated
        # headers are put on the include directories for targets that depend
        # on this one.
        '<(DEPTH)/cobalt/browser/browser_bindings_gen.gyp:generated_types',
      ]
    },
  ],
}
|
989,623 | 923e9ee75a9c105b82616c201ece4ff5b9502d87 | #!/usr/bin/python
#If p is the perimeter of a right angle triangle with integral length sides, {a,b,c}, there are exactly three solutions for p = 120.
#
#{20,48,52}, {24,45,51}, {30,40,50}
#
#For which value of p 1000, is the number of solutions maximised?
# For every perimeter p < 1000, count right triangles with integral sides
# {a, b, c} where a + b + c == p, then report the p with the most solutions.
# BUG FIX: `xrange` and the `print` statement were Python-2-only; both are
# updated to Python 3 with identical output.
numSolutions = [0] * 1001

# Euclid's formula: a = m^2 - n^2, b = 2mn, c = m^2 + n^2.
# m starts at n + 1 and steps by 2 so m - n stays odd.
# NOTE(review): there is no gcd(m, n) == 1 test, so non-coprime (m, n) pairs
# regenerate multiples already counted via k and some perimeters are
# over-counted — confirm before trusting exact counts for every p.
for n in range(1, 500):
    m = n + 1
    while m < 500:
        a = m * m - n * n
        b = 2 * m * n
        c = m * m + n * n
        # Count every multiple k of this triple with perimeter below 1000.
        k = 1
        while k * (a + b + c) < 1000:
            numSolutions[k * (a + b + c)] += 1
            k += 1
        m += 2

# Scan for the perimeter with the most recorded solutions.
maxSolution = (0, 0)
for s in range(len(numSolutions)):
    if numSolutions[s] > maxSolution[1]:
        maxSolution = (s, numSolutions[s])
print("The answer is", maxSolution[0], "with", maxSolution[1], "answers")
|
989,624 | cb31513711315bf10ff915735a8d670babaa70cc | import os
import csv
# Tally the votes in the PyPoll election CSV and print a summary: total
# votes, the winner(s), and each candidate's percentage and count.
votes = 0
election_data = os.path.join(r"C:\Users\khanh\Desktop\UCIRV201810DATA4\Homeworks\HW03-Python\PyPoll\Resources\election_data.csv")
Khan = Correy = Li = Tooley = 0
percentage_Khan = percentage_Correy = percentage_Li = percentage_Tooley = 0
with open(election_data, "r") as csvfile:
    ballots = csv.reader(csvfile, delimiter=',')
    next(ballots)  # skip the header row
    for ballot in ballots:
        votes += 1
        candidate = ballot[2]
        if candidate == "Khan":
            Khan += 1
        elif candidate == "Correy":
            Correy += 1
        elif candidate == "Li":
            Li += 1
        elif candidate == "O'Tooley":
            Tooley += 1
    percentage_Khan = (Khan / votes) * 100
    percentage_Correy = (Correy / votes) * 100
    percentage_Li = (Li / votes) * 100
    percentage_Tooley = (Tooley / votes) * 100
print("Election Results")
print(f"Total votes: ({votes})")
# Every candidate matching the top tally is announced (ties print twice,
# exactly as the original chain of independent ifs did).
winner = max(Khan, Correy, Li, Tooley)
if(winner == Khan):
    print(f"Winner: Khan ({winner})")
if (winner == Correy):
    print(f"Winner: Correy ({winner})")
if (winner == Li):
    print(f"Winner: Li ({winner})")
if (winner == Tooley):
    print(f"Winner: O'Tooley ({winner})")
print(f"Khan: {round(percentage_Khan,3)}% ({Khan})")
print(f"Correy: {round(percentage_Correy,3)}% ({Correy})")
print(f"Li: {round(percentage_Li,3)}% ({Li})")
print(f"O'Tooley: {round(percentage_Tooley,3)}% ({Tooley})")
|
989,625 | 3950e06d4c18dd42a11833df894ab24807936955 | import tkinter as tk
import PIL
from PIL import Image, ImageTk
import cv2
import numpy as np
import google_cloud_storage
from database import BookingDatabase, LoginDatabase, UserDatabase, EmployeesDatabase, IssuesDatabase
import socket_communication
import json
import datetime
import camera
import os
gcs = google_cloud_storage.GoogleCloudStorage()
class AgentPiApp(tk.Tk):
    """Agent PI GUI built with tkinter, is a subclass of the Tk class, a window, used for customers to login with their credentials
    or face to access the car, for engineer to show their QR code to access the car
    """
    def __init__(self, car_id):
        """Create the fullscreen window for the car with the given id and
        show the login page first."""
        tk.Tk.__init__(self)
        self.car_id = car_id
        print("Agent for car of id "+ str(car_id) + " is created")
        self.attributes('-fullscreen', True)
        # To rename the title of the window
        self.title("Car Sharing System - By Big City Bois")
        # self.geometry('960x540')
        # self.window.rowconfigure(0, weight=1)
        self.columnconfigure(0, weight=1)
        # _frame holds the currently displayed page; switch_frame swaps it.
        self._frame = None
        self.switch_frame(LoginPage)

    def is_access_allowed(self, user_id):
        """Return True or False if the user is allowed to access the car or not

        Parameters
        ----------
        user_id
            The Id of the user trying to access the car
        """
        ### DATABASE CODE GOES HERE
        # NOTE(review): stub — always denies access until the DB check lands.
        return False

    def get_accessible_user_id(self):
        """Return the user id that is allowed to access the car, at current time
        """
        ### DATABASE CODE GOES HERE
        # NOTE(review): stub — hard-coded user id 1 until the DB check lands.
        return 1

    def switch_frame(self, frame_class):
        """To switch the main window frame to another

        Parameters
        ----------
        frame_class
            The frame subclass to switch to
        """
        # Build the new page before destroying the old one so the window is
        # never left empty.
        new_frame = frame_class(self)
        if self._frame is not None:
            self._frame.destroy()
        self._frame = new_frame
        self._frame.pack()
class LoginPage(tk.Frame):
    """Login Page frame, to show the options available to the users:
    credential login, facial-recognition login, or engineer QR login.
    """
    def __init__(self, master):
        """Initializing the login page frame
        """
        tk.Frame.__init__(self, master)
        self.master = master
        # Set the UI: welcome banner, username/password form, and the two
        # alternative login buttons.
        self.welcome = tk.Label(self, text = "Welcome!", font=("Arial Bold", 50)).grid(row = 0, ipady = 80)
        self.login_frame = tk.LabelFrame(self, width = 50)
        self.login_frame.grid(row = 1)
        label_username = tk.Label(self.login_frame, text = "Username: \t", font=("Arial Bold", 30)).grid(row = 1, column = 0, pady = 5)
        self.entry_username = tk.Entry(self.login_frame, width = 20, font=("Arial Bold", 30))
        self.entry_username.grid(row = 1, column = 1, pady = 5)
        label_password = tk.Label(self.login_frame, text = "Password: \t", font=("Arial Bold", 30)).grid(row = 2, column = 0, pady = 5)
        # show="*" masks the typed password.
        self.entry_password = tk.Entry(self.login_frame, width = 20, font=("Arial Bold", 30), show="*")
        self.entry_password.grid(row = 2, column = 1, pady = 5)
        self.bt_login = tk.Button(self.login_frame, text = "Login", font=("Arial Bold", 30), fg = "red", command = self.login_bt_pressed)
        self.bt_login.grid(row = 3, columnspan = 2, pady = 15)
        self.bt_login_face = tk.Button(self, width = 30, text = "Login with facial recognition", font=("Arial Bold", 30), fg = "red", command=lambda: master.switch_frame(FacePage))
        self.bt_login_face.grid(row = 3, pady = 60)
        self.bt_login_qr = tk.Button(self, width = 30 , text = "Engineer QR", command=lambda: master.switch_frame(QrPage), font=("Arial Bold", 30), fg = "red")
        self.bt_login_qr.grid(row = 4)

    def login_bt_pressed(self):
        """Handle the Login button: read the form and switch to the granted
        or denied page. Returns the (username, password) pair entered."""
        username = self.entry_username.get()
        password = self.entry_password.get()
        # NOTE(review): credentials are printed to stdout — remove before
        # production; passwords must not be logged.
        print("Username: " + username)
        print("Password: " + password)
        ### IDENTIFICATION & SOCKET CODE GOES HERE
        # NOTE(review): placeholder check with a hard-coded credential pair.
        if username == "Hieu" and password == "Hieu":
            self.master.switch_frame(AccessGranted)
        else:
            self.master.switch_frame(AccessDenied)
        return (self.entry_username.get(),self.entry_password.get())
class AccessGranted(tk.Frame):
    """Screen confirming the user was successfully identified as the
    car's user.
    """

    def __init__(self, master):
        """Create the access granted frame."""
        tk.Frame.__init__(self, master)
        # NOTE(review): .pack() returns None, so this attribute holds None,
        # not the Label widget.
        self.access_granted = tk.Label(self, text = "Access Granted!", font=("Arial Bold", 100), fg = "green", pady = 80).pack()
        tk.Button(self, text = "Back", font=("Arial Bold", 30), command=lambda: master.switch_frame(LoginPage)).pack()
class AccessDenied(tk.Frame):
    """Screen telling the user that access to the car was refused."""

    def __init__(self, master):
        """Create the access denied frame."""
        tk.Frame.__init__(self, master)
        # NOTE(review): .pack() returns None, so this attribute holds None,
        # not the Label widget.
        self.access_granted = tk.Label(self, text = "Access Denied!", font=("Arial Bold", 100), fg = "red", pady = 80).pack()
        tk.Button(self, text = "Back", font=("Arial Bold", 30), command=lambda: master.switch_frame(LoginPage)).pack()
class FacePage(tk.Frame):
    """Facial-recognition login frame.

    Streams the camera onto a canvas, runs a Haar-cascade detector plus
    an LBPH recognizer on each frame, and grants access once the expected
    user's face has been recognized more than REQUIRED_MATCHES times.
    """

    # Positive recognitions needed before access is granted.
    REQUIRED_MATCHES = 10

    def __init__(self, master):
        """Initializing the facial recognition frame."""
        tk.Frame.__init__(self, master)
        self.master = master
        # Id of the user currently allowed to use this car.
        self.user_id = self.master.get_accessible_user_id()
        # BUGFIX: self.user_name was never assigned (the DB lookup is
        # still a TODO) but is used in update(); fall back to the id so
        # the overlay never raises AttributeError.
        self.user_name = str(self.user_id)
        # Download the pre-trained recognizer weights once (the original
        # called download_trainer() twice).
        gcs.download_trainer()
        # Create Face Detector
        self.faceCascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
        # Create Local Binary Patterns Histograms (LBPH) Face Recognizer with pre-trained weights
        self.recognizer = cv2.face.LBPHFaceRecognizer_create()
        self.recognizer.read('trainer.yml')
        # BUGFIX: self.font was never defined but is passed to putText().
        self.font = cv2.FONT_HERSHEY_SIMPLEX
        # Create a VideoCamera object
        self.vid = ApVideoCapture()
        # Elements on the UI
        self.canvas = tk.Canvas(self, width = self.vid.width, height=self.vid.height)
        self.canvas.pack()
        tk.Button(self, text="Back", font=("Arial Bold", 30), command=lambda: master.switch_frame(LoginPage)).pack()
        # Keep a reference to the PhotoImage so it is not garbage collected.
        self.photo = None
        self.identification_count = 0
        self.update()

    def update(self):
        """Grab a camera frame, run detection/recognition, redraw, reschedule."""
        # BUGFIX: ApVideoCapture exposes get_frame(), not read().
        ret, frame = self.vid.get_frame()
        if ret:
            # Convert the captured frame into grayscale
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            gray = cv2.equalizeHist(gray)
            # Get all faces from the video frame
            faces = self.faceCascade.detectMultiScale(gray, 1.2, 5)
            # For each face in faces
            for (x, y, w, h) in faces:
                # Create rectangle around the face
                cv2.rectangle(frame, (x-20, y-20), (x+w+20, y+h+20), (0, 255, 0), 4)
                # Recognize the face belongs to which ID
                Id = self.recognizer.predict(gray[y:y+h, x:x+w])
                ### IDENTIFICATION & SOCKET CODE GOES HERE
                if Id[0] == self.user_id:
                    # If the target face is found enough times, grant access
                    # and stop the update loop (this frame is destroyed).
                    self.identification_count += 1
                    if self.identification_count > self.REQUIRED_MATCHES:
                        self.master.switch_frame(AccessGranted)
                        return
                    name_to_put = self.user_name
                else:
                    name_to_put = "Unknown - Access Denied"
                # Put text describe who is in the picture
                cv2.rectangle(frame, (x-22, y-90), (x+w+22, y-22), (0, 255, 0), -1)
                cv2.putText(frame, str(name_to_put), (x, y-40), self.font, 2, (255, 255, 255), 3)
            # BUGFIX: actually draw the frame on the canvas — it was
            # created but never updated before.
            self.photo = ImageTk.PhotoImage(image=Image.fromarray(frame))
            self.canvas.create_image(0, 0, image=self.photo, anchor=tk.NW)
        self.after(50, self.update)
class QrPage(tk.Frame):
    """Engineer QR-code login frame.

    Streams the camera and decodes any QR code that appears; the decoded
    payload is printed until the identification/socket code is written.
    """

    def __init__(self, master):
        """Initializing the qr recognition frame."""
        tk.Frame.__init__(self, master)
        self.master = master
        # Create VideoCamera object
        self.vid = ApVideoCapture()
        # Create a QR Detector
        self.detector = cv2.QRCodeDetector()
        # Declare elements on the UI
        self.canvas = tk.Canvas(self, width = self.vid.width, height = self.vid.height)
        self.canvas.pack()
        tk.Button(self, text = "Back", font=("Arial Bold", 30), command=lambda: master.switch_frame(LoginPage)).pack()
        # Start update the UI with camera
        self.update()

    def update(self):
        """Grab a camera frame, try to decode a QR code, reschedule."""
        # BUGFIX: ApVideoCapture exposes get_frame(), not read().
        ret, frame = self.vid.get_frame()
        if ret:
            try:
                data, bbox, _ = self.detector.detectAndDecode(frame)
                if bbox is not None:
                    print(bbox)
                    # display the image with lines
                    for i in range(len(bbox)):
                        # draw all lines
                        cv2.line(frame, tuple(bbox[i][0]), tuple(bbox[(i+1) % len(bbox)][0]), color=(255, 0, 0), thickness=2)
                    if data:
                        ### QR Code content is data, require IDENTIFICATION CODE AND SOCKET CODE
                        print("[+] QR Code detected, data:", data)
            except cv2.error:
                # detectAndDecode can raise on degenerate frames; the
                # original `try` had no except clause — skip bad frames.
                pass
        self.after(15, self.update)
class ApVideoCapture:
    """Thin wrapper around cv2.VideoCapture shared by the camera frames."""

    def __init__(self, video_source = 0):
        """Turn on the camera.

        Parameters
        ----------
        video_source
            The index of the camera to use, defaults to 0

        Raises
        ------
        ValueError
            If the camera cannot be opened.
        """
        self.vid = cv2.VideoCapture(video_source)
        if not self.vid.isOpened():
            raise ValueError("Unable to open video source", video_source)
        # Get video source width and height
        self.width = self.vid.get(cv2.CAP_PROP_FRAME_WIDTH)
        self.height = self.vid.get(cv2.CAP_PROP_FRAME_HEIGHT)

    def __del__(self):
        """Turn off the camera."""
        # Guard: if __init__ raised before self.vid was assigned there is
        # nothing to release.
        if hasattr(self, "vid"):
            self.vid.release()

    def get_frame(self):
        """Return (ret, frame) with the frame converted BGR->RGB, or
        (False, None) when no frame is available.
        """
        if self.vid.isOpened():
            ret, frame = self.vid.read()
            if ret:
                return (ret, cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
            return (ret, None)
        # BUGFIX: the original returned the undefined name `ret` here,
        # raising NameError whenever the capture was closed.
        return (False, None)

    def read(self):
        """Backward-compatible alias — some callers use read() instead
        of get_frame()."""
        return self.get_frame()
if __name__ == "__main__":
    # Launch the agent GUI for car #1 and enter the tk event loop.
    AgentPiApp(1).mainloop()
989,626 | d5da7a7a50c177ee6162dbbbe48887e57f2008df | """ Interacts with a Docker Daemon on a remote instance"""
import random
from typing import (
Any,
Dict,
List,
Optional
)
import docker
from requests.exceptions import ConnectionError, Timeout
from loadsbroker import logger
from loadsbroker.util import retry
StrDict = Dict[str, str]
DOCKER_RETRY_EXC = (ConnectionError, Timeout)
def split_container_name(container_name):
    """Split "repo:tag" into (repo, tag); tag is None when absent.

    BUGFIX: the original returned the whole parts *list* as the name in
    the no-tag case (``return parts, None``), and a plain ``split(":")``
    broke on registry names with a port ("reg:5000/img:tag").  Using
    rpartition keeps the port with the repo part and always yields a
    2-tuple of strings.
    """
    name, sep, tag = container_name.rpartition(":")
    # A "/" in the candidate tag means the colon belonged to a registry
    # port, not a tag.
    if sep and "/" not in tag:
        return name, tag
    return container_name, None
class DockerDaemon:
    """Convenience wrapper around a docker-py client for one daemon.

    All requests go through ``self._client``; the methods are thin
    helpers used by the broker to create, run, stop and kill containers
    and to manage images.
    """

    def __init__(self, host, timeout=5):
        """Connect to the Docker daemon at `host`.

        :param host: base URL of the daemon (e.g. "tcp://10.0.0.1:2375").
        :param timeout: per-request timeout in seconds.
        """
        self.host = host
        self.timeout = timeout
        # Flipped elsewhere once the daemon answers; start pessimistic.
        self.responded = False
        self._client = docker.Client(base_url=host, timeout=timeout)

    def get_containers(self, all=False):
        """Returns a dict mapping container id -> container info.

        :param all: Whether to include **non-running** containers.
        """
        return {cont['Id']: cont
                for cont in self._client.containers(all=all)}

    def _create_container(self, image, cmd=None):
        """Creates and starts a container with a random ``loads_NNNN``
        name; returns (name, id)."""
        name = 'loads_%d' % random.randint(1, 9999)
        container = self._client.create_container(image, name=name,
                                                  command=cmd,
                                                  detach=True)
        id = container['Id']
        self._client.start(container=id, publish_all_ports=True)
        return name, id

    def run(self, commands, image):
        """Runs commands in a new container.

        Sends back a blocking iterator on the log output.
        """
        cmd = '/bin/sh -c "%s"' % ';'.join(commands)
        cname, cid = self._create_container(image, cmd=cmd)
        return cid, self._client.attach(cid, stream=True, logs=True)

    def exec_run(self, cid: str, cmd: str) -> bytes:
        """Run a command in an existing container and return its output."""
        execid = self._client.exec_create(cid, cmd)
        return self._client.exec_start(execid['Id'])

    def kill(self, cid):
        """Kills and removes a container (force removal, no grace period)."""
        self._client.remove_container(cid, force=True)

    def stop(self, cid, timeout=15, capture_stream=None):
        """Stops and removes a container.

        :param capture_stream: optional writable object that receives the
            container's timestamped logs before removal.
        """
        self._client.stop(cid, timeout)
        self._client.wait(cid)
        if capture_stream:
            capture_stream.write(self._client.logs(cid, timestamps=True))
        self._client.remove_container(cid)

    def pull_container(self, container_name):
        """Pulls a container image from the repo/tag for the provided
        container name; returns the list of progress events."""
        result = self._client.pull(container_name, stream=True)
        return list(result)

    def import_container(self, client, container_url):
        """Imports a container from a URL by piping it into ``docker load``
        over SSH.

        :param client: a connected paramiko-style SSH client.
        """
        stdin, stdout, stderr = client.exec_command(
            'curl %s | docker load' % container_url)
        # Wait for termination
        output = stdout.channel.recv(4096)
        stdin.close()
        stdout.close()
        stderr.close()
        return output

    @retry(on_exception=lambda exc: isinstance(exc, DOCKER_RETRY_EXC))
    def has_image(self, container_name):
        """Indicates whether this instance already has the desired
        container name/tag loaded.  Retries on connection errors/timeouts.

        Example of what the images command output looks like:

            [{'Created': 1406605442,
              'RepoTags': ['bbangert/simpletest:dev'],
              'Id': '824823...31ae0d6fc69e6e666a4b44118b0a3',
              'ParentId': 'da7b...ee6b9eb2ee47c2b1427eceb51d291a',
              'Size': 0,
              'VirtualSize': 1400958681}]
        """
        name, tag = split_container_name(container_name)
        images = self._client.images(all=True)
        return any(container_name in image["RepoTags"] for image in images)

    def run_container(self,
                      name: str,
                      command: Optional[str] = None,
                      env: Optional[StrDict] = None,
                      volumes: Optional[Dict[str, StrDict]] = None,
                      ports: Optional[Dict[Any, Any]] = None,
                      dns: Optional[List[str]] = None,
                      pid_mode: Optional[str] = None,
                      entrypoint: Optional[str] = None):
        """Run a container given the container name, env, command args,
        data volumes, and port bindings; returns the inspect() dict.

        Ports may be plain container ports or ``(port, proto)`` tuples
        mapped to their host bindings.
        """
        if volumes is None:
            volumes = {}
        if dns is None:
            dns = []
        # BUGFIX: ports defaulted to None but was iterated without a
        # guard, crashing with AttributeError when omitted.
        if ports is None:
            ports = {}
        expose = []
        port_bindings = {}
        for port in ports.keys():
            if isinstance(port, tuple):
                proto = port[1] if len(port) == 2 else "tcp"
                key = "%d/%s" % (port[0], proto)
            else:
                key = port
            port_bindings[key] = ports[port]
            expose.append(port)
        result = self._client.create_container(
            name, command=command, environment=env,
            volumes=[volume['bind'] for volume in volumes.values()],
            ports=expose,
            entrypoint=entrypoint)
        container = result["Id"]
        result = self._client.start(container, binds=volumes,
                                    port_bindings=port_bindings, dns=dns,
                                    pid_mode=pid_mode)
        response = self._client.inspect_container(container)
        return response

    def safe_run_container(self, name: str, *args, **kwargs) -> Any:
        """Call run_container until it succeeds.

        Max of 5 tries w/ attempts to stop potential zombie containers
        between attempts; re-raises the last exception on failure.
        """
        for i in range(5):
            try:
                return self.run_container(name, *args, **kwargs)
            except Exception as exc:
                logger.debug("Exception with run_container (%s)",
                             name, exc_info=True)
                if i == 4:
                    logger.debug("Giving up on running container.")
                    raise
                self.stop_container(name)

    def containers_by_name(self, container_name):
        """Yields all running containers whose image matches the name."""
        containers = self._client.containers()
        return (container for container in containers
                if container_name in container["Image"])

    def kill_container(self, container_name):
        """Locate the container of the given container_name and kill it."""
        for container in self.containers_by_name(container_name):
            self.kill(container["Id"])

    def stop_container(self,
                       container_name,
                       timeout=15,
                       capture_stream=None):
        """Locates and gracefully stops a container by name."""
        for container in self.containers_by_name(container_name):
            self.stop(container["Id"], timeout, capture_stream)
|
989,627 | 9866b1219097fba522ef0fe4c53f9e8194911f48 | import json
import web3
from web3 import Web3
from web3.contract import ConciseContract
import _thread
import logging
import numpy as np
def new(user):
    """Deploy a Factory contract for a new user and register it in mydb.

    Expects `user` to be a dict with "email" and "address" keys (address
    is presumably an Ethereum address — TODO confirm against callers).
    Returns the deployment tx hash as hex on success, an "ERROR: ..."
    string on failure, or "no" if the email is already registered.
    """
    email = user.get('email')
    address = user.get('address')
    import settings
    mydb = settings.mydb
    # Ropsten testnet via Infura.
    w3 = Web3(Web3.HTTPProvider("https://ropsten.infura.io/KZSQapS5wjr4Iw7JhgtE"))
    # SECURITY(review): private key hard-coded in source — move to a
    # secret store / environment variable before any real deployment.
    private_key = "4944d078bfc34676f0e4fb942e29a1c3b18c347b51d0a648e936a4953115de6c"
    acct = w3.eth.account.privateKeyToAccount("0x%s" % private_key)
    w3.eth.defaultAccount = acct
    #database.init()
    #mydb = pickledb.load('/data/my.db', False)
    #logger = logging.getLogger('user')
    #logger.info("email: %s" % email)
    #logger.info("address: %s" % address)
    if not email in mydb:
        # NOTE(review): dead assignment ("oroxy" typo) — superseded by
        # proxy_abi read again inside the try block below.
        oroxy_abi = open("/app/Proxy.abi", "r").read().replace('\n','')
        #contract = file_object = open("/app/SignatureBouncer.sol", "r")
        # TODO: add pub key
        #compiled_sol = compile_source(contract) # Compiled source code
        #contract_interface = compiled_sol['<stdin>:Greeter']
        try:
            # Load the pre-compiled ABI/bytecode artefacts from disk.
            proxy_abi = open("/app/Proxy.abi", "r").read().replace('\n','')
            abi = open("/app/Factory.abi", "r").read().replace('\n','')
            #bytecode = open("/app/SignatureBouncer.byte", "r").read().replace('\n','').encode("utf-8").hex()
            #bytecode = Web3.toHex(text=open("/app/SignatureBouncer.byte", "r").read().replace('\n',''))
            bytecode = open("/app/Factory.byte", "r").read().replace('\n','')
            # w3 = Web3(Web3.EthereumTesterProvider())
            #w3.eth.defaultAccount = w3.eth.accounts[0]
            # Build, sign and broadcast the Factory deployment transaction.
            Factory = w3.eth.contract(abi=abi, bytecode=bytecode)
            construct_tx = Factory.constructor().buildTransaction({
                'from': acct.address,
                'nonce': w3.eth.getTransactionCount(acct.address),
                'gas': 1728712,
                'gasPrice': w3.toWei('21', 'gwei')})
            signed = acct.signTransaction(construct_tx)
            dictionary = {}
            dictionary["address"] = address
            tx = w3.eth.sendRawTransaction(signed.rawTransaction)
            def waitForResult(tx_hash):
                # Runs on a background thread: blocks until the deploy is
                # mined, then initialises the proxy and persists the
                # record to mydb / dict.npy.
                contract_tx = w3.eth.waitForTransactionReceipt(tx_hash)
                dictionary["contract"] = contract_tx.contractAddress
                cont = w3.eth.contract(address=contract_tx.contractAddress, abi=abi)
                dictionary["proxy_address"] = cont.functions.libraryAddress().call({'from': acct.address})
                proxy = w3.eth.contract(address=dictionary["proxy_address"], abi=proxy_abi)
                t = proxy.functions.init(address, address).buildTransaction({
                    'from': acct.address,
                    'nonce': w3.eth.getTransactionCount(acct.address),
                    'gas': 1728712,
                    'gasPrice': w3.toWei('21', 'gwei')})
                s = acct.signTransaction(t)
                w3.eth.sendRawTransaction(s.rawTransaction)
                mydb[email] = dictionary
                np.save('dict.npy', mydb)
            # Fire-and-forget: don't block the request on mining.
            _thread.start_new_thread(waitForResult, (tx,))
            return tx.hex()
        except Exception as e:
            # NOTE(review): broad catch turns any failure into a string
            # return value — callers must check for the "ERROR:" prefix.
            return "ERROR: %s" % e
    return "no"
|
def affiche_produit(nb1, nb2):
    """Print the product of nb1 and nb2."""
    print(nb1 * nb2)


affiche_produit(14, 2)
989,629 | 86dc557363bf40b164c8aa5c9e6b8413b619cf7a | from django import forms
from .models import *
class question_form(forms.ModelForm):
    """ModelForm for posting a question (author plus question text)."""
    class Meta:
        # Backing model imported star-style from .models.
        model=questions_model
        fields=['author','question']
class answer_form(forms.ModelForm):
    """ModelForm for answering an existing question."""
    class Meta:
        # Backing model imported star-style from .models.
        model=answers_model
        fields=['question','answer']
989,630 | e450e8d9ff79cd68a693b101fba6c9078918e7e8 | #MODULKE FOR CONTROL MOTOR 4WD - 2WD
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
GPIO.setup(23, GPIO.OUT)
GPIO.setup(24, GPIO.OUT)
GPIO.setup(12, GPIO.OUT)
GPIO.setup(14, GPIO.OUT)
GPIO.setup(15, GPIO.OUT)
GPIO.setup(18, GPIO.OUT)
pdr = GPIO.PWM(12, 1500) # channel=12 frequency=1500Hz
piz = GPIO.PWM(18, 1500) # channel=18 frequency=1500Hz
pdr.start(0)
piz.start(0)
GPIO.output(14, GPIO.LOW)
GPIO.output(15, GPIO.LOW)
GPIO.output(23, GPIO.LOW)
GPIO.output(24, GPIO.LOW)
def avanzar(vl1,vl2):
    """Drive forward.

    vl1/vl2 are PWM duty cycles (0-100) for the pdr and piz motors
    respectively — presumably right/left; confirm against wiring.
    """
    pdr.ChangeDutyCycle(vl1)
    piz.ChangeDutyCycle(vl2)
    # Both drivers set to the "forward" LOW/HIGH direction pair.
    GPIO.output(14,GPIO.LOW)
    GPIO.output(15,GPIO.HIGH)
    GPIO.output(23,GPIO.LOW)
    GPIO.output(24,GPIO.HIGH)
    #time.sleep(.1)
    print("avanzar")
def izquierda(vl1,vl2):
    """Turn left: one driver forward, the other reversed.

    vl1/vl2 are PWM duty cycles (0-100) for the pdr and piz motors.
    """
    pdr.ChangeDutyCycle(vl1)
    piz.ChangeDutyCycle(vl2)
    GPIO.output(14,GPIO.LOW)
    GPIO.output(15,GPIO.HIGH)
    GPIO.output(23,GPIO.HIGH)
    GPIO.output(24,GPIO.LOW)
    print("izquierda")
def derecha(vl1,vl2):
    """Turn right: mirror of izquierda() — direction pairs swapped.

    vl1/vl2 are PWM duty cycles (0-100) for the pdr and piz motors.
    """
    pdr.ChangeDutyCycle(vl1)
    piz.ChangeDutyCycle(vl2)
    GPIO.output(14,GPIO.HIGH)
    GPIO.output(15,GPIO.LOW)
    GPIO.output(23,GPIO.LOW)
    GPIO.output(24,GPIO.HIGH)
    print("derecha")
def retroceso(vl1,vl2):
    """Drive backwards: both drivers use the direction pair opposite to
    avanzar().

    vl1/vl2 are PWM duty cycles (0-100) for the pdr and piz motors.
    """
    pdr.ChangeDutyCycle(vl1)
    piz.ChangeDutyCycle(vl2)
    GPIO.output(14,GPIO.HIGH)
    GPIO.output(15,GPIO.LOW)
    GPIO.output(23,GPIO.HIGH)
    GPIO.output(24,GPIO.LOW)
    print("retroceso")
def Stop():
    """Halt both motors: duty cycles to 0 and all direction pins HIGH
    (brake state)."""
    pdr.ChangeDutyCycle(0)
    piz.ChangeDutyCycle(0)
    GPIO.output(14,GPIO.HIGH)
    GPIO.output(15,GPIO.HIGH)
    GPIO.output(23,GPIO.HIGH)
    GPIO.output(24,GPIO.HIGH)
    print("STOP")
|
def ticker():
    """Read ticker.txt ("Company:CODE" per line) into a dict.

    Returns a mapping of company name -> ticker code.  Improvements over
    the original: the file is closed via a context manager even on
    errors, blank lines no longer raise IndexError, and only the first
    ':' separates key from value so codes may themselves contain ':'.
    """
    tickerdict = {}
    with open('ticker.txt', 'r') as infile:
        for regel in infile:
            regel = regel.strip()
            if not regel:
                continue
            sleutel, _, waarde = regel.partition(':')
            tickerdict[sleutel] = waarde.strip()
    return tickerdict
tickerbestand = ticker()

# Forward lookup: company name -> code.  Direct O(1) membership test
# instead of the original scan over every key.
bedrijfnaam = input('Geef bedrijf: ')
if bedrijfnaam in tickerbestand:
    print(tickerbestand[bedrijfnaam])

# Reverse lookup: code -> company name.  Iterate items() so the value is
# fetched once per entry instead of via an extra indexing step.
code = input('Geef code: ')
for naam, tickercode in tickerbestand.items():
    if code == tickercode:
        print(naam)
# Map of video test-sequence name -> [width, height] in pixels.
# (Presumably the JCT-VC / HEVC common test sequences, grouped by class
# resolution — confirm against the encoder configs that consume this.)
s2resolution = {
    "Traffic"            :[2560,1600],
    "PeopleOnStreet"     :[2560,1600],
    "Nebuta"             :[2560,1600],
    "SteamLocomotive"    :[2560,1600],
    "Kimono1"            :[1920,1080],
    "ParkScene"          :[1920,1080],
    "Cactus"             :[1920,1080],
    "BQTerrace"          :[1920,1080],
    "BasketballDrive"    :[1920,1080],
    "RaceHorses"         :[832,480],
    "BQMall"             :[832,480],
    "PartyScene"         :[832,480],
    "BasketballDrill"    :[832,480],
    "RaceHorsesLow"      :[416,240],
    "BQSquare"           :[416,240],
    "BlowingBubbles"     :[416,240],
    "BasketballPass"     :[416,240],
    "FourPeople"         :[1280,720],
    "Johnny"             :[1280,720],
    "KristenAndSara"     :[1280,720],
    "BasketballDrillText":[832,480],
    "ChinaSpeed"         :[1024,768],
    "SlideEditing"       :[1280,720],
    "SlideShow"          :[1280,720],
    }
989,633 | 6510e1076ffa8b238ec7de539a0d01b0a6ae70ff | import os
from page import Page
from random import choice
from file_handler import f_read_json
import config
def award_show(self, award, data, icon):
    """Render one award section onto a Page.

    :param self: the Page being drawn on (a free function, not a method —
        the page is passed explicitly).
    :param award: award name; section heading and key into `data`.
    :param data: mapping of award name -> {winner name: win count}.
    :param icon: string drawn once per win, each in a random colour.
    """
    self.add_text(award, fg="GREEN")
    self.add_newline()
    try:
        winners = data[award]
    except KeyError:
        self.add_text("No-one has won this award yet...")
        self.add_newline()
        self.add_newline()
        return
    # Column width = longest winner name, so the "|" separators line up.
    max_len = max((len(person) for person in winners), default=0)
    for person, number in winners.items():
        self.add_text(person)
        self.move_cursor(x=max_len)
        self.add_text("|", fg="RED")
        for _ in range(number):
            self.start_random_fg_color()
            self.add_text(icon)
            self.end_fg_color()
        self.add_newline()
    self.add_newline()
# Page number -> list of awards displayed on that page.
awards_on_pages = {
    "141":["Tea Maker","CelebriTEA"],
    "142":["Moo Cow"],
    "143":["Towel Bringer","Squeaky Clean","Spongebob Squarepoints","Cleaning the Bloody Fridge"],
    "144":["Honorary Fire Marshall","Double Noughts and Crosses","Lunchtime Goat Award"],
    "145":["Boo Cow","Tea Wrecks"],
    "146":["Towel Flood","Boo Key","Stolen Pen","Worst Sorting Hat","SNORE-lax","Banana Split","Orange Peel"]
}


def get_page(a):
    """Return the page number that shows award `a`, or "???" if unknown."""
    for page_num, award_list in awards_on_pages.items():
        # Membership test replaces the original element-by-element scan.
        if a in award_list:
            return page_num
    return "???"
class AwardsPage(Page):
    """Teletext page showing a fixed set of awards with their winners."""

    def __init__(self, page_num, title="", icon=u"\u263B"):
        """Set up the page; `page_num` selects which awards are listed.

        :param title: heading drawn at the top of the page.
        :param icon: glyph drawn once per win (defaults to a smiley).
        """
        super(AwardsPage, self).__init__(page_num)
        self.in_index = False
        self.awards = awards_on_pages[page_num]
        self.title = title
        self.icon = icon

    def generate_content(self):
        """Draw the title followed by one award_show() section per award."""
        # (Removed the unused `import json` / `itemgetter` imports the
        # original carried here.)
        data = f_read_json('awards')
        self.add_title(self.title)
        for a in self.awards:
            award_show(self, a, data, self.icon)
class AwardsIndex(Page):
    """Index page (140): syncs pending award grants from a remote host
    and lists each award's current leader(s)."""

    def __init__(self, page_num):
        super(AwardsIndex, self).__init__(page_num)
        self.title = "Awards & Unawards"
        self.in_index = True
        # Range of pages this index covers.
        self.index_num = "140-146"

    def background(self):
        """On the KLBFAX deployment only: pull the queued awards file,
        apply each grant locally, then push back an emptied file so
        grants are not applied twice."""
        if config.NAME == "KLBFAX":
            from awards import add_award
            # Fetch the queued grants from the remote machine.
            os.system("scp mscroggs:~/.klb/awards /home/pi/.klbtemp/awards > /dev/null 2>&1")
            with open("/home/pi/.klbtemp/awards") as f:
                for line in f:
                    # Each line is "award,person".
                    lsp = line.strip("\n").split(",")
                    add_award(lsp[0],lsp[1])
            # Truncate the local queue file...
            with open("/home/pi/.klbtemp/awards","w") as f:
                pass
            # ...and push the empty file back to clear the remote queue.
            os.system("scp /home/pi/.klbtemp/awards mscroggs:~/.klb/awards > /dev/null 2>&1")

    def generate_content(self):
        """List, for every award, the person (or tied people) with the
        most wins."""
        # NOTE(review): json and itemgetter are imported but unused here.
        import json
        from operator import itemgetter
        data = f_read_json('awards')
        self.add_title("The most awards")
        for a,b in data.items():
            # Running maximum over the winners of award `a`; ties are
            # joined with " & ".
            m = 0
            mp = ""
            for person,n in b.items():
                if n > m:
                    m = n
                    mp = person
                elif n==m:
                    # NOTE(review): if the first person seen has n == 0
                    # this yields a leading " & "; harmless for counts
                    # >= 1 but worth confirming.
                    mp += " & " + person
            self.add_newline()
            self.add_text(a,fg="GREEN")
            self.add_text(" (see page "+str(get_page(a))+") ")
            self.add_text(mp,fg="RED")
# Module-level page instances; presumably discovered by the page loader
# on import — confirm against the framework's page-registration code.
sub_page = AwardsIndex("140")
p1 = AwardsPage("141","Tea Awards")
p2 = AwardsPage("142","Mart Cow Awards")
p3 = AwardsPage("143","Kitchen Awards")
p4 = AwardsPage("144","Other Awards")
p5 = AwardsPage("145","Tea Unawards","o_0")
p6 = AwardsPage("146","Other Unawards","0_o")
|
989,634 | 4317e617be9386217d16d5d7489356c57e888643 | #!/usr/bin/env python
# encoding: utf-8
name = ""
shortDesc = ""
longDesc = """
"""
# Ketene dimerisation (2 CH2=C=O <=> C4H4O2); experimental Arrhenius fit
# from Blake & Davis (1972), valid 498-596 K and 800-15300 Pa.
entry(
    index = 0,
    label = "C2H2O + C2H2O-2 <=> C4H4O2",
    degeneracy = 1.0,
    kinetics = Arrhenius(A=(178,'m^3/(mol*s)'), n=0, Ea=(73.999,'kJ/mol','+|-',0.74), T0=(1,'K'), Tmin=(498,'K'), Tmax=(596,'K'), Pmin=(800,'Pa'), Pmax=(15300,'Pa')),
    reference = Article(
        authors = [b'Blake, P.G.', b'Davis, H.H.'],
        title = b'Kinetics of Dimerisation of Gaseous Keten',
        journal = b'J. Appl. Chem. Biotechnol.',
        volume = b'22',
        pages = b'491',
        year = b'1972',
        url = b'http://kinetics.nist.gov/kinetics/Detail?id=1972BLA/DAV491:1',
    ),
    referenceType = "experiment",
    shortDesc = """Absolute value measured directly""",
    longDesc =
"""
PrIMe Reaction: r00007002
PrIMe Kinetics: http://warehouse.primekinetics.org/depository/reactions/data/r00007002/rk00000001.xml
Bath gas: H2C=C=O
Excitation technique: Thermal
Analytical technique: Vis-UV absorption
""",
)
entry(
index = 1,
label = "C4H8 <=> C2H4 + C2H4-2",
degeneracy = 16.0,
kinetics = Arrhenius(A=(3.16e+16,'s^-1'), n=0, Ea=(274.378,'kJ/mol'), T0=(1,'K'), Tmin=(969,'K'), Tmax=(1280,'K'), Pmin=(26.66,'Pa'), Pmax=(26.66,'Pa')),
reference = Article(
authors = [b'Beadle, P.C.', b'Golden, D.M.', b'King, K.D.', b'Benson, S.W.'],
title = b'Pyrolysis of Cyclobutane',
journal = b'J. Am. Chem. Soc.',
volume = b'94',
pages = b'2943',
year = b'1972',
url = b'http://kinetics.nist.gov/kinetics/Detail?id=1972BEA/GOL2943:2',
),
referenceType = "review",
shortDesc = """Experimental value and limited review""",
longDesc =
"""
PrIMe Reaction: r00006456
Bath gas: Ar
""",
)
entry(
index = 2,
label = "C8H12O2 <=> C4H6O + C4H6O-2",
degeneracy = 4.0,
kinetics = Arrhenius(A=(6.03e+13,'s^-1'), n=0, Ea=(202.873,'kJ/mol'), T0=(1,'K'), Tmin=(656,'K'), Tmax=(793,'K')),
reference = Article(
authors = [b'Vala, M.', b'Baiardo, J.', b'Latham, D.', b'Mukherjee, R.', b'Pascyz, S.'],
title = b'Fourier transform infrared kinetic study of the thermal decomposition of tetramethyl-1-3-cyclobutanedione and dimethylketene',
journal = b'J. Indian Chem. Soc.',
volume = b'63',
pages = b'16',
year = b'1986',
url = b'http://kinetics.nist.gov/kinetics/Detail?id=1986VAL/BAI16:2',
),
referenceType = "experiment",
shortDesc = """Absolute value measured directly""",
longDesc =
"""
PrIMe Reaction: r00009168
PrIMe Kinetics: http://warehouse.primekinetics.org/depository/reactions/data/r00009168/rk00000001.xml
Excitation technique: Thermal
Analytical technique: Fourier transform (FTIR)
""",
)
entry(
index = 3,
label = "C4H8 <=> C2H4 + C2H4-2",
degeneracy = 16.0,
kinetics = Arrhenius(A=(3.98e+15,'s^-1'), n=0, Ea=(261.906,'kJ/mol'), T0=(1,'K'), Tmin=(644,'K'), Tmax=(1089,'K'), Pmin=(2133,'Pa'), Pmax=(307000,'Pa')),
reference = Article(
authors = [b'Lewis, D.K.', b'Bergmann, J.', b'Manjoney, R.', b'Paddock, R.', b'Kaira, B.L.'],
title = b'Rates of reactions of cyclopropane, cyclobutane, cyclopentene, and cyclohexene in the presence of boron trichloride',
journal = b'J. Phys. Chem.',
volume = b'88',
pages = b'4112',
year = b'1984',
url = b'http://kinetics.nist.gov/kinetics/Detail?id=1984LEW/BER4112:4',
),
referenceType = "experiment",
shortDesc = """Absolute value measured directly""",
longDesc =
"""
PrIMe Reaction: r00006456
Bath gas: Cyclobutane
Pressure dependence: None reported
Experimental procedure: Other
Excitation technique: Thermal
Time resolution: By end product analysis
Analytical technique: Gas chromatography
Two experimental set-ups were used: experiments at 644-750 K and 16-25 torr were performed in a static reactor, while the shock tube methodology was used at 939-1089 K with shock pressures of about 2-3 atm. The rate parameters are from the literature but provide an excellent fit to the present data.
""",
)
entry(
index = 4,
label = "C4H8 <=> C2H4 + C2H4-2",
degeneracy = 16.0,
kinetics = Arrhenius(A=(1.52e+14,'s^-1'), n=0, Ea=(229.479,'kJ/mol','+|-',11.474), T0=(1,'K'), Tmin=(891,'K'), Tmax=(1080,'K'), Pmin=(18000,'Pa'), Pmax=(110000,'Pa')),
reference = Article(
authors = [b'Barnard, J.A.', b'Cocks, A.T.', b'Lee, R.K.Y.'],
title = b'Kinetics of the Thermal Unimolecular Reactions of Cyclopropane and Cyclobutane behind Reflected Shock Waves',
journal = b'J. Chem. Soc. Faraday Trans. 1',
volume = b'70',
pages = b'1782',
year = b'1974',
url = b'http://kinetics.nist.gov/kinetics/Detail?id=1974BAR/COC1782:4',
),
referenceType = "experiment",
shortDesc = """Absolute value measured directly""",
longDesc =
"""
PrIMe Reaction: r00006456
Bath gas: Ar
Excitation technique: Thermal
Analytical technique: Gas chromatography
""",
)
entry(
index = 5,
label = "C4H8 <=> C2H4 + C2H4-2",
degeneracy = 16.0,
kinetics = Arrhenius(A=(2.63e+15,'s^-1'), n=0, Ea=(259.412,'kJ/mol'), T0=(1,'K'), Tmin=(969,'K'), Tmax=(1280,'K'), Pmin=(26.66,'Pa'), Pmax=(26.66,'Pa')),
reference = Article(
authors = [b'Beadle, P.C.', b'Golden, D.M.', b'King, K.D.', b'Benson, S.W.'],
title = b'Pyrolysis of Cyclobutane',
journal = b'J. Am. Chem. Soc.',
volume = b'94',
pages = b'2943',
year = b'1972',
url = b'http://kinetics.nist.gov/kinetics/Detail?id=1972BEA/GOL2943:1',
),
referenceType = "experiment",
shortDesc = """Absolute value measured directly""",
longDesc =
"""
PrIMe Reaction: r00006456
Bath gas: Ar
Excitation technique: Thermal
Analytical technique: Mass spectrometry
""",
)
entry(
index = 6,
label = "C4H8 <=> C2H4 + C2H4-2",
degeneracy = 16.0,
kinetics = Arrhenius(A=(7.01e+15,'s^-1','+|-',7e+13), n=0, Ea=(264.4,'kJ/mol','+|-',1.255), T0=(1,'K'), Tmin=(683,'K'), Tmax=(773,'K'), Pmin=(13.33,'Pa'), Pmax=(2666,'Pa')),
reference = Article(
authors = [b'Vreeland, R.W.', b'Swinehart, D.F.'],
title = b'A mass spectometric investigation of the thermal decomposition of cyclobutane at low pressures',
journal = b'J. Am. Chem. Soc.',
volume = b'85',
pages = b'3349-3353',
year = b'1963',
url = b'http://kinetics.nist.gov/kinetics/Detail?id=1963VRE/SWI3349-3353:1',
),
referenceType = "experiment",
shortDesc = """Absolute value measured directly""",
longDesc =
"""
PrIMe Reaction: r00006456
Bath gas: Cyclobutane
Excitation technique: Thermal
Analytical technique: Mass spectrometry
""",
)
entry(
index = 7,
label = "C4H8 <=> C2H4 + C2H4-2",
degeneracy = 16.0,
kinetics = Arrhenius(A=(4.2e+15,'s^-1','+|-',8.4e+13), n=0, Ea=(261.906,'kJ/mol'), T0=(1,'K'), Tmin=(690,'K'), Tmax=(733,'K'), Pmin=(86700,'Pa'), Pmax=(200000,'Pa')),
reference = Article(
authors = [b'Carr, R.W., Jr.', b'Walters, W.D.'],
title = b'The thermal decomposition of cyclobutane',
journal = b'J. Phys. Chem.',
volume = b'67',
pages = b'1370-1372',
year = b'1963',
url = b'http://kinetics.nist.gov/kinetics/Detail?id=1963CAR/WAL1370-1372:1',
),
referenceType = "experiment",
shortDesc = """Absolute value measured directly""",
longDesc =
"""
PrIMe Reaction: r00006456
Bath gas: Cyclobutane
Excitation technique: Thermal
Analytical technique: Mass spectrometry
""",
)
entry(
index = 8,
label = "C4H8 <=> C2H4 + C2H4-2",
degeneracy = 16.0,
kinetics = Arrhenius(A=(2e+15,'s^-1','*|/',10), n=0, Ea=(257.749,'kJ/mol','+|-',10.31), T0=(1,'K'), Tmin=(671,'K'), Tmax=(723,'K'), Pmin=(13.33,'Pa'), Pmax=(5733,'Pa')),
reference = Article(
authors = [b'Butler, J.N.', b'Ogawa, R.B.'],
title = b'The thermal decomposition of cyclobutane at low pressures',
journal = b'J. Am. Chem. Soc.',
volume = b'85',
pages = b'3346-3349',
year = b'1963',
url = b'http://kinetics.nist.gov/kinetics/Detail?id=1963BUT/OGA3346-3349:1',
),
referenceType = "experiment",
shortDesc = """Absolute value measured directly""",
longDesc =
"""
PrIMe Reaction: r00006456
Uncertainty: 10.0
Bath gas: Cyclobutane
Excitation technique: Thermal
Analytical technique: Gas chromatography
""",
)
entry(
index = 9,
label = "C4H8 <=> C2H4 + C2H4-2",
degeneracy = 16.0,
kinetics = Arrhenius(A=(4e+15,'s^-1'), n=0, Ea=(261.906,'kJ/mol'), T0=(1,'K'), Tmin=(693,'K'), Tmax=(741,'K'), Pmin=(133,'Pa'), Pmax=(133000,'Pa')),
reference = Article(
authors = [b'Genaux, C.T.', b'Kern, F.', b'Walters, W.D.'],
title = b'The thermal decomposition of cyclobutane',
journal = b'J. Am. Chem. Soc.',
volume = b'75',
pages = b'6196-6199',
year = b'1953',
url = b'http://kinetics.nist.gov/kinetics/Detail?id=1953GEN/KER6196-6199:1',
),
referenceType = "experiment",
shortDesc = """Absolute value measured directly""",
longDesc =
"""
PrIMe Reaction: r00006456
PrIMe Kinetics: http://warehouse.primekinetics.org/depository/reactions/data/r00006456/rk00000001.xml
Bath gas: Cyclobutane
Excitation technique: Thermal
Analytical technique: Pressure measurement
""",
)
entry(
index = 10,
label = "C4H8 <=> C2H4 + C2H4-2",
degeneracy = 16.0,
kinetics = Arrhenius(A=(1.95e+20,'s^-1'), n=-1.26, Ea=(283.215,'kJ/mol'), T0=(1,'K'), Tmin=(600,'K'), Tmax=(2000,'K'), Pmin=(101000,'Pa'), Pmax=(101000,'Pa')),
reference = Article(
authors = [b'Sirjean, B.', b'Glaude, P.A.', b'Ruiz-Lopez, M.F.', b'Fournet, R.'],
title = b'Detailed kinetic study of the ring opening of cycloalkanes by CBS-QB3 calculations',
journal = b'J. Phys. Chem. A',
volume = b'110',
pages = b'12693-12704',
year = b'2006',
url = b'http://kinetics.nist.gov/kinetics/Detail?id=2006SIR/GLA12693-12704:17',
),
referenceType = "theory",
shortDesc = """Transition state theory""",
longDesc =
"""
PrIMe Reaction: r00006456
Pressure dependence: None reported
Reaction potential energy surface was studied using quantum chemistry and rate constants were calculated using transition state theory. This is the rate expression for the "global" reaction proceeding via formation of a biradical intermediate.
""",
)
entry(
index = 11,
label = "C4H8 <=> C2H4 + C2H4-2",
degeneracy = 16.0,
kinetics = Arrhenius(A=(3.31e+21,'s^-1'), n=-1.61, Ea=(285.181,'kJ/mol'), T0=(1,'K'), Tmin=(600,'K'), Tmax=(2000,'K'), Pmin=(101000,'Pa'), Pmax=(101000,'Pa')),
reference = Article(
authors = [b'Sirjean, B.', b'Glaude, P.A.', b'Ruiz-Lopez, M.F.', b'Fournet, R.'],
title = b'Detailed kinetic study of the ring opening of cycloalkanes by CBS-QB3 calculations',
journal = b'J. Phys. Chem. A',
volume = b'110',
pages = b'12693-12704',
year = b'2006',
url = b'http://kinetics.nist.gov/kinetics/Detail?id=2006SIR/GLA12693-12704:18',
),
referenceType = "theory",
shortDesc = """Transition state theory""",
longDesc =
"""
PrIMe Reaction: r00006456
Pressure dependence: None reported
Reaction potential energy surface was studied using quantum chemistry and rate constants were calculated using transition state theory. This is the rate expression for the "global" reaction proceeding via formation of a biradical intermediate.
""",
)
entry(
index = 12,
label = "C2H4 + C2H4-2 <=> C4H8",
degeneracy = 16.0,
kinetics = Arrhenius(A=(69200,'m^3/(mol*s)'), n=0, Ea=(182.918,'kJ/mol'), T0=(1,'K'), Tmin=(723,'K'), Tmax=(786,'K'), Pmin=(40000,'Pa'), Pmax=(173000,'Pa')),
reference = Article(
authors = [b'Quick, L.M.', b'Knecht, D.A.', b'Back, M.H.'],
title = b'Kinetics of the Formation of Cyclobutane from Ethylene',
journal = b'Int. J. Chem. Kinet.',
volume = b'4',
pages = b'61',
year = b'1972',
url = b'http://kinetics.nist.gov/kinetics/Detail?id=1972QUI/KNE61:1',
),
referenceType = "experiment",
shortDesc = """Absolute value measured directly""",
longDesc =
"""
PrIMe Reaction: r00006456
Bath gas: C2H4
Excitation technique: Thermal
Analytical technique: Mass spectrometry
""",
)
entry(
index = 13,
label = "C3H6O <=> C2H4 + CH2O",
degeneracy = 8.0,
kinetics = Arrhenius(A=(2.63e+15,'s^-1','*|/',2.04), n=0, Ea=(259.412,'kJ/mol','+|-',2.594), T0=(1,'K'), Tmin=(668,'K'), Tmax=(758,'K'), Pmin=(99.99,'Pa'), Pmax=(6999,'Pa')),
reference = Article(
authors = [b'Zalotai, L.', b'Hunyadi-Zoltan, Zs.', b'Berces, T.', b'Marta, F.'],
title = b'Kinetics of gas phase decomposition of oxetan and oxetan-2,2-d2',
journal = b'Int. J. Chem. Kinet.',
volume = b'15',
pages = b'505',
year = b'1983',
url = b'http://kinetics.nist.gov/kinetics/Detail?id=1983ZAL/HUN505:1',
),
referenceType = "experiment",
shortDesc = """High or low pressure extrapolation""",
longDesc =
"""
PrIMe Reaction: r00007176
Uncertainty: 2.04
Bath gas: Oxetane
Excitation technique: Thermal
Analytical technique: Gas chromatography
""",
)
entry(
index = 14,
label = "C3H6O <=> C2H4 + CH2O",
degeneracy = 8.0,
kinetics = Arrhenius(A=(5.13e+15,'s^-1','*|/',2.04), n=0, Ea=(263.569,'kJ/mol','+|-',2.636), T0=(1,'K'), Tmin=(693,'K'), Tmax=(753,'K'), Pmin=(53.33,'Pa'), Pmax=(15600,'Pa')),
reference = Article(
authors = [b'Holbrook, K.A.', b'Scott, R.A.'],
title = b'Gas-phase Thermal Unimolecular Decomposition of Oxetan',
journal = b'J. Chem. Soc. Faraday Trans. 1',
volume = b'71',
pages = b'1849',
year = b'1975',
url = b'http://kinetics.nist.gov/kinetics/Detail?id=1975HOL/SCO1849:1',
),
referenceType = "experiment",
shortDesc = """High or low pressure extrapolation""",
longDesc =
"""
PrIMe Reaction: r00007176
PrIMe Kinetics: http://warehouse.primekinetics.org/depository/reactions/data/r00007176/rk00000001.xml
Uncertainty: 2.04
Excitation technique: Thermal
Analytical technique: Gas chromatography
""",
)
entry(
index = 15,
label = "C5H10 <=> C3H6 + C2H4-2",
degeneracy = 12.0,
kinetics = Arrhenius(A=(1.06e+16,'s^-1','+|-',9.5e+14), n=0, Ea=(264.4,'kJ/mol','+|-',1.255), T0=(1,'K'), Tmin=(683,'K'), Tmax=(763,'K'), Pmin=(1.33,'Pa'), Pmax=(1333,'Pa')),
reference = Article(
authors = [b'Thomas, T.F.', b'Conn, P.J.', b'Swinehart, D.F.'],
title = b'Unimolecular reactions of methylcyclobutane at low pressures',
journal = b'J. Am. Chem. Soc.',
volume = b'91',
pages = b'7611-7616',
year = b'1969',
url = b'http://kinetics.nist.gov/kinetics/Detail?id=1969THO/CON7611-7616:1',
),
referenceType = "experiment",
shortDesc = """Absolute value measured directly""",
longDesc =
"""
PrIMe Reaction: r00008107
Bath gas: Methylcyclobutane
Excitation technique: Thermal
Analytical technique: Mass spectrometry
""",
)
entry(
index = 16,
label = "C5H10 <=> C3H6 + C2H4-2",
degeneracy = 12.0,
kinetics = Arrhenius(A=(1.9e+15,'s^-1'), n=0, Ea=(254.423,'kJ/mol'), T0=(1,'K'), Tmin=(693,'K'), Tmax=(703,'K'), Pmin=(933,'Pa'), Pmax=(55600,'Pa')),
reference = Article(
authors = [b'Pataracchia, A.F.', b'Walters, W.D.'],
title = b'The thermal decomposition of methylcyclobutane at low pressures',
journal = b'J. Phys. Chem.',
volume = b'68',
pages = b'3894-3899',
year = b'1964',
url = b'http://kinetics.nist.gov/kinetics/Detail?id=1964PAT/WAL3894-3899:1',
),
referenceType = "experiment",
shortDesc = """High or low pressure extrapolation""",
longDesc =
"""
PrIMe Reaction: r00008107
Bath gas: Methylcyclobutane
Excitation technique: Thermal
Analytical technique: Gas chromatography
""",
)
entry(
index = 17,
label = "C5H10 <=> C3H6 + C2H4-2",
degeneracy = 12.0,
kinetics = Arrhenius(A=(2.4e+15,'s^-1'), n=0, Ea=(256.086,'kJ/mol'), T0=(1,'K'), Tmin=(683,'K'), Tmax=(723,'K'), Pmin=(933,'Pa'), Pmax=(55600,'Pa')),
reference = Article(
authors = [b'Das, M.N.', b'Walters, W.D.'],
title = b'The thermal decomposition of methylcyclobutane',
journal = b'Z. Phys. Chem. (Neue Folge)',
volume = b'15',
pages = b'22-33',
year = b'1958',
url = b'http://kinetics.nist.gov/kinetics/Detail?id=1958DAS/WAL22-33:1',
),
referenceType = "experiment",
shortDesc = """Absolute value measured directly""",
longDesc =
"""
PrIMe Reaction: r00008107
PrIMe Kinetics: http://warehouse.primekinetics.org/depository/reactions/data/r00008107/rk00000001.xml
Bath gas: Methylcyclobutane
Excitation technique: Thermal
Analytical technique: Mass spectrometry
""",
)
entry(
index = 18,
label = "C4H6O <=> C2H4 + C2H2O",
degeneracy = 8.0,
kinetics = Arrhenius(A=(5e+14,'s^-1'), n=0, Ea=(220.334,'kJ/mol'), T0=(1,'K'), Tmin=(650,'K'), Tmax=(1250,'K'), Pmin=(440,'Pa'), Pmax=(1307,'Pa')),
reference = Article(
authors = [b'Braun, W.', b'McNesby, J.R.', b'Scheer, M.D.'],
title = b'A comparative rate method for the study of unimolecular falloff behavior',
journal = b'J. Phys. Chem.',
volume = b'88',
pages = b'1846',
year = b'1984',
url = b'http://kinetics.nist.gov/kinetics/Detail?id=1984BRA/MCN1846:1',
),
referenceType = "experiment",
shortDesc = """High or low pressure extrapolation""",
longDesc =
"""
PrIMe Reaction: r00009414
Bath gas: SF6
Excitation technique: Thermal
Analytical technique: Gas chromatography
""",
)
entry(
index = 19,
label = "C4H6O <=> C2H4 + C2H2O",
degeneracy = 8.0,
kinetics = Arrhenius(A=(3.6e+14,'s^-1'), n=0, Ea=(217.008,'kJ/mol'), T0=(1,'K'), Tmin=(633,'K'), Tmax=(679,'K'), Pmin=(200,'Pa'), Pmax=(5066,'Pa')),
reference = Article(
authors = [b'McGee, T.H.', b'Schleifer, A.'],
title = b'Thermal Decomposition of Cyclobutanone',
journal = b'J. Phys. Chem.',
volume = b'76',
pages = b'963',
year = b'1972',
url = b'http://kinetics.nist.gov/kinetics/Detail?id=1972MCG/SCH963:1',
),
referenceType = "experiment",
shortDesc = """Absolute value measured directly""",
longDesc =
"""
PrIMe Reaction: r00009414
Bath gas: Cyclobutanone
Excitation technique: Thermal
Analytical technique: Gas chromatography
""",
)
entry(
index = 20,
label = "C4H6O <=> C2H4 + C2H2O",
degeneracy = 8.0,
kinetics = Arrhenius(A=(3.6e+14,'s^-1'), n=0, Ea=(217.839,'kJ/mol'), T0=(1,'K'), Tmin=(606,'K'), Tmax=(646,'K'), Pmin=(1333,'Pa'), Pmax=(11700,'Pa')),
reference = Article(
authors = [b'Das, M.N.', b'Kern, F.', b'Coyle, T.D.', b'Walters, W.D.'],
title = b'The thermal decomposition of cyclobutanone',
journal = b'J. Am. Chem. Soc.',
volume = b'76',
pages = b'6271-6274',
year = b'1954',
url = b'http://kinetics.nist.gov/kinetics/Detail?id=1954DAS/KER6271-6274:1',
),
referenceType = "experiment",
shortDesc = """Absolute value measured directly""",
longDesc =
"""
PrIMe Reaction: r00009414
PrIMe Kinetics: http://warehouse.primekinetics.org/depository/reactions/data/r00009414/rk00000001.xml
Bath gas: Cyclobutanone
Excitation technique: Thermal
Analytical technique: Pressure measurement
""",
)
entry(
index = 21,
label = "C4H8O <=> C3H6 + CH2O",
degeneracy = 2.0,
kinetics = Arrhenius(A=(3.39e+14,'s^-1','*|/',1.45), n=0, Ea=(249.434,'kJ/mol','+|-',2.494), T0=(1,'K'), Tmin=(659,'K'), Tmax=(757,'K'), Pmin=(1400,'Pa'), Pmax=(1400,'Pa')),
reference = Article(
authors = [b'Zalotai, L.', b'Berces, T.', b'Marta, F.'],
title = b'Collisional energy transfer in the decomposition of 2-methyloxetane and 3-methyloxetane, I. Gas/gas collisions',
journal = b'React. Kinet. Catal. Lett.',
volume = b'42',
pages = b'79',
year = b'1990',
url = b'http://kinetics.nist.gov/kinetics/Detail?id=1990ZAL/BER79:2',
),
referenceType = "experiment",
shortDesc = """High or low pressure extrapolation""",
longDesc =
"""
PrIMe Reaction: r00010718
Uncertainty: 1.45
Excitation technique: Thermal
Analytical technique: Gas chromatography
""",
)
entry(
index = 22,
label = "C4H8O <=> C3H6 + CH2O",
degeneracy = 2.0,
kinetics = Arrhenius(A=(3.39e+14,'s^-1','*|/',1.32), n=0, Ea=(249.434,'kJ/mol','+|-',2.494), T0=(1,'K'), Tmin=(660,'K'), Tmax=(760,'K'), Pmin=(1400,'Pa'), Pmax=(3000,'Pa')),
reference = Article(
authors = [b'Zalotai, L.', b'Berces, T.', b'Marta, F.'],
title = b'Kinetics and energy transfer in the thermal decomposition of 2-methyloxetane and 3-methyloxetane',
journal = b'J. Chem. Soc. Faraday Trans. 1',
volume = b'86',
pages = b'21',
year = b'1990',
url = b'http://kinetics.nist.gov/kinetics/Detail?id=1990ZAL/BER21:2',
),
referenceType = "experiment",
shortDesc = """Absolute value measured directly""",
longDesc =
"""
PrIMe Reaction: r00010718
Uncertainty: 1.3200001
Excitation technique: Thermal
Analytical technique: Gas chromatography
""",
)
entry(
index = 23,
label = "C4H8O <=> C3H6 + CH2O",
degeneracy = 2.0,
kinetics = Arrhenius(A=(4.4e+14,'s^-1'), n=0, Ea=(249.434,'kJ/mol','+|-',4.997), T0=(1,'K'), Tmin=(730,'K'), Tmax=(756,'K'), Pmin=(800,'Pa'), Pmax=(1893,'Pa')),
reference = Article(
authors = [b'Hammonds, P.', b'Holbrook, K.A.'],
title = b'Thermolyses of 2-Methyloxetan and 2,2-Dimethyloxetan',
journal = b'J. Chem. Soc. Faraday Trans. 1',
volume = b'78',
pages = b'2195',
year = b'1982',
url = b'http://kinetics.nist.gov/kinetics/Detail?id=1982HAM/HOL2195:2',
),
referenceType = "experiment",
shortDesc = """Derived from fitting to a complex mechanism""",
longDesc =
"""
PrIMe Reaction: r00010718
PrIMe Kinetics: http://warehouse.primekinetics.org/depository/reactions/data/r00010718/rk00000001.xml
Bath gas: Oxetane, 2-methyl-
Excitation technique: Thermal
Analytical technique: Gas chromatography
""",
)
entry(
index = 24,
label = "C4H8O-2 <=> C2H4 + C2H4O",
degeneracy = 4.0,
kinetics = Arrhenius(A=(4.68e+15,'s^-1','*|/',1.48), n=0, Ea=(269.389,'kJ/mol','+|-',2.702), T0=(1,'K'), Tmin=(659,'K'), Tmax=(757,'K'), Pmin=(1400,'Pa'), Pmax=(1400,'Pa')),
reference = Article(
authors = [b'Zalotai, L.', b'Berces, T.', b'Marta, F.'],
title = b'Collisional energy transfer in the decomposition of 2-methyloxetane and 3-methyloxetane, I. Gas/gas collisions',
journal = b'React. Kinet. Catal. Lett.',
volume = b'42',
pages = b'79',
year = b'1990',
url = b'http://kinetics.nist.gov/kinetics/Detail?id=1990ZAL/BER79:3',
),
referenceType = "experiment",
shortDesc = """High or low pressure extrapolation""",
longDesc =
"""
PrIMe Reaction: r00010719
Uncertainty: 1.48
Excitation technique: Thermal
Analytical technique: Gas chromatography
""",
)
entry(
index = 25,
label = "C4H8O-2 <=> C2H4 + C2H4O",
degeneracy = 4.0,
kinetics = Arrhenius(A=(4.68e+15,'s^-1','*|/',1.48), n=0, Ea=(269.389,'kJ/mol','+|-',2.702), T0=(1,'K'), Tmin=(660,'K'), Tmax=(760,'K'), Pmin=(1400,'Pa'), Pmax=(3000,'Pa')),
reference = Article(
authors = [b'Zalotai, L.', b'Berces, T.', b'Marta, F.'],
title = b'Kinetics and energy transfer in the thermal decomposition of 2-methyloxetane and 3-methyloxetane',
journal = b'J. Chem. Soc. Faraday Trans. 1',
volume = b'86',
pages = b'21',
year = b'1990',
url = b'http://kinetics.nist.gov/kinetics/Detail?id=1990ZAL/BER21:3',
),
referenceType = "experiment",
shortDesc = """Absolute value measured directly""",
longDesc =
"""
PrIMe Reaction: r00010719
Uncertainty: 1.48
Excitation technique: Thermal
Analytical technique: Gas chromatography
""",
)
entry(
index = 26,
label = "C4H8O-2 <=> C2H4 + C2H4O",
degeneracy = 4.0,
kinetics = Arrhenius(A=(3.36e+14,'s^-1'), n=0, Ea=(249.434,'kJ/mol','+|-',4.997), T0=(1,'K'), Tmin=(703,'K'), Tmax=(756,'K'), Pmin=(800,'Pa'), Pmax=(1893,'Pa')),
reference = Article(
authors = [b'Hammonds, P.', b'Holbrook, K.A.'],
title = b'Thermolyses of 2-Methyloxetan and 2,2-Dimethyloxetan',
journal = b'J. Chem. Soc. Faraday Trans. 1',
volume = b'78',
pages = b'2195',
year = b'1982',
url = b'http://kinetics.nist.gov/kinetics/Detail?id=1982HAM/HOL2195:4',
),
referenceType = "experiment",
shortDesc = """Derived from fitting to a complex mechanism""",
longDesc =
"""
PrIMe Reaction: r00010719
PrIMe Kinetics: http://warehouse.primekinetics.org/depository/reactions/data/r00010719/rk00000001.xml
Bath gas: Oxetane, 2-methyl-
Excitation technique: Thermal
Analytical technique: Gas chromatography
""",
)
entry(
index = 27,
label = "C4H8O-3 <=> C3H6 + CH2O-2",
degeneracy = 4.0,
kinetics = Arrhenius(A=(2.4e+15,'s^-1','*|/',1.86), n=0, Ea=(258.58,'kJ/mol','+|-',2.586), T0=(1,'K'), Tmin=(660,'K'), Tmax=(760,'K'), Pmin=(2000,'Pa'), Pmax=(3000,'Pa')),
reference = Article(
authors = [b'Zalotai, L.', b'Berces, T.', b'Marta, F.'],
title = b'Kinetics and energy transfer in the thermal decomposition of 2-methyloxetane and 3-methyloxetane',
journal = b'J. Chem. Soc. Faraday Trans. 1',
volume = b'86',
pages = b'21',
year = b'1990',
url = b'http://kinetics.nist.gov/kinetics/Detail?id=1990ZAL/BER21:1',
),
referenceType = "experiment",
shortDesc = """Absolute value measured directly""",
longDesc =
"""
PrIMe Reaction: r00010716
PrIMe Kinetics: http://warehouse.primekinetics.org/depository/reactions/data/r00010716/rk00000001.xml
Uncertainty: 1.86
Excitation technique: Thermal
Analytical technique: Gas chromatography
""",
)
entry(
index = 28,
label = "C5H8O <=> C3H6 + C2H2O-2",
degeneracy = 4.0,
kinetics = Arrhenius(A=(3.74e+14,'s^-1','*|/',1.12), n=0, Ea=(200.379,'kJ/mol'), T0=(1,'K'), Tmin=(552,'K'), Tmax=(606,'K'), Pmin=(120,'Pa'), Pmax=(1880,'Pa')),
reference = Article(
authors = [b'Frey, H.M.', b'Watts, H.P.', b'Stevens, I.D.R.'],
title = b'The thermal unimolecular decomposition of 3-methylcyclobutanone',
journal = b'J. Chem. Soc. Faraday Trans. 2',
volume = b'83',
pages = b'601',
year = b'1987',
url = b'http://kinetics.nist.gov/kinetics/Detail?id=1987FRE/WAT601:1',
),
referenceType = "experiment",
shortDesc = """Absolute value measured directly""",
longDesc =
"""
PrIMe Reaction: r00009420
PrIMe Kinetics: http://warehouse.primekinetics.org/depository/reactions/data/r00009420/rk00000001.xml
Uncertainty: 1.12
Excitation technique: Thermal
Analytical technique: Gas chromatography
""",
)
entry(
index = 29,
label = "C6H10 <=> C4H6 + C2H4-2",
degeneracy = 12.0,
kinetics = Arrhenius(A=(3.16e+14,'s^-1'), n=0, Ea=(208.693,'kJ/mol'), T0=(1,'K'), Tmin=(839,'K'), Tmax=(965,'K'), Pmin=(14700,'Pa'), Pmax=(26700,'Pa')),
reference = Article(
authors = [b'Lewis, D.K.', b'Charney, D.J.', b'Kalra, B.L.', b'Plate, A-M.', b'Woodard, M.H.'],
title = b'Kinetics of the thermal isomerizations of gaseous vinylcyclopropane and vinylcyclobutane',
journal = b'J. Phys. Chem. A',
volume = b'101',
pages = b'4097-4102',
year = b'1997',
url = b'http://kinetics.nist.gov/kinetics/Detail?id=1997LEW/CHA4097-4102:3',
),
referenceType = "experiment",
shortDesc = """Absolute value measured directly""",
longDesc =
"""
PrIMe Reaction: r00011373
Bath gas: Ar
Excitation technique: Thermal
Analytical technique: Gas chromatography
""",
)
entry(
index = 30,
label = "C6H10 <=> C4H6 + C2H4-2",
degeneracy = 12.0,
kinetics = Arrhenius(A=(7.41e+14,'s^-1','*|/',1.17), n=0, Ea=(212.019,'kJ/mol'), T0=(1,'K'), Tmin=(569,'K'), Tmax=(639,'K'), Pmin=(133,'Pa'), Pmax=(1800,'Pa')),
reference = Article(
authors = [b'Frey, H.M.', b'Pottinger, R.'],
title = b'Thermal unimolecular reactions of vinylcyclobutane and isopropenylcyclobutane',
journal = b'J. Chem. Soc. Faraday Trans. 1',
volume = b'74',
pages = b'1827',
year = b'1978',
url = b'http://kinetics.nist.gov/kinetics/Detail?id=1978FRE/POT1827:1',
),
referenceType = "experiment",
shortDesc = """Absolute value measured directly""",
longDesc =
"""
PrIMe Reaction: r00011373
PrIMe Kinetics: http://warehouse.primekinetics.org/depository/reactions/data/r00011373/rk00000001.xml
Uncertainty: 1.17
Bath gas: Ethenylcyclobutane
Excitation technique: Thermal
Analytical technique: Gas chromatography
""",
)
entry(
index = 31,
label = "C5H10O <=> C3H6O-2 + C2H4-2",
degeneracy = 12.0,
kinetics = Arrhenius(A=(1.51e+15,'s^-1','*|/',3.31), n=0, Ea=(256.086,'kJ/mol','+|-',7.683), T0=(1,'K'), Tmin=(692,'K'), Tmax=(735,'K'), Pmin=(133,'Pa'), Pmax=(2133,'Pa')),
reference = Article(
authors = [b'Dirjal, N.K.', b'Holbrook, K.A.'],
title = b'Thermal unimolecular decomposition of cyclobutanemethanol',
journal = b'J. Chem. Soc. Faraday Trans.',
volume = b'87',
pages = b'691-693',
year = b'1991',
url = b'http://kinetics.nist.gov/kinetics/Detail?id=1991DIR/HOL691-693:1',
),
referenceType = "experiment",
shortDesc = """Absolute value measured directly""",
longDesc =
"""
PrIMe Reaction: r00012719
PrIMe Kinetics: http://warehouse.primekinetics.org/depository/reactions/data/r00012719/rk00000001.xml
Uncertainty: 3.3099999
Bath gas: Cyclobutanemethanol
Excitation technique: Thermal
Analytical technique: Pressure measurement
""",
)
entry(
index = 32,
label = "C6H12 <=> C4H8-2 + C2H4-2",
degeneracy = 12.0,
kinetics = Arrhenius(A=(3.6e+15,'s^-1'), n=0, Ea=(259.412,'kJ/mol','+|-',5.188), T0=(1,'K'), Tmin=(693,'K'), Tmax=(733,'K'), Pmin=(1333,'Pa'), Pmax=(26700,'Pa')),
reference = Article(
authors = [b'Wellman, R.E.', b'Walters, W.D.'],
title = b'The thermal decomposition of ethylcyclobutane',
journal = b'J. Am. Chem. Soc.',
volume = b'79',
pages = b'1542-1546',
year = b'1957',
url = b'http://kinetics.nist.gov/kinetics/Detail?id=1957WEL/WAL1542-1546:1',
),
referenceType = "experiment",
shortDesc = """Absolute value measured directly""",
longDesc =
"""
PrIMe Reaction: r00012797
PrIMe Kinetics: http://warehouse.primekinetics.org/depository/reactions/data/r00012797/rk00000001.xml
Bath gas: Cyclobutane, ethyl-
Excitation technique: Thermal
Analytical technique: Mass spectrometry
""",
)
entry(
index = 33,
label = "C5H10O-2 <=> C4H8-3 + CH2O",
degeneracy = 2.0,
kinetics = Arrhenius(A=(3.02e+13,'s^-1'), n=0, Ea=(221.996,'kJ/mol','+|-',2.22), T0=(1,'K'), Tmin=(675,'K'), Tmax=(744,'K'), Pmin=(960,'Pa'), Pmax=(1227,'Pa')),
reference = Article(
authors = [b'Hammonds, P.', b'Holbrook, K.A.'],
title = b'Thermolyses of 2-Methyloxetan and 2,2-Dimethyloxetan',
journal = b'J. Chem. Soc. Faraday Trans. 1',
volume = b'78',
pages = b'2195',
year = b'1982',
url = b'http://kinetics.nist.gov/kinetics/Detail?id=1982HAM/HOL2195:6',
),
referenceType = "experiment",
shortDesc = """Derived from fitting to a complex mechanism""",
longDesc =
"""
PrIMe Reaction: r00012962
PrIMe Kinetics: http://warehouse.primekinetics.org/depository/reactions/data/r00012962/rk00000001.xml
Bath gas: Oxetane, 2,2-dimethyl-
Excitation technique: Thermal
Analytical technique: Gas chromatography
""",
)
entry(
index = 34,
label = "C5H10O-3 <=> C2H4 + C3H6O-3",
degeneracy = 4.0,
kinetics = Arrhenius(A=(3.63e+15,'s^-1'), n=0, Ea=(270.22,'kJ/mol','+|-',5.413), T0=(1,'K'), Tmin=(675,'K'), Tmax=(744,'K'), Pmin=(960,'Pa'), Pmax=(1227,'Pa')),
reference = Article(
authors = [b'Hammonds, P.', b'Holbrook, K.A.'],
title = b'Thermolyses of 2-Methyloxetan and 2,2-Dimethyloxetan',
journal = b'J. Chem. Soc. Faraday Trans. 1',
volume = b'78',
pages = b'2195',
year = b'1982',
url = b'http://kinetics.nist.gov/kinetics/Detail?id=1982HAM/HOL2195:7',
),
referenceType = "experiment",
shortDesc = """Derived from fitting to a complex mechanism""",
longDesc =
"""
PrIMe Reaction: r00012963
PrIMe Kinetics: http://warehouse.primekinetics.org/depository/reactions/data/r00012963/rk00000001.xml
Bath gas: Oxetane, 2,2-dimethyl-
Excitation technique: Thermal
Analytical technique: Gas chromatography
""",
)
entry(
index = 35,
label = "C5H10O-4 <=> C4H8-3 + CH2O-2",
degeneracy = 4.0,
kinetics = Arrhenius(A=(3.8e+15,'s^-1','+|-',1.1e+14), n=0, Ea=(253.591,'kJ/mol'), T0=(1,'K'), Tmin=(673,'K'), Tmax=(723,'K'), Pmin=(1333,'Pa'), Pmax=(9733,'Pa')),
reference = Article(
authors = [b'Cohoe, G.F.', b'Walters, W.D.'],
title = b'The kinetics of the thermal decomposition of 3,3-dimethyloxetane',
journal = b'J. Phys. Chem.',
volume = b'71',
pages = b'2326-2331',
year = b'1967',
url = b'http://kinetics.nist.gov/kinetics/Detail?id=1967COH/WAL2326-2331:1',
),
referenceType = "experiment",
shortDesc = """Absolute value measured directly""",
longDesc =
"""
PrIMe Reaction: r00013035
PrIMe Kinetics: http://warehouse.primekinetics.org/depository/reactions/data/r00013035/rk00000001.xml
Bath gas: Oxetane, 3,3-dimethyl-
Excitation technique: Thermal
Analytical technique: Gas chromatography
""",
)
entry(
index = 36,
label = "C5H8O-2 <=> C3H4O + C2H4-2",
degeneracy = 12.0,
kinetics = Arrhenius(A=(2.7e+14,'s^-1','+|-',1.1e+13), n=0, Ea=(222.828,'kJ/mol','+|-',2.228), T0=(1,'K'), Tmin=(633,'K'), Tmax=(673,'K'), Pmin=(1200,'Pa'), Pmax=(4666,'Pa')),
reference = Article(
authors = [b'Roquitte, B.C.', b'Walters, W.D.'],
title = b'The thermal decomposition of cyclobutanecarboxaldehyde',
journal = b'J. Am. Chem. Soc.',
volume = b'84',
pages = b'4049-4052',
year = b'1962',
url = b'http://kinetics.nist.gov/kinetics/Detail?id=1962ROQ/WAL4049-4052:1',
),
referenceType = "experiment",
shortDesc = """Absolute value measured directly""",
longDesc =
"""
PrIMe Reaction: r00011611
PrIMe Kinetics: http://warehouse.primekinetics.org/depository/reactions/data/r00011611/rk00000001.xml
Bath gas: Cyclobutanecarboxaldehyde
Excitation technique: Thermal
Analytical technique: IR absorption
""",
)
entry(
index = 37,
label = "C6H12-2 <=> C2H4 + C4H8-4",
degeneracy = 6.0,
kinetics = Arrhenius(A=(3.7e+15,'s^-1'), n=0, Ea=(263.569,'kJ/mol'), T0=(1,'K'), Tmin=(653,'K'), Tmax=(703,'K'), Pmin=(773,'Pa'), Pmax=(52400,'Pa')),
reference = Article(
authors = [b'Gerberich, H.R.', b'Walters, W.D.'],
title = b'The thermal decomposition of cis-1,2-dimethylcyclobutane',
journal = b'J. Am. Chem. Soc.',
volume = b'83',
pages = b'3935-3939',
year = b'1961',
url = b'http://kinetics.nist.gov/kinetics/Detail?id=1961GER/WAL3935-3939:2',
),
referenceType = "experiment",
shortDesc = """Absolute value measured directly""",
longDesc =
"""
PrIMe Reaction: r00015648
PrIMe Kinetics: http://warehouse.primekinetics.org/depository/reactions/data/r00015648/rk00000001.xml
Bath gas: Cyclobutane, 1,2-dimethyl-, cis-
Excitation technique: Thermal
Analytical technique: Pressure measurement
""",
)
entry(
index = 38,
label = "C6H12-2 <=> C2H4 + C4H8-4",
degeneracy = 6.0,
kinetics = Arrhenius(A=(2.9e+15,'s^-1'), n=0, Ea=(265.232,'kJ/mol'), T0=(1,'K'), Tmin=(663,'K'), Tmax=(713,'K'), Pmin=(1733,'Pa'), Pmax=(50000,'Pa')),
reference = Article(
authors = [b'Gerberich, H.R.', b'Walters, W.D.'],
title = b'The thermal decomposition of trans-1,2-dimethylcyclobutane',
journal = b'J. Am. Chem. Soc.',
volume = b'83',
pages = b'4884-4888',
year = b'1961',
url = b'http://kinetics.nist.gov/kinetics/Detail?id=1961GER/WAL4884-4888:2',
),
referenceType = "experiment",
shortDesc = """Absolute value measured directly""",
longDesc =
"""
PrIMe Reaction: r00015651
PrIMe Kinetics: http://warehouse.primekinetics.org/depository/reactions/data/r00015651/rk00000001.xml
Bath gas: Cyclobutane, 1,2-dimethyl-, trans-
Excitation technique: Thermal
Analytical technique: Pressure measurement
""",
)
entry(
index = 39,
label = "C6H12-3 <=> C3H6 + C3H6-2",
degeneracy = 4.0,
kinetics = Arrhenius(A=(3e+15,'s^-1'), n=0, Ea=(252.76,'kJ/mol'), T0=(1,'K'), Tmin=(653,'K'), Tmax=(703,'K'), Pmin=(773,'Pa'), Pmax=(52400,'Pa')),
reference = Article(
authors = [b'Gerberich, H.R.', b'Walters, W.D.'],
title = b'The thermal decomposition of cis-1,2-dimethylcyclobutane',
journal = b'J. Am. Chem. Soc.',
volume = b'83',
pages = b'3935-3939',
year = b'1961',
url = b'http://kinetics.nist.gov/kinetics/Detail?id=1961GER/WAL3935-3939:3',
),
referenceType = "experiment",
shortDesc = """Absolute value measured directly""",
longDesc =
"""
PrIMe Reaction: r00015649
PrIMe Kinetics: http://warehouse.primekinetics.org/depository/reactions/data/r00015649/rk00000001.xml
Bath gas: Cyclobutane, 1,2-dimethyl-, cis-
Excitation technique: Thermal
Analytical technique: Pressure measurement
""",
)
entry(
index = 40,
label = "C6H12-3 <=> C3H6 + C3H6-2",
degeneracy = 4.0,
kinetics = Arrhenius(A=(2.8e+15,'s^-1'), n=0, Ea=(257.749,'kJ/mol'), T0=(1,'K'), Tmin=(663,'K'), Tmax=(713,'K'), Pmin=(1733,'Pa'), Pmax=(50000,'Pa')),
reference = Article(
authors = [b'Gerberich, H.R.', b'Walters, W.D.'],
title = b'The thermal decomposition of trans-1,2-dimethylcyclobutane',
journal = b'J. Am. Chem. Soc.',
volume = b'83',
pages = b'4884-4888',
year = b'1961',
url = b'http://kinetics.nist.gov/kinetics/Detail?id=1961GER/WAL4884-4888:3',
),
referenceType = "experiment",
shortDesc = """Absolute value measured directly""",
longDesc =
"""
PrIMe Reaction: r00015652
PrIMe Kinetics: http://warehouse.primekinetics.org/depository/reactions/data/r00015652/rk00000001.xml
Bath gas: Cyclobutane, 1,2-dimethyl-, trans-
Excitation technique: Thermal
Analytical technique: Pressure measurement
""",
)
entry(
index = 41,
label = "C5H10O-5 <=> C4H8-5 + CH2O",
degeneracy = 2.0,
kinetics = Arrhenius(A=(3.09e+15,'s^-1','*|/',1.82), n=0, Ea=(266.063,'kJ/mol','+|-',2.661), T0=(1,'K'), Tmin=(688,'K'), Tmax=(756,'K'), Pmin=(267,'Pa'), Pmax=(4266,'Pa')),
reference = Article(
authors = [b'Holbrook, K.A.', b'Scott, R.A.'],
title = b'Gas-phase Unimolecular Pyrolyses of cis- and trans-2,3-Dimethyloxetan',
journal = b'J. Chem. Soc. Faraday Trans. 1',
volume = b'70',
pages = b'43',
year = b'1974',
url = b'http://kinetics.nist.gov/kinetics/Detail?id=1974HOL/SCO43:1',
),
referenceType = "experiment",
shortDesc = """Absolute value measured directly""",
longDesc =
"""
PrIMe Reaction: r00016231
PrIMe Kinetics: http://warehouse.primekinetics.org/depository/reactions/data/r00016231/rk00000001.xml
Uncertainty: 1.8200001
Excitation technique: Thermal
Analytical technique: Gas chromatography
""",
)
entry(
index = 42,
label = "C5H10O-5 <=> C4H8-5 + CH2O",
degeneracy = 2.0,
kinetics = Arrhenius(A=(1.74e+15,'s^-1','*|/',1.78), n=0, Ea=(261.074,'kJ/mol','+|-',2.611), T0=(1,'K'), Tmin=(688,'K'), Tmax=(756,'K'), Pmin=(267,'Pa'), Pmax=(4266,'Pa')),
reference = Article(
authors = [b'Holbrook, K.A.', b'Scott, R.A.'],
title = b'Gas-phase Unimolecular Pyrolyses of cis- and trans-2,3-Dimethyloxetan',
journal = b'J. Chem. Soc. Faraday Trans. 1',
volume = b'70',
pages = b'43',
year = b'1974',
url = b'http://kinetics.nist.gov/kinetics/Detail?id=1974HOL/SCO43:3',
),
referenceType = "experiment",
shortDesc = """Absolute value measured directly""",
longDesc =
"""
PrIMe Reaction: r00016297
PrIMe Kinetics: http://warehouse.primekinetics.org/depository/reactions/data/r00016297/rk00000001.xml
Uncertainty: 1.78
Excitation technique: Thermal
Analytical technique: Gas chromatography
""",
)
entry(
index = 43,
label = "C5H10O-6 <=> C3H6 + C2H4O-2",
degeneracy = 2.0,
kinetics = Arrhenius(A=(8.13e+15,'s^-1','*|/',1.78), n=0, Ea=(270.22,'kJ/mol','+|-',2.702), T0=(1,'K'), Tmin=(688,'K'), Tmax=(756,'K'), Pmin=(267,'Pa'), Pmax=(4266,'Pa')),
reference = Article(
authors = [b'Holbrook, K.A.', b'Scott, R.A.'],
title = b'Gas-phase Unimolecular Pyrolyses of cis- and trans-2,3-Dimethyloxetan',
journal = b'J. Chem. Soc. Faraday Trans. 1',
volume = b'70',
pages = b'43',
year = b'1974',
url = b'http://kinetics.nist.gov/kinetics/Detail?id=1974HOL/SCO43:2',
),
referenceType = "experiment",
shortDesc = """Absolute value measured directly""",
longDesc =
"""
PrIMe Reaction: r00016232
PrIMe Kinetics: http://warehouse.primekinetics.org/depository/reactions/data/r00016232/rk00000001.xml
Uncertainty: 1.78
Excitation technique: Thermal
Analytical technique: Gas chromatography
""",
)
entry(
index = 44,
label = "C5H10O-6 <=> C3H6 + C2H4O-2",
degeneracy = 2.0,
kinetics = Arrhenius(A=(5.01e+15,'s^-1','*|/',1.66), n=0, Ea=(264.4,'kJ/mol','+|-',2.644), T0=(1,'K'), Tmin=(688,'K'), Tmax=(756,'K'), Pmin=(267,'Pa'), Pmax=(4266,'Pa')),
reference = Article(
authors = [b'Holbrook, K.A.', b'Scott, R.A.'],
title = b'Gas-phase Unimolecular Pyrolyses of cis- and trans-2,3-Dimethyloxetan',
journal = b'J. Chem. Soc. Faraday Trans. 1',
volume = b'70',
pages = b'43',
year = b'1974',
url = b'http://kinetics.nist.gov/kinetics/Detail?id=1974HOL/SCO43:4',
),
referenceType = "experiment",
shortDesc = """Absolute value measured directly""",
longDesc =
"""
PrIMe Reaction: r00016298
PrIMe Kinetics: http://warehouse.primekinetics.org/depository/reactions/data/r00016298/rk00000001.xml
Uncertainty: 1.66
Excitation technique: Thermal
Analytical technique: Gas chromatography
""",
)
entry(
index = 45,
label = "C7H12 <=> C2H4 + C5H8",
degeneracy = 6.0,
kinetics = Arrhenius(A=(6.92e+14,'s^-1'), n=0, Ea=(254.423,'kJ/mol'), T0=(1,'K'), Tmin=(699,'K'), Tmax=(737,'K'), Pmin=(267,'Pa'), Pmax=(2666,'Pa')),
reference = Article(
authors = [b'Ellis, R.J.', b'Frey, H.M.'],
title = b'The thermal unimolecular decomposition of bicyclo[3,2,0]-heptane',
journal = b'J. Chem. Soc.',
pages = b'4184-4187',
year = b'1964',
url = b'http://kinetics.nist.gov/kinetics/Detail?id=1964ELL/FRE4184-4187:2',
),
referenceType = "experiment",
shortDesc = """Absolute value measured directly""",
longDesc =
"""
PrIMe Reaction: r00006431
PrIMe Kinetics: http://warehouse.primekinetics.org/depository/reactions/data/r00006431/rk00000001.xml
Bath gas: Bicyclo[3.2.0]heptane
Excitation technique: Thermal
Analytical technique: Gas chromatography
""",
)
entry(
index = 46,
label = "C7H14 <=> C5H10-2 + C2H4-2",
degeneracy = 12.0,
kinetics = Arrhenius(A=(4.3e+15,'s^-1','+|-',8.6e+13), n=0, Ea=(261.906,'kJ/mol'), T0=(1,'K'), Tmin=(683,'K'), Tmax=(1130,'K'), Pmin=(1200,'Pa'), Pmax=(7333,'Pa')),
reference = Article(
authors = [b'Zupan, M.', b'Walters, W.D.'],
title = b'The kinetics of the thermal decomposition of isopropylcyclobutane',
journal = b'J. Phys. Chem.',
volume = b'67',
pages = b'1845-1848',
year = b'1963',
url = b'http://kinetics.nist.gov/kinetics/Detail?id=1963ZUP/WAL1845-1848:1',
),
referenceType = "experiment",
shortDesc = """Absolute value measured directly""",
longDesc =
"""
PrIMe Reaction: r00009049
PrIMe Kinetics: http://warehouse.primekinetics.org/depository/reactions/data/r00009049/rk00000001.xml
Bath gas: Isopropylcyclobutane
Excitation technique: Thermal
Analytical technique: Mass spectrometry
Note: Invalid activation energy uncertainty (8314.472) found and ignored
""",
)
entry(
index = 47,
label = "C6H10O <=> C4H8-3 + C2H2O-2",
degeneracy = 4.0,
kinetics = Arrhenius(A=(3.74e+14,'s^-1','*|/',1.66), n=0, Ea=(192.896,'kJ/mol'), T0=(1,'K'), Tmin=(534,'K'), Tmax=(586,'K'), Pmin=(66.66,'Pa'), Pmax=(1067,'Pa')),
reference = Article(
authors = [b'Frey, H.M.', b'Smith, R.A.'],
title = b'Thermal decomposition of 3,3-dimethylcyclobutanone',
journal = b'J. Chem. Soc. Perkin Trans. 2',
pages = b'752',
year = b'1977',
url = b'http://kinetics.nist.gov/kinetics/Detail?id=1977FRE/SMI752:1',
),
referenceType = "experiment",
shortDesc = """Absolute value measured directly""",
longDesc =
"""
PrIMe Reaction: r00009421
PrIMe Kinetics: http://warehouse.primekinetics.org/depository/reactions/data/r00009421/rk00000001.xml
Uncertainty: 1.66
Bath gas: Cyclobutanone, 3,3-dimethyl-
Excitation technique: Thermal
Analytical technique: Pressure measurement
""",
)
entry(
index = 48,
label = "C5H8-2 + C2H4-2 <=> C7H12-2",
degeneracy = 12.0,
kinetics = Arrhenius(A=(132000,'m^3/(mol*s)'), n=0, Ea=(123.886,'kJ/mol'), T0=(1,'K'), Tmin=(1000,'K'), Tmax=(1180,'K'), Pmin=(253000,'Pa'), Pmax=(253000,'Pa')),
reference = Article(
authors = [b'Simmie, J.M.'],
title = b'Kinetic Study of a Retro Diels-Alder Reaction in a Single-Pulse Shock Tube: Decyclization of 1-Methylcyclohex-1-ene',
journal = b'Int. J. Chem. Kinet.',
volume = b'10',
pages = b'227',
year = b'1978',
url = b'http://kinetics.nist.gov/kinetics/Detail?id=1978SIM227:1',
),
referenceType = "experiment",
shortDesc = """Derived from fitting to a complex mechanism""",
longDesc =
"""
PrIMe Reaction: r00011630
Bath gas: Ar
Excitation technique: Thermal
Analytical technique: Gas chromatography
""",
)
entry(
index = 49,
label = "C6H10O-2 <=> C4H6O-2 + C2H4-2",
degeneracy = 12.0,
kinetics = Arrhenius(A=(3.4e+14,'s^-1'), n=0, Ea=(227.817,'kJ/mol'), T0=(1,'K'), Tmin=(633,'K'), Tmax=(683,'K'), Pmin=(1333,'Pa'), Pmax=(8666,'Pa')),
reference = Article(
authors = [b'Daignault, L.G.', b'Walters, W.D.'],
title = b'The thermal decomposition of methyl cyclobutyl ketone',
journal = b'J. Am. Chem. Soc.',
volume = b'80',
pages = b'541-545',
year = b'1958',
url = b'http://kinetics.nist.gov/kinetics/Detail?id=1958DAI/WAL541-545:1',
),
referenceType = "experiment",
shortDesc = """Absolute value measured directly""",
longDesc =
"""
PrIMe Reaction: r00011632
PrIMe Kinetics: http://warehouse.primekinetics.org/depository/reactions/data/r00011632/rk00000001.xml
Bath gas: Ethanone, 1-cyclobutyl)-
Excitation technique: Thermal
Analytical technique: IR absorption
""",
)
entry(
index = 50,
label = "C7H14-2 <=> C5H10-3 + C2H4-2",
degeneracy = 12.0,
kinetics = Arrhenius(A=(3.4e+15,'s^-1'), n=0, Ea=(257.749,'kJ/mol','+|-',2.577), T0=(1,'K'), Tmin=(673,'K'), Tmax=(729,'K'), Pmin=(720,'Pa'), Pmax=(66000,'Pa')),
reference = Article(
authors = [b'Kellner, S.M.E.', b'Walters, W.D.'],
title = b'The thermal decomposition of n-propylcyclobutane',
journal = b'J. Phys. Chem.',
volume = b'65',
pages = b'466-469',
year = b'1961',
url = b'http://kinetics.nist.gov/kinetics/Detail?id=1961KEL/WAL466-469:1',
),
referenceType = "experiment",
shortDesc = """Absolute value measured directly""",
longDesc =
"""
PrIMe Reaction: r00012798
PrIMe Kinetics: http://warehouse.primekinetics.org/depository/reactions/data/r00012798/rk00000001.xml
Bath gas: Cyclobutane, propyl-
Excitation technique: Thermal
Analytical technique: Pressure measurement
""",
)
entry(
index = 51,
label = "C7H10 <=> C2H4 + C5H6",
degeneracy = 5.0,
kinetics = Arrhenius(A=(7.94e+15,'s^-1','*|/',2.14), n=0, Ea=(222.828,'kJ/mol','+|-',4.465), T0=(1,'K'), Tmin=(580,'K'), Tmax=(626,'K'), Pmin=(5000,'Pa'), Pmax=(5000,'Pa')),
reference = Article(
authors = [b'Cocks, A.T.', b'Frey, H.M.'],
title = b'Thermal Unimolecular Reactions of Bicyclo[3.2.0]hept-2-ene',
journal = b'J. Chem. Soc. A',
pages = b'2564',
year = b'1971',
url = b'http://kinetics.nist.gov/kinetics/Detail?id=1971COC/FRE2564:2',
),
referenceType = "experiment",
shortDesc = """Derived from fitting to a complex mechanism""",
longDesc =
"""
PrIMe Reaction: r00013054
PrIMe Kinetics: http://warehouse.primekinetics.org/depository/reactions/data/r00013054/rk00000001.xml
Uncertainty: 2.1400001
Bath gas: Bicyclo[3.2.0]hept-6-ene
Excitation technique: Thermal
Analytical technique: Gas chromatography
""",
)
entry(
index = 52,
label = "C7H14-3 <=> C2H4 + C5H10-4",
degeneracy = 6.0,
kinetics = Arrhenius(A=(8.51e+15,'s^-1','*|/',1.15), n=0, Ea=(266.895,'kJ/mol'), T0=(1,'K'), Tmin=(660,'K'), Tmax=(728,'K'), Pmin=(667,'Pa'), Pmax=(1600,'Pa')),
reference = Article(
authors = [b'Cocks, A.T.', b'Frey, H.M.'],
title = b'The Thermal Unimolecular Decomposition of 1,1,2-Trimethylcyclobutane',
journal = b'J. Phys. Chem.',
volume = b'75',
pages = b'1437',
year = b'1971',
url = b'http://kinetics.nist.gov/kinetics/Detail?id=1971COC/FRE1437:1',
),
referenceType = "experiment",
shortDesc = """Absolute value measured directly""",
longDesc =
"""
PrIMe Reaction: r00016299
PrIMe Kinetics: http://warehouse.primekinetics.org/depository/reactions/data/r00016299/rk00000001.xml
Uncertainty: 1.15
Excitation technique: Thermal
Analytical technique: Gas chromatography
""",
)
entry(
index = 53,
label = "C6H12O <=> C5H10-5 + CH2O-2",
degeneracy = 4.0,
kinetics = Arrhenius(A=(2.28e+15,'s^-1','*|/',1.42), n=0, Ea=(251.097,'kJ/mol','+|-',2.511), T0=(1,'K'), Tmin=(680,'K'), Tmax=(721,'K'), Pmin=(960,'Pa'), Pmax=(1667,'Pa')),
reference = Article(
authors = [b'Clements, A.D.', b'Frey, H.M.', b'Frey, J.G.'],
title = b'Thermal Decomposition of 3-Ethyl-3-methyloxetan and 3,3-Diethyloxetan',
journal = b'J. Chem. Soc. Faraday Trans. 1',
volume = b'71',
pages = b'2485',
year = b'1975',
url = b'http://kinetics.nist.gov/kinetics/Detail?id=1975CLE/FRE2485:2',
),
referenceType = "experiment",
shortDesc = """Absolute value measured directly""",
longDesc =
"""
PrIMe Reaction: r00016368
PrIMe Kinetics: http://warehouse.primekinetics.org/depository/reactions/data/r00016368/rk00000001.xml
Uncertainty: 1.42
Excitation technique: Thermal
Analytical technique: Pressure measurement
""",
)
entry(
index = 54,
label = "C7H12-2 <=> C5H8-2 + C2H4-2",
degeneracy = 12.0,
kinetics = Arrhenius(A=(4.37e+14,'s^-1'), n=0, Ea=(213.682,'kJ/mol'), T0=(1,'K'), Tmin=(577,'K'), Tmax=(621,'K')),
reference = Article(
authors = [b'Ellis, R.J.', b'Frey, H.M.'],
title = b'Thermal unimolecular decomposition of isopropenylcyclobutane',
journal = b'Trans. Faraday Soc.',
volume = b'59',
pages = b'2076-2079',
year = b'1963',
url = b'http://kinetics.nist.gov/kinetics/Detail?id=1963ELL/FRE2076-2079:2',
),
referenceType = "experiment",
shortDesc = """Absolute value measured directly""",
longDesc =
"""
PrIMe Reaction: r00011630
PrIMe Kinetics: http://warehouse.primekinetics.org/depository/reactions/data/r00011630/rk00000001.xml
Bath gas: Cyclobutane,(1-methylethenyl)-
Excitation technique: Thermal
Analytical technique: Gas chromatography
""",
)
entry(
index = 55,
label = "C7H12-2 <=> C5H8-2 + C2H4-2",
degeneracy = 12.0,
kinetics = Arrhenius(A=(1.66e+15,'s^-1','*|/',1.42), n=0, Ea=(218.671,'kJ/mol','+|-',2.187), T0=(1,'K'), Tmin=(574,'K'), Tmax=(624,'K'), Pmin=(133,'Pa'), Pmax=(1800,'Pa')),
reference = Article(
authors = [b'Frey, H.M.', b'Pottinger, R.'],
title = b'Thermal unimolecular reactions of vinylcyclobutane and isopropenylcyclobutane',
journal = b'J. Chem. Soc. Faraday Trans. 1',
volume = b'74',
pages = b'1827',
year = b'1978',
url = b'http://kinetics.nist.gov/kinetics/Detail?id=1978FRE/POT1827:3',
),
referenceType = "experiment",
shortDesc = """Absolute value measured directly""",
longDesc =
"""
PrIMe Reaction: r00011630
PrIMe Kinetics: http://warehouse.primekinetics.org/depository/reactions/data/r00011630/rk00000002.xml
Uncertainty: 1.42
Excitation technique: Thermal
Analytical technique: Gas chromatography
""",
)
entry(
index = 56,
label = "C7H14-4 <=> C4H8-3 + C3H6-2",
degeneracy = 4.0,
kinetics = Arrhenius(A=(5.62e+15,'s^-1','*|/',1.17), n=0, Ea=(251.929,'kJ/mol'), T0=(1,'K'), Tmin=(660,'K'), Tmax=(728,'K'), Pmin=(667,'Pa'), Pmax=(1600,'Pa')),
reference = Article(
authors = [b'Cocks, A.T.', b'Frey, H.M.'],
title = b'The Thermal Unimolecular Decomposition of 1,1,2-Trimethylcyclobutane',
journal = b'J. Phys. Chem.',
volume = b'75',
pages = b'1437',
year = b'1971',
url = b'http://kinetics.nist.gov/kinetics/Detail?id=1971COC/FRE1437:2',
),
referenceType = "experiment",
shortDesc = """Absolute value measured directly""",
longDesc =
"""
PrIMe Reaction: r00016300
PrIMe Kinetics: http://warehouse.primekinetics.org/depository/reactions/data/r00016300/rk00000001.xml
Uncertainty: 1.17
Excitation technique: Thermal
Analytical technique: Gas chromatography
""",
)
entry(
index = 57,
label = "C6H10O2 <=> C4H6O2 + C2H4-2",
degeneracy = 12.0,
kinetics = Arrhenius(A=(7e+14,'s^-1','+|-',7e+12), n=0, Ea=(239.457,'kJ/mol','+|-',2.395), T0=(1,'K'), Tmin=(653,'K'), Tmax=(693,'K'), Pmin=(840,'Pa'), Pmax=(1520,'Pa')),
reference = Article(
authors = [b'Zupan, M.', b'Walters, W.D.'],
title = b'The kinetics of the thermal decomposition of methyl cyclobutanecarboxylate',
journal = b'J. Am. Chem. Soc.',
volume = b'86',
pages = b'173-176',
year = b'1964',
url = b'http://kinetics.nist.gov/kinetics/Detail?id=1964ZUP/WAL173-176:1',
),
referenceType = "experiment",
shortDesc = """Absolute value measured directly""",
longDesc =
"""
PrIMe Reaction: r00008915
PrIMe Kinetics: http://warehouse.primekinetics.org/depository/reactions/data/r00008915/rk00000001.xml
Bath gas: Cyclobutanecarboxylic acid methyl ester
Excitation technique: Thermal
Analytical technique: Gas chromatography
""",
)
entry(
index = 58,
label = "C7H12O <=> C5H8O-3 + C2H4-2",
degeneracy = 12.0,
kinetics = Arrhenius(A=(3.4e+14,'s^-1','+|-',1e+13), n=0, Ea=(226.985,'kJ/mol'), T0=(1,'K'), Tmin=(643,'K'), Tmax=(683,'K'), Pmin=(533,'Pa'), Pmax=(2266,'Pa')),
reference = Article(
authors = [b'Roquitte, B.C.', b'Walters, W.D.'],
title = b'The thermal decomposition of ethyl cyclobutyl ketone',
journal = b'J. Phys. Chem.',
volume = b'68',
pages = b'1606-1609',
year = b'1964',
url = b'http://kinetics.nist.gov/kinetics/Detail?id=1964ROQ/WAL1606-1609:1',
),
referenceType = "experiment",
shortDesc = """Absolute value measured directly""",
longDesc =
"""
PrIMe Reaction: r00013004
PrIMe Kinetics: http://warehouse.primekinetics.org/depository/reactions/data/r00013004/rk00000001.xml
Bath gas: 1-Propanone, 1-cyclobutyl-
Excitation technique: Thermal
Analytical technique: IR absorption
""",
)
entry(
index = 59,
label = "C7H14O <=> C6H12-4 + CH2O-2",
degeneracy = 4.0,
kinetics = Arrhenius(A=(1.98e+15,'s^-1','*|/',1.16), n=0, Ea=(250.266,'kJ/mol'), T0=(1,'K'), Tmin=(675,'K'), Tmax=(736,'K'), Pmin=(960,'Pa'), Pmax=(1667,'Pa')),
reference = Article(
authors = [b'Clements, A.D.', b'Frey, H.M.', b'Frey, J.G.'],
title = b'Thermal Decomposition of 3-Ethyl-3-methyloxetan and 3,3-Diethyloxetan',
journal = b'J. Chem. Soc. Faraday Trans. 1',
volume = b'71',
pages = b'2485',
year = b'1975',
url = b'http://kinetics.nist.gov/kinetics/Detail?id=1975CLE/FRE2485:1',
),
referenceType = "experiment",
shortDesc = """Absolute value measured directly""",
longDesc =
"""
PrIMe Reaction: r00014886
PrIMe Kinetics: http://warehouse.primekinetics.org/depository/reactions/data/r00014886/rk00000001.xml
Uncertainty: 1.16
Excitation technique: Thermal
Analytical technique: Pressure measurement
""",
)
entry(
index = 60,
label = "C7H8O <=> C5H6-2 + C2H2O-2",
degeneracy = 1.0,
kinetics = Arrhenius(A=(1.45e+13,'s^-1','*|/',1.29), n=0, Ea=(157.144,'kJ/mol','+|-',1.571), T0=(1,'K'), Tmin=(471,'K'), Tmax=(534,'K'), Pmin=(2000,'Pa'), Pmax=(66700,'Pa')),
reference = Article(
authors = [b'Egger, K.W.', b'Cocks, A.T.'],
title = b'Kinetics of the Four-centre Elimination of Keten from Bicyclo[3.2.0]hept-2-en-6-one in the Gas Phase',
journal = b'J. Chem. Soc. Perkin Trans. 2',
pages = b'211',
year = b'1972',
url = b'http://kinetics.nist.gov/kinetics/Detail?id=1972EGG/COC211:1',
),
referenceType = "experiment",
shortDesc = """Absolute value measured directly""",
longDesc =
"""
PrIMe Reaction: r00015146
PrIMe Kinetics: http://warehouse.primekinetics.org/depository/reactions/data/r00015146/rk00000001.xml
Uncertainty: 1.29
Bath gas: CH3CH=CH2
Excitation technique: Thermal
Analytical technique: Gas chromatography
""",
)
entry(
index = 61,
label = "C7H10O <=> C5H8-3 + C2H2O",
degeneracy = 2.0,
kinetics = Arrhenius(A=(1.62e+14,'s^-1','*|/',1.26), n=0, Ea=(202.873,'kJ/mol','+|-',2.029), T0=(1,'K'), Tmin=(546,'K'), Tmax=(652,'K'), Pmin=(547,'Pa'), Pmax=(5333,'Pa')),
reference = Article(
authors = [b'Cocks, A.T.', b'Egger, K.W.'],
title = b'The Gas-Phase Thermal Unimolecular Elimination of Keten from Bicyclo-[3.2.0]heptan-6-one',
journal = b'J. Chem. Soc. Perkin Trans. 2',
pages = b'2014',
year = b'1972',
url = b'http://kinetics.nist.gov/kinetics/Detail?id=1972COC/EGG2014:1',
),
referenceType = "experiment",
shortDesc = """Absolute value measured directly""",
longDesc =
"""
PrIMe Reaction: r00015261
PrIMe Kinetics: http://warehouse.primekinetics.org/depository/reactions/data/r00015261/rk00000001.xml
Uncertainty: 1.26
Excitation technique: Thermal
Analytical technique: Gas chromatography
""",
)
entry(
index = 62,
label = "C8H14 <=> C4H8-3 + C4H6-2",
degeneracy = 4.0,
kinetics = Arrhenius(A=(1.22e+15,'s^-1','*|/',2), n=0, Ea=(199.547,'kJ/mol','+|-',1.995), T0=(1,'K'), Tmin=(536,'K'), Tmax=(574,'K'), Pmin=(3306,'Pa'), Pmax=(3306,'Pa')),
reference = Article(
authors = [b'Chickos, J.S.', b'Frey, H.M.'],
title = b'The thermolysis of 2,2-dimethyl-1-vinylcyclobutane',
journal = b'J. Chem. Soc. Perkin Trans. 2',
pages = b'365',
year = b'1987',
url = b'http://kinetics.nist.gov/kinetics/Detail?id=1987CHI/FRE365:2',
),
referenceType = "experiment",
shortDesc = """Absolute value measured directly""",
longDesc =
"""
PrIMe Reaction: r00017015
PrIMe Kinetics: http://warehouse.primekinetics.org/depository/reactions/data/r00017015/rk00000001.xml
Uncertainty: 2.0
Bath gas: N2
Excitation technique: Thermal
Analytical technique: Gas chromatography
""",
)
entry(
index = 63,
label = "C8H16 <=> C4H8-3 + C4H8-6",
degeneracy = 8.0,
kinetics = Arrhenius(A=(2.04e+16,'s^-1','*|/',1.12), n=0, Ea=(272.715,'kJ/mol','+|-',0.673), T0=(1,'K'), Tmin=(683,'K'), Tmax=(6800,'K'), Pmin=(933,'Pa'), Pmax=(6666,'Pa')),
reference = Article(
authors = [b'Cocks, A.T.', b'Frey, H.M.'],
title = b'Thermal unimolecular decomposition of 1,1,3,3-tetramethylcyclobutane',
journal = b'J. Chem. Soc. A',
pages = b'1671-1673',
year = b'1969',
url = b'http://kinetics.nist.gov/kinetics/Detail?id=1969COC/FRE1671-1673:1',
),
referenceType = "experiment",
shortDesc = """Absolute value measured directly""",
longDesc =
"""
PrIMe Reaction: r00016155
PrIMe Kinetics: http://warehouse.primekinetics.org/depository/reactions/data/r00016155/rk00000001.xml
Uncertainty: 1.12
Bath gas: Cyclobutane, 1,1,3,3-tetramethyl-
Excitation technique: Thermal
Analytical technique: Gas chromatography
""",
)
entry(
index = 64,
label = "C7H14O-2 <=> C5H10-6 + C2H4O",
degeneracy = 2.0,
kinetics = Arrhenius(A=(3.16e+14,'s^-1','*|/',2.24), n=0, Ea=(236.962,'kJ/mol','+|-',4.739), T0=(1,'K'), Tmin=(684,'K'), Tmax=(750,'K'), Pmin=(667,'Pa'), Pmax=(1333,'Pa')),
reference = Article(
authors = [b'Hammonds, P.', b'Holbrook, K.A.', b'Carless, H.A.J.'],
title = b'Thermolyses of cis- and trans-2,2,3,4-tetramethyloxetane',
journal = b'J. Chem. Soc. Faraday Trans. 1',
volume = b'80',
pages = b'691',
year = b'1984',
url = b'http://kinetics.nist.gov/kinetics/Detail?id=1984HAM/HOL691:2',
),
referenceType = "experiment",
shortDesc = """Absolute value measured directly""",
longDesc =
"""
PrIMe Reaction: r00016567
PrIMe Kinetics: http://warehouse.primekinetics.org/depository/reactions/data/r00016567/rk00000001.xml
Uncertainty: 2.24
Bath gas: Oxetane, 2,2,3,4-tetramethyl-,(E)-
Excitation technique: Thermal
Analytical technique: Gas chromatography
""",
)
entry(
index = 65,
label = "C7H14O-2 <=> C5H10-6 + C2H4O",
degeneracy = 2.0,
kinetics = Arrhenius(A=(7.94e+13,'s^-1','*|/',5), n=0, Ea=(228.648,'kJ/mol','+|-',9.146), T0=(1,'K'), Tmin=(682,'K'), Tmax=(751,'K'), Pmin=(667,'Pa'), Pmax=(1333,'Pa')),
reference = Article(
authors = [b'Hammonds, P.', b'Holbrook, K.A.', b'Carless, H.A.J.'],
title = b'Thermolyses of cis- and trans-2,2,3,4-tetramethyloxetane',
journal = b'J. Chem. Soc. Faraday Trans. 1',
volume = b'80',
pages = b'691',
year = b'1984',
url = b'http://kinetics.nist.gov/kinetics/Detail?id=1984HAM/HOL691:3',
),
referenceType = "experiment",
shortDesc = """Absolute value measured directly""",
longDesc =
"""
PrIMe Reaction: r00016568
PrIMe Kinetics: http://warehouse.primekinetics.org/depository/reactions/data/r00016568/rk00000001.xml
Uncertainty: 5.0
Bath gas: Oxetane, 2,2,3,4-tetramethyl-,(Z)-
Excitation technique: Thermal
Analytical technique: Gas chromatography
""",
)
entry(
index = 66,
label = "C7H12O-2 <=> C5H8-4 + C2H4O-2",
degeneracy = 2.0,
kinetics = Arrhenius(A=(2.63e+13,'s^-1','*|/',5), n=0, Ea=(200.379,'kJ/mol','+|-',8.015), T0=(1,'K'), Tmin=(599,'K'), Tmax=(657,'K'), Pmin=(467,'Pa'), Pmax=(4000,'Pa')),
reference = Article(
authors = [b'Carless, H.A.J.', b'Maitra, A.K.', b'Pottinger, R.', b'Frey, H.M.'],
title = b'Thermal decomposition of cis-2,4-dimethyl-trans-3-vinyloxetan',
journal = b'J. Chem. Soc. Faraday Trans. 1',
volume = b'76',
pages = b'1849',
year = b'1980',
url = b'http://kinetics.nist.gov/kinetics/Detail?id=1980CAR/MAI1849:1',
),
referenceType = "experiment",
shortDesc = """Absolute value measured directly""",
longDesc =
"""
PrIMe Reaction: r00016669
PrIMe Kinetics: http://warehouse.primekinetics.org/depository/reactions/data/r00016669/rk00000001.xml
Uncertainty: 5.0
Excitation technique: Thermal
Analytical technique: Pressure measurement
""",
)
entry(
index = 67,
label = "C8H14O <=> C4H8-3 + C4H6O-3",
degeneracy = 4.0,
kinetics = Arrhenius(A=(7.23e+14,'s^-1','*|/',1.07), n=0, Ea=(234.468,'kJ/mol'), T0=(1,'K'), Tmin=(637,'K'), Tmax=(700,'K'), Pmin=(907,'Pa'), Pmax=(907,'Pa')),
reference = Article(
authors = [b'Frey, H.M.', b'Hopf, H.'],
title = b'The thermal unimolecular decomposition of 2,2,4,4-tetramethylcyclobutanone',
journal = b'J. Chem. Soc. Perkin Trans. 2',
pages = b'2016',
year = b'1973',
url = b'http://kinetics.nist.gov/kinetics/Detail?id=1973FRE/HOP2016:1',
),
referenceType = "experiment",
shortDesc = """Derived from fitting to a complex mechanism""",
longDesc =
"""
PrIMe Reaction: r00012680
PrIMe Kinetics: http://warehouse.primekinetics.org/depository/reactions/data/r00012680/rk00000001.xml
Uncertainty: 1.0700001
Excitation technique: Thermal
Analytical technique: Gas chromatography
""",
)
entry(
index = 68,
label = "C9H12O <=> C5H6-2 + C4H6O-4",
degeneracy = 1.0,
kinetics = Arrhenius(A=(7.94e+12,'s^-1','*|/',1.66), n=0, Ea=(157.975,'kJ/mol','+|-',1.58), T0=(1,'K'), Tmin=(470,'K'), Tmax=(550,'K'), Pmin=(987,'Pa'), Pmax=(9333,'Pa')),
reference = Article(
authors = [b'Egger, K.W.'],
title = b'The Gas-Phase Thermal Unimolecular Elimination of 1,1-Dimethylketene from 7,7-Dimethylbicyclo[3.2.0]hept-2-en-6-one',
journal = b'Int. J. Chem. Kinet.',
volume = b'5',
pages = b'285',
year = b'1973',
url = b'http://kinetics.nist.gov/kinetics/Detail?id=1973EGG285:1',
),
referenceType = "experiment",
shortDesc = """Absolute value measured directly""",
longDesc =
"""
PrIMe Reaction: r00008919
PrIMe Kinetics: http://warehouse.primekinetics.org/depository/reactions/data/r00008919/rk00000001.xml
Uncertainty: 1.66
Excitation technique: Thermal
Analytical technique: Gas chromatography
""",
)
|
989,635 | a06c22e6748b828bfd90c2018e5c1aabde0d0317 | #!/usr/bin/env python3.7
# -*- coding: utf-8 -*-
# @project: algorithm-python
# @Time : 2019/4/17 11:34
# @Author : Lemon
# @File : ArrayFindNumsAppearOnce40.py
"""
* 题目描述
* 一个整型数组里除了两个数字之外,其他的数字都出现了偶数次。请写程序找出这两个只出现一次的数字。
*
* 解题思路;
* 首先我们考虑这个问题的一个简单版本:一个数组里除了一个数字之外,其他的数字都出现了两次。请写程序找
* 出这个只出现一次的数字。
* 这个题目的突破口在哪里?题目为什么要强调有一个数字出现一次,其他的出现两次?我们想到了异或运算的性
* 质:任何一个数字异或它自己都等于0 。也就是说,如果我们从头到尾依次异或数组中的每一个数字,那么最终
* 的结果刚好是那个只出现一次的数字,因为那些出现两次的数字全部在异或中抵消掉了。
* 有了上面简单问题的解决方案之后,我们回到原始的问题。如果能够把原数组分为两个子数组。在每个子数
* 组中,包含一个只出现一次的数字,而其它数字都出现两次。如果能够这样拆分原数组,按照前面的办法就
* 是分别求出这两个只出现一次的数字了。我们还是从头到尾依次异或数组中的每一个数字,那么最终得到的结
* 果就是两个只出现一次的数字的异或结果。因为其它数字都出现了两次,在异或中全部抵消掉了。由于这两个数
* 字肯定不一样,那么这个异或结果肯定不为0 ,也就是说在这个结果数字的二进制表示中至少就有一位为1 。我
* 们在结果数字中找到第一个为1 的位的位置,记为第N 位。现在我们以第N 位是不是1 为标准把原数组中的数字
* 分成两个子数组,第一个子数组中每个数字的第N 位都为1 ,而第二个子数组的每个数字的第N 位都为0 。
* 现在我们已经把原数组分成了两个子数组,每个子数组都包含一个只出现一次的数字,而其它数字都出现了两次。
* 因此到此为止,所有的问题我们都已经解决。
* 需要考虑三个方面:
* 1、isBit(int num,int indexBit)判断num的二进制表示中从某一端数起的indexBit位是不是1
* 2、findFirstBitIs(int num)用来在整数num的二进制表示中找到最右边是1的位的坐标;考虑int的类型的取
* 值范围了,如果int化成二进制,最多Integer.SIZE位
* 3、FindNumsAppearOnce(self, array),返回[a,b] 其中ab是出现一次的两个数字,思路就是利用异或的特性;
"""
class Solution:
    """Locate the two values that occur exactly once in an array in which
    every other value occurs an even number of times."""

    def FindNumsAppearOnce(self, array):
        """Return ``[a, b]``: the two values appearing exactly once in ``array``.

        XOR of all elements cancels the even-count values, leaving ``a ^ b``.
        Any set bit of that result distinguishes a from b, so the array is
        partitioned on that bit and each half is XOR-reduced independently.
        """
        combined = 0
        for value in array:
            combined ^= value
        # Lowest set bit of (a ^ b) tells the two singletons apart.
        split_bit = self.findFirstBitIs(combined)
        group_zero, group_one = 0, 0
        for value in array:
            if self.isBit(value, split_bit):
                group_one ^= value
            else:
                group_zero ^= value
        return [group_zero, group_one]

    def isBit(self, num, indexBit):
        """Return bit ``indexBit`` of ``num`` (0 or 1)."""
        return (num >> indexBit) & 1

    def findFirstBitIs(self, array):
        """Return the index of the lowest set bit of ``array``."""
        position = 0
        while not (array & 1):
            array >>= 1
            position += 1
        return position
if __name__ == "__main__":
    # Quick smoke test: 2 and 4 are unpaired, so they should be reported.
    sample = [1, 2, 1, 4]
    print(Solution().FindNumsAppearOnce(array=sample))
|
989,636 | 3972dcbd93b9e996d5980e9e42ab3e77df3f6573 | from linked_list import LinkedList
# Complete this function:
def nth_last_node(linked_list, n):
    """Return the n-th node counting from the tail (1 == tail), or None if
    the list holds fewer than n nodes.

    Two-pointer technique: ``tail_seeker`` runs ahead; once it is n nodes in,
    ``current`` starts following, so when the seeker falls off the end,
    ``current`` sits on the n-th-from-last node.
    Time complexity O(len), space complexity O(1).
    """
    current = None
    tail_seeker = linked_list.head_node
    count = 1
    # BUGFIX: the original condition was ``tail_seeker.get_value() is not None``,
    # which raises AttributeError once tail_seeker advances past the tail
    # (get_next_node() returns None). Test the node itself instead.
    while tail_seeker is not None:
        tail_seeker = tail_seeker.get_next_node()
        count += 1
        if count >= n + 1:
            if current is None:
                current = linked_list.head_node
            else:
                current = current.get_next_node()
    return current
# ------------------------------------- alternate solution----------------------------------------------------------------
# def list_nth_last(linked_list, n):
# linked_list_as_list = []
# current_node = linked_list.head_node
# while current_node:
# linked_list_as_list.append(current_node)
# current_node = current_node.get_next_node()
# return linked_list_as_list[len(linked_list_as_list) - n]
#time complexity is O(n) and space complexity is O(n)
def generate_test_linked_list():
    """Build a LinkedList containing 1..50 in ascending order.

    Values are inserted at the head from 50 down to 1, so the head ends up
    holding 1 and the tail 50.
    """
    result = LinkedList()
    for value in range(50, 0, -1):
        result.insert_beginning(value)
    return result
# Use this to test your code:
test_list = generate_test_linked_list()  # holds 1..50, head == 1, tail == 50
print(test_list.stringify_list())
# The 4th node from the tail of 1..50 is 47.
nth_last = nth_last_node(test_list, 4)
print(nth_last.value)
|
989,637 | be4d5c0ac66195ac286d616690d829bb26b4c835 | import os
import functools
import requests
from pathlib import Path
import flask
from flask import (
Flask,
abort,
make_response,
jsonify,
redirect,
render_template,
url_for,
)
from astroapiserver import ENV, API
PROJECT_PATH = Path(__file__).parents[1].resolve()
API_URL = "http://127.0.0.1:8081"
app = Flask(__name__, template_folder=str(PROJECT_PATH / "test" / "templates"))
app.secret_key = ENV["SECRET"]
@app.context_processor
def global_var():
    """Inject variables made available to every rendered template."""
    return {
        "API_URL": API_URL,
        # Callable so templates get a fresh random token on each invocation.
        "csrf_token": lambda: os.urandom(64),
    }
def login_required(f):
    """Decorator allowing the view only for authenticated sessions.

    Forwards the request cookies to the API root; if the JSON payload
    contains a username the wrapped view runs, otherwise the request is
    aborted with HTTP 401.
    """
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        payload = requests.get(API_URL, cookies=flask.request.cookies).json()
        if payload and payload.get("username"):
            return f(*args, **kwargs)
        abort(401)
    return wrapper
################
# WEB / ROUTES #
################
@app.route("/")
def home():
    """Render the home page, adding the username when a session exists."""
    context = {}
    user_info = requests.get(API_URL, cookies=flask.request.cookies).json()
    if user_info is not None:
        context["username"] = user_info.get("username")
    return render_template("home.html", **context)
@app.route("/secret")
@login_required
def secret():
    """Login-protected endpoint; login_required aborts with 401 otherwise."""
    return jsonify("42")
|
989,638 | c898d70cb349404e272208517867199fa65990e5 | #-------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: Katherine
#
# Created: 01/04/2015
# Copyright: (c) Katherine 2015
# Licence: <your licence>
#-------------------------------------------------------------------------------
from pymongo import MongoClient, GEO2D, GEOSPHERE
import datetime as dt
import pprint
import simplekml
import math
import os
# Mean Earth radius in meters, derived from the ~40,075 km circumference:
# r = C / (2 * pi).
# BUGFIX: the original expression ``40075000/2*math.pi`` multiplied by pi
# instead of dividing (operator precedence), yielding ~6.29e7 m instead of
# ~6.38e6 m and inflating every derived great-circle distance ~10x.
earthRad = 40075000 / (2 * math.pi)  # radius of earth in meters
earthRadMiles = 3963.2  # radius of earth in miles
def convert_lat_lon_strings(string):
    """Convert a latitude/longitude text string into signed decimal degrees.

    Accepts decimal or degrees/minutes/seconds fields, optionally followed by
    a hemisphere letter (N/S/E/W). Returns 0 for values outside +/-180.
    """
    from string import punctuation
    # Turn every punctuation mark except '-' and '.' into whitespace so the
    # string splits cleanly into numeric fields.
    separators = punctuation.replace('-', '').replace('.', '')
    for mark in separators:
        string = string.replace(mark, ' ')
    fields = string.split()
    if fields[-1] in ('N', 'S', 'E', 'W'):
        # Southern and western hemispheres map to negative coordinates.
        if fields[-1] in ('S', 'W') and '-' not in fields[0]:
            fields[0] = '-' + fields[0]
        del fields[-1]  # drop the hemisphere indicator
    # DMS to decimal: deg + min/60 + sec/3600.
    coordinate = 0
    divisor = 1
    for field in fields:
        coordinate += float(field) / divisor
        divisor *= 60
    # Reject anything outside the valid coordinate range.
    if abs(coordinate) > 180:
        return 0
    return coordinate
def clean_lat_long(orig_text):
    """Clean a WBAN lat/lon entry and return a [longitude, latitude] pair.

    Empty or unparseable fields yield [0, 0].
    NOTE(review): the except branch below prints the error and implicitly
    returns None, unlike every other failure path -- confirm callers cope.
    """
    try:
        text = str(orig_text)
        for char in text:
            if char.isalpha():
                #if there is an alpha character
                if char not in ['N','S','E','W']:
                    text = text.replace(char,'') #remove any letters other than NSEW
        # HACK(review): leftover debugging hook for one specific station
        # location -- presumably safe to delete once confirmed unused.
        if text == '32.267N 64.667W':
            print "DEBUG!!!"
        #add space between coordinate and hemisphere symbol
        text = text.replace('N',' N').replace('S',' S').replace('E',' E').replace('W',' W')
        if text.find('/') > -1:
            #if the lat long is delineated by a '/'
            latstr,lonstr = text.split('/')[0],text.split('/')[1] #accounts for additional notations in location field ('/altitude')
        elif text.find(',') > -1:
            #comma-separated
            latstr,lonstr = text.split(',')
        elif text.find('N') > -1:
            #split by the north hemisphere symbol
            latstr,lonstr = text.split('N')
            latstr = latstr + 'N' #add north symbol back in
        elif text.find('S') > -1:
            #split by the south hemisphere symbol
            latstr,lonstr = text.split('S')
            latstr = latstr + 'S' #add south symbol back in
        elif text == '':
            #empty location field
            return [0,0]
        else:
            #otherwise report the unparseable string and return [0,0]
            print "Cannot parse lat/long: %s" % text
            return [0,0]
        lat,lng = convert_lat_lon_strings(latstr),convert_lat_lon_strings(lonstr)
        return [lng,lat]
    except Exception as e:
        print "#####Error parsing lat/long: %s" % orig_text
        print "#####ERROR: %s" % e
############################ Lat/Lon formulas ########################################
def dist_lat_lon(latCoord1, lonCoord1, latCoord2, lonCoord2):
    """Great-circle distance (in the units of ``earthRad``) between two
    lat/lon points given in degrees, using the atan2 form of the spherical
    distance formula."""
    phi1 = math.radians(latCoord1)
    lam1 = math.radians(lonCoord1)
    phi2 = math.radians(latCoord2)
    lam2 = math.radians(lonCoord2)
    numerator = math.sqrt(
        (math.cos(phi2) * math.sin(lam1 - lam2)) ** 2
        + (math.cos(phi1) * math.sin(phi2)
           - math.sin(phi1) * math.cos(phi2) * math.cos(lam1 - lam2)) ** 2
    )
    denominator = (math.sin(phi2) * math.sin(phi1)
                   + math.cos(phi1) * math.cos(phi2) * math.cos(lam1 - lam2))
    return earthRad * math.atan2(numerator, denominator)
def lat_lon_from_point_and_bearing(lat, lon, angle, dist):
    """Destination point ``dist`` away from (lat, lon) along the initial
    bearing ``angle`` (degrees); ``dist`` shares the units of ``earthRad``.
    Returns (lat2, lon2) in degrees."""
    phi = math.radians(lat)
    theta = math.radians(angle)
    delta = dist / earthRad  # angular distance on the sphere
    lat2 = math.degrees(math.asin(
        math.sin(phi) * math.cos(delta)
        + math.cos(phi) * math.sin(delta) * math.cos(theta)
    ))
    lon2 = lon + math.degrees(math.atan2(
        math.sin(theta) * math.sin(delta) * math.cos(phi),
        math.cos(delta) - math.sin(phi) * math.sin(math.radians(lat2)),
    ))
    return lat2, lon2
def bearing_from_two_lat_lons(lat1, lon1, lat2, lon2):
    """Initial great-circle bearing, in degrees [0, 360), from point 1 to
    point 2 (both given as lat/lon in degrees)."""
    dlon = math.radians(lon2) - math.radians(lon1)
    rlat1 = math.radians(lat1)
    rlat2 = math.radians(lat2)
    x = math.sin(dlon) * math.cos(rlat2)
    y = (math.cos(rlat1) * math.sin(rlat2)
         - math.sin(rlat1) * math.cos(rlat2) * math.cos(dlon))
    return (math.degrees(math.atan2(x, y)) + 360) % 360
def find_midpoint_between_lat_lons(lat1, lon1, lat2, lon2):
    """Midpoint of the great-circle segment between two lat/lon points:
    walk half the distance from point 1 along the initial bearing."""
    heading = bearing_from_two_lat_lons(lat1, lon1, lat2, lon2)
    half_way = dist_lat_lon(lat1, lon1, lat2, lon2) / 2
    return lat_lon_from_point_and_bearing(lat1, lon1, heading, half_way)
########################################################################################
def main():
    """Index WBAN station coordinates in MongoDB geospatially and export the
    stations nearest zip code 95014 as a KML file."""
    start = dt.datetime.now()
    # Handles into the local MongoDB instance.
    client = MongoClient()
    db = client['noaa_weather']
    obs_coll = db['hourly_coll']
    wban_coll = db['WBAN']
    geo_coll = db['geo_json_coll']
    zips = client['zip_codes']
    zip_data_coll = zips['zip_data_coll']
##    bulk = geo_coll.initialize_ordered_bulk_op()
##    print len(list(wban_coll.find()))
##    for WBAN in wban_coll.find({}):
##        coords = clean_lat_long(WBAN['LOCATION']) #get cleaned lat/lng coordinates
##        _id = WBAN['_id'] #keep parent ID
##        if len(list(geo_coll.find({'_id':_id}))) == 0:
##            record = {'_id':_id,'type':'Point','coordinates':coords}
##            bulk.insert(record)
##    try:
##        result = bulk.execute()
##        pprint.pprint(result)
##    except Exception as e:
##        print "#####ERROR: %s" % e
    print "Indexing coordinates..."
    try:
        geo_coll.ensure_index([('coordinates', GEO2D)]) #create index on coordinates
    except Exception as e:
        print "#####ERROR: %s" % e
    kml = simplekml.Kml()
    for wban in wban_coll.find({'STATION_NAME':'SAN FRANCISCO'}):
        doc = geo_coll.find_one({'_id':wban['_id']})
####        print doc
##        if doc['coordinates'] == [0,0]:
##            print "No coordinates for %s" % wban['WBAN_ID']
##        else:
##            pnt = kml.newpoint(name = '',coords=[(doc['coordinates'][0],doc['coordinates'][1])])
##            pnt.style.iconstyle.icon.href = 'http://maps.google.com/mapfiles/kml/shapes/shaded_dot.png'
##            pnt.style.iconstyle.color = 'red'
##    kml.save(os.getcwd() + "\\wbans_GEOJSON_SF.kml")
##    kml = None
    # SECURITY(review): eval() on a value read from the database executes
    # arbitrary code if the 'bbox' field is ever tampered with -- consider
    # ast.literal_eval instead.
    coords = eval(zip_data_coll.find_one({'zip':'95014'})['bbox'])#define coordinates of zip-code polygon
    # Centre of the zip-code bounding box; bbox appears to be stored as
    # [lon_min, lat_min, lon_max, lat_max] -- TODO confirm against the data.
    center = find_midpoint_between_lat_lons(coords[1],coords[0],coords[3],coords[2])
    kml = simplekml.Kml()
    #for doc in geo_coll.find({'coordinates':{'$geoWithin':{'$centerSphere': [[center[1],center[0]], 50/3963.2 ]}}}):
    for doc in geo_coll.find({'coordinates':{'$near':[center[1],center[0]]}}).limit(10):
        #print doc
        wban_name = wban_coll.find_one({'_id':doc['_id']})['STATION_NAME']
        pnt = kml.newpoint(name = wban_name,coords=[(doc['coordinates'][0],doc['coordinates'][1])])
        pnt.style.iconstyle.icon.href = 'http://maps.google.com/mapfiles/kml/shapes/shaded_dot.png'
        pnt.style.iconstyle.color = '7fff0000'#transparent blue
    kml.save(os.getcwd() + "\\wbans_near_95014.kml")
    print "Runtime: " + str(dt.datetime.now() - start)

if __name__ == '__main__':
    main()
|
989,639 | 012ac41500fe052568a4fc1eeb349c88cf12b13f | import param
import numpy as np
from bokeh.models import Patches
from ...core.data import Dataset
from ...core.util import basestring, max_range, dimension_sanitizer
from .graphs import GraphPlot
class SankeyPlot(GraphPlot):
color_index = param.ClassSelector(default=2, class_=(basestring, int),
allow_None=True, doc="""
Index of the dimension from which the node colors will be drawn""")
label_index = param.ClassSelector(default=2, class_=(basestring, int),
allow_None=True, doc="""
Index of the dimension from which the node labels will be drawn""")
label_position = param.ObjectSelector(default='right', objects=['left', 'right'],
doc="""
Whether node labels should be placed to the left or right.""")
show_values = param.Boolean(default=True, doc="""
Whether to show the values.""")
node_width = param.Number(default=15, doc="""
Width of the nodes.""")
node_padding = param.Integer(default=10, doc="""
Number of pixels of padding relative to the bounds.""")
iterations = param.Integer(default=32, doc="""
Number of iterations to run the layout algorithm.""")
_style_groups = dict(GraphPlot._style_groups, quad='nodes', text='label')
_draw_order = ['patches', 'quad', 'text']
style_opts = GraphPlot.style_opts + ['edge_fill_alpha', 'nodes_line_color',
'label_text_font_size']
filled = True
def _init_glyphs(self, plot, element, ranges, source):
ret = super(SankeyPlot, self)._init_glyphs(plot, element, ranges, source)
renderer = plot.renderers.pop(plot.renderers.index(self.handles['glyph_renderer']))
plot.renderers = [renderer] + plot.renderers
return ret
def get_data(self, element, ranges, style):
data, mapping, style = super(SankeyPlot, self).get_data(element, ranges, style)
self._compute_quads(element, data, mapping)
style['nodes_line_color'] = 'black'
lidx = element.nodes.get_dimension(self.label_index)
if lidx is None:
if self.label_index is not None:
dims = element.nodes.dimensions()[2:]
self.warning("label_index supplied to Sankey not found, "
"expected one of %s, got %s." %
(dims, self.label_index))
return data, mapping, style
self._compute_labels(element, data, mapping)
self._patch_hover(element, data)
return data, mapping, style
def _compute_quads(self, element, data, mapping):
"""
Computes the node quad glyph data.x
"""
quad_mapping = {'left': 'x0', 'right': 'x1', 'bottom': 'y0', 'top': 'y1'}
quad_data = dict(data['scatter_1'])
quad_data.update({'x0': [], 'x1': [], 'y0': [], 'y1': []})
for node in element._sankey['nodes']:
quad_data['x0'].append(node['x0'])
quad_data['y0'].append(node['y0'])
quad_data['x1'].append(node['x1'])
quad_data['y1'].append(node['y1'])
data['quad_1'] = quad_data
if 'node_fill_color' in mapping['scatter_1']:
quad_mapping['fill_color'] = mapping['scatter_1']['node_fill_color']
mapping['quad_1'] = quad_mapping
def _compute_labels(self, element, data, mapping):
"""
Computes labels for the nodes and adds it to the data.
"""
lidx = element.nodes.get_dimension(self.label_index)
if element.vdims:
edges = Dataset(element)[element[element.vdims[0].name]>0]
nodes = list(np.unique([edges.dimension_values(i) for i in range(2)]))
nodes = element.nodes.select(**{element.nodes.kdims[2].name: nodes})
else:
nodes = element
value_dim = element.vdims[0]
labels = [lidx.pprint_value(v) for v in nodes.dimension_values(lidx)]
if self.show_values:
value_labels = []
for i, node in enumerate(element._sankey['nodes']):
value = value_dim.pprint_value(node['value'])
label = '%s - %s' % (labels[i], value)
if value_dim.unit:
label += ' %s' % value_dim.unit
value_labels.append(label)
labels = value_labels
ys = nodes.dimension_values(1)
nodes = element._sankey['nodes']
offset = (nodes[0]['x1']-nodes[0]['x0'])/4.
if self.label_position == 'right':
xs = np.array([node['x1'] for node in nodes])+offset
else:
xs = np.array([node['x0'] for node in nodes])-offset
data['text_1'] = dict(x=xs, y=ys, text=[str(l) for l in labels])
align = 'left' if self.label_position == 'right' else 'right'
mapping['text_1'] = dict(text='text', x='x', y='y', text_baseline='middle', text_align=align)
    def _patch_hover(self, element, data):
        """
        Replace edge start and end hover data with label_index data.

        Only active when hovering edges and a hover tool exists; maps the
        raw node indices in the patches source to their label values.
        """
        if not (self.inspection_policy == 'edges' and 'hover' in self.handles):
            return
        lidx = element.nodes.get_dimension(self.label_index)
        src, tgt = [dimension_sanitizer(kd.name) for kd in element.kdims[:2]]
        # presumably suffixed to avoid bokeh's reserved 'start'/'end'
        # graph columns -- TODO confirm against get_data
        if src == 'start': src += '_values'
        if tgt == 'end': tgt += '_values'
        # Build node-index -> label lookup from the node dataset
        lookup = dict(zip(*(element.nodes.dimension_values(d) for d in (2, lidx))))
        src_vals = data['patches_1'][src]
        tgt_vals = data['patches_1'][tgt]
        data['patches_1'][src] = [lookup.get(v, v) for v in src_vals]
        data['patches_1'][tgt] = [lookup.get(v, v) for v in tgt_vals]
    def get_extents(self, element, ranges, range_type='combined'):
        """
        Computes the (x0, y0, x1, y1) extents of the Sankey graph,
        padding the x-range more generously on the label side so that
        node labels remain visible.
        """
        if range_type == 'extents':
            return element.nodes.extents
        xdim, ydim = element.nodes.kdims[:2]
        # Larger pad when labels are drawn, small symmetric pad otherwise
        xpad = .05 if self.label_index is None else 0.25
        x0, x1 = ranges[xdim.name][range_type]
        y0, y1 = ranges[ydim.name][range_type]
        xdiff = (x1-x0)
        ydiff = (y1-y0)
        if self.label_position == 'right':
            x0, x1 = x0-(0.05*xdiff), x1+xpad*xdiff
        else:
            x0, x1 = x0-xpad*xdiff, x1+(0.05*xdiff)
        # Respect any explicit dimension ranges
        x0, x1 = max_range([xdim.range, (x0, x1)])
        y0, y1 = max_range([ydim.range, (y0-(0.05*ydiff), y1+(0.05*ydiff))])
        return (x0, y0, x1, y1)
def _postprocess_hover(self, renderer, source):
if self.inspection_policy == 'edges':
if not isinstance(renderer.glyph, Patches):
return
else:
if isinstance(renderer.glyph, Patches):
return
super(SankeyPlot, self)._postprocess_hover(renderer, source)
|
989,640 | 8ae8bcecfd9f0ee703431e9e237f3198175f9398 | # --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# pylint: disable=too-many-statements
# pylint: disable=too-many-locals
from azure.cli.core.commands import CliCommandType
def load_command_table(self, _):
    """Register the ``swiftlet virtual-machine`` command group.

    Maps each CLI command to its custom implementation in the extension;
    the client factory is imported lazily inside the loader.
    """
    from azext_swiftlet.generated._client_factory import cf_virtual_machine
    swiftlet_virtual_machine = CliCommandType(
        operations_tmpl='azext_swiftlet.vendored_sdks.swiftlet.operations._virtual_machine_operations#VirtualMachineOpe'
                        'rations.{}',
        client_factory=cf_virtual_machine)
    with self.command_group('swiftlet virtual-machine', swiftlet_virtual_machine, client_factory=cf_virtual_machine,
                            is_experimental=True) as g:
        g.custom_command('list', 'swiftlet_virtual_machine_list')
        g.custom_show_command('show', 'swiftlet_virtual_machine_show')
        # Long-running operations support --no-wait
        g.custom_command('create', 'swiftlet_virtual_machine_create', supports_no_wait=True)
        g.custom_command('update', 'swiftlet_virtual_machine_update', supports_no_wait=True)
        g.custom_command('delete', 'swiftlet_virtual_machine_delete', supports_no_wait=True, confirmation=True)
        g.custom_command('list-bundle', 'swiftlet_virtual_machine_list_bundle')
        g.custom_command('list-image', 'swiftlet_virtual_machine_list_image')
        g.custom_command('start', 'swiftlet_virtual_machine_start', supports_no_wait=True)
        g.custom_command('stop', 'swiftlet_virtual_machine_stop', supports_no_wait=True)
        g.custom_wait_command('wait', 'swiftlet_virtual_machine_show')
|
989,641 | c489663b1675bfc333455e132743fe45948e9de9 | import numpy
from srxraylib.plot.gol import plot, plot_image
from srxraylib.metrology.dabam import write_shadowSurface

# Load the measured sagittal correction profile
# (column 0: coordinate, column 1: height) and preview it.
filein = "/users/srio/Oasys/diaboloid_correction.txt"
sagittal = numpy.loadtxt(filein)
sagittal_x = sagittal[:,0].copy()
sagittal_y = sagittal[:,1].copy() * 1
# print(sagittal_x.shape)
plot(sagittal_x, sagittal_y)

# Build a 2D mesh by sweeping the 1D sagittal profile along the
# tangential direction (the profile is constant along tangential_x).
tangential_x = numpy.linspace(-0.4, 0.4, 1001)
tangential_y = numpy.zeros_like(tangential_x)
mesh = numpy.zeros((sagittal_x.size, tangential_x.size))
for i in range(tangential_x.size):
    mesh[:,i] = sagittal_y
# Reference the surface to its central point
mesh -= mesh[mesh.shape[0]//2, mesh.shape[1]//2]
plot_image(mesh, sagittal_x, tangential_x, aspect='auto')

# Write the mirror surface in SHADOW surface-file format
fileout = "/users/srio/Oasys/diaboloid_bl1222_goldenrule_shadow.dat"
print(mesh.shape, sagittal_x.shape, tangential_x.shape)
write_shadowSurface(mesh.T, sagittal_x, tangential_x, outFile=fileout)

# Rescale the same correction onto the crystal footprint
mirror_size = sagittal_x[-1] * 2
d_mirror_over_crystal = 18.8 / 14.1  # distance ratio mirror/crystal -- TODO confirm
crystal_size = mirror_size / d_mirror_over_crystal
print("mirror_size, crystal size: ", mirror_size, crystal_size)
fileout = "/users/srio/Oasys/diaboloid_crystal.dat"
print(mesh.shape, sagittal_x.shape, tangential_x.shape)
bragg_angle = (90 - 80.824081) * numpy.pi / 180
correction_factor = (0.002 / bragg_angle) * 1.0
print("Correction factor: ", correction_factor)
write_shadowSurface(mesh.T * correction_factor, sagittal_x / d_mirror_over_crystal, tangential_x / 0.4 * 0.015, outFile=fileout)
989,642 | b39abf19244f5a7f7caec4005a63834f8b74d9e6 | #!/usr/bin/env python
"""Decrypt a file of space-separated RSA ciphertext integers.

Usage: python rsadecrypt.py <numeric file> <modulus> <d>
Writes the recovered plaintext to ``decrypted.rsa``.
"""
import sys
import os
from time import sleep


def rsa_decrypt(ciphertext, modulus, exponent):
    """Return the plaintext string for *ciphertext* (iterable of ints).

    Uses three-argument pow() (modular exponentiation): O(log d)
    multiplications, unlike ``(c ** d) % m`` which materialises an
    astronomically large intermediate for realistic key sizes.
    """
    return ''.join(chr(pow(c, exponent, modulus)) for c in ciphertext)


def main(argv):
    """Parse CLI arguments, decrypt the input file and save the result."""
    # Bug fix: the original tested ``len(sys.argv) <= 2`` which let a
    # two-argument invocation fall through and crash on sys.argv[3].
    if len(argv) < 4:
        print("Usage: ")
        print("python rsadecrypt.py <numeric file> <modulus> <d>")
        return
    path, modulus, d = argv[1], int(argv[2]), int(argv[3])
    # ``with`` guarantees the handle is closed; split() tolerates
    # newlines and repeated spaces between numbers.
    with open(path) as f:
        numbers = [int(tok) for tok in f.read().split()]
    key = rsa_decrypt(numbers, modulus, d)
    print(key)
    os.system("clear")
    print("Writing to file decrypted.rsa..")
    sleep(2)
    with open("decrypted.rsa", "w") as output:
        output.write(key)
    print("Complete")


if __name__ == "__main__":
    main(sys.argv)
989,643 | 50e0cb42d4b8f97eacabb64b47bef01cdaac0448 | import os
import matplotlib.pyplot as plt
import h5py
def display_dataset(path, save, dset='sum'):
    """Displays a dataset, where VOIs are saved in individual locations.

    :param path: Three directories holding surface / deep / calcified VOIs.
    :param save: Output directory for figures and HDF5 files; if None the
        figures are shown interactively instead.
    :param dset: HDF5 dataset key to read from each file.
    """
    # List datasets
    files_surf = os.listdir(path[0])
    files_surf.sort()
    files_deep = os.listdir(path[1])
    files_deep.sort()
    files_calc = os.listdir(path[2])
    files_calc.sort()
    # Corrected names
    files = os.listdir(r'Y:\3DHistoData\Subvolumes_2mm')
    files.sort()
    k = 0
    # Loop for displaying images
    for fsurf, fdeep, fcalc in zip(files_surf, files_deep, files_calc):
        # Load images
        im_surf = loadh5(path[0], fsurf, dset)
        im_deep = loadh5(path[1], fdeep, dset)
        im_calc = loadh5(path[2], fcalc, dset)
        # Create figure with the three zones side by side
        fig = plt.figure(dpi=300)
        ax1 = fig.add_subplot(131)
        ax1.imshow(im_surf, cmap='gray')
        plt.title(fsurf + ', Surface')
        ax2 = fig.add_subplot(132)
        ax2.imshow(im_deep, cmap='gray')
        plt.title('Deep')
        ax3 = fig.add_subplot(133)
        ax3.imshow(im_calc, cmap='gray')
        plt.title('Calcified')
        if save is not None:
            # Skip helper directories; assumes `files` stays aligned with
            # the zipped VOI lists otherwise -- TODO confirm
            while files[k] == 'Images' or files[k] == 'MeanStd':
                k += 1
            # Save figure
            if not os.path.exists(save):
                os.makedirs(save, exist_ok=True)
            plt.tight_layout()
            fig.savefig(os.path.join(save, files[k]), bbox_inches="tight", transparent=True)
            plt.close()
            # Save h5 with the three zone images
            if not os.path.exists(save + '\\MeanStd\\'):
                os.makedirs(save + '\\MeanStd\\', exist_ok=True)
            h5 = h5py.File(save + "\\MeanStd\\" + files[k] + '.h5', 'w')
            h5.create_dataset('surf', data=im_surf)
            h5.create_dataset('deep', data=im_deep)
            h5.create_dataset('calc', data=im_calc)
            h5.close()
        else:
            plt.show()
        k += 1
def loadh5(path, file, name=None):
    """Load one dataset from an HDF5 file.

    :param path: Directory containing the file.
    :param file: File name.
    :param name: Dataset key; defaults to the first key in the file.
    :return: Dataset contents as an in-memory array.
    """
    # Context manager guarantees the file handle is released even if
    # reading raises (the original leaked the handle on error).
    with h5py.File(os.path.join(path, file), 'r') as h5:
        if name is None:
            name = list(h5.keys())[0]
        return h5[name][:]
if __name__ == '__main__':
    # Pipeline variables: surface / deep / calcified VOI directories
    impath = [r"Y:\3DHistoData\C#_VOIS_2mm\cartvoi_surf_new",
              r"Y:\3DHistoData\C#_VOIS_2mm\cartvoi_deep_new",
              r"Y:\3DHistoData\C#_VOIS_2mm\cartvoi_calc_new"]
    savepath = r"Y:\3DHistoData\C#_VOIS_2mm"
    # Call pipeline
    display_dataset(impath, savepath)
|
989,644 | 3c6bd80fb5e24bcfaa39ac3b61a1a3f82ba1435c | from __future__ import annotations
from itertools import chain
from random import random, choice, sample
from typing import Optional, List
from lcs import Perception, TypedList
from lcs.agents.acs2.components.alp import expected_case, unexpected_case, \
cover
from lcs.agents.acs2.components.genetic_algorithm \
import mutate, two_point_crossover
from lcs.strategies.genetic_algorithms import roulette_wheel_selection
from . import Classifier, Configuration
class ClassifiersList(TypedList):
    """
    Represents overall population, match/action sets
    """

    def __init__(self, *args, cfg: Configuration) -> None:
        self.cfg = cfg
        super().__init__((Classifier, ), *args)

    def form_match_set(self,
                       situation: Perception,
                       cfg: Configuration) -> ClassifiersList:
        """Return a new set with classifiers whose condition matches
        `situation`."""
        matching = [cl for cl in self if cl.condition.does_match(situation)]
        return ClassifiersList(*matching, cfg=cfg)

    def form_action_set(self,
                        action: int,
                        cfg: Configuration) -> ClassifiersList:
        """Return a new set with classifiers advocating `action`."""
        matching = [cl for cl in self if cl.action == action]
        return ClassifiersList(*matching, cfg=cfg)

    def expand(self) -> List[Classifier]:
        """
        Returns an array containing all micro-classifiers

        Returns
        -------
        List[Classifier]
            list of all expanded classifiers
        """
        list2d = [[cl] * cl.num for cl in self]
        return list(chain.from_iterable(list2d))

    def get_maximum_fitness(self) -> float:
        """
        Returns the maximum fitness value amongst those classifiers
        that anticipated a change in environment.

        Returns
        -------
        float
            fitness value
        """
        anticipated_change_cls = [cl for cl in self
                                  if cl.does_anticipate_change()]
        if len(anticipated_change_cls) > 0:
            best_cl = max(anticipated_change_cls, key=lambda cl: cl.fitness)
            return best_cl.fitness
        return 0.0

    def apply_alp(self,
                  p0: Perception,
                  action: int,
                  p1: Perception,
                  time: int,
                  population: ClassifiersList,
                  match_set: ClassifiersList) -> None:
        """
        The Anticipatory Learning Process. Handles all updates by the ALP,
        insertion of new classifiers in pop and possibly matchSet, and
        deletion of inadequate classifiers in pop and possibly matchSet.

        :param p0: previous perception
        :param action: executed action
        :param p1: current perception
        :param time: current epoch
        :param population: overall population
        :param match_set: current match set (may be None)
        """
        new_list = ClassifiersList(cfg=self.cfg)
        new_cl: Optional[Classifier] = None
        was_expected_case = False

        for cl in self:
            cl.increase_experience()
            cl.set_alp_timestamp(time)

            if cl.does_anticipate_correctly(p0, p1):
                new_cl = expected_case(cl, p0, time)
                was_expected_case = True
            else:
                new_cl = unexpected_case(cl, p0, p1, time)
                if cl.is_inadequate():
                    # Removes classifier from population, match set
                    # and current list
                    lists = [x for x in [population, match_set, self] if x]
                    for lst in lists:
                        lst.safe_remove(cl)

            if new_cl is not None:
                new_cl.tga = time
                self.add_alp_classifier(new_cl, new_list)

        # No classifier anticipated correctly - generate new one
        if not was_expected_case:
            new_cl = cover(p0, action, p1, time, self.cfg)
            self.add_alp_classifier(new_cl, new_list)

        # Merge classifiers from new_list into self and population
        self.extend(new_list)
        population.extend(new_list)
        if match_set is not None:
            new_matching = [cl for cl in new_list if
                            cl.condition.does_match(p1)]
            match_set.extend(new_matching)

    def apply_reinforcement_learning(self, reward: int, p) -> None:
        """
        Reinforcement Learning. Applies RL according to
        current reinforcement `reward` and back-propagated reinforcement
        `maximum_fitness`.

        :param reward: current reward
        :param p: maximum fitness - back-propagated reinforcement
        """
        for cl in self:
            cl.update_reward(reward + self.cfg.gamma * p)
            cl.update_intermediate_reward(reward)

    def apply_ga(self,
                 time: int,
                 population: ClassifiersList,
                 match_set: ClassifiersList,
                 situation: Perception,
                 randomfunc=random,
                 samplefunc=sample) -> None:
        """Run the genetic generalization mechanism on this action set."""
        if self.should_apply_ga(time):
            self.set_ga_timestamp(time)

            # Fitness-proportionate parent selection (q^3 weighting)
            parent1, parent2 = roulette_wheel_selection(
                self, lambda cl: pow(cl.q, 3) * cl.num)

            child1 = Classifier.copy_from(parent1, time)
            child2 = Classifier.copy_from(parent2, time)

            mutate(child1, child1.cfg.mu, randomfunc=randomfunc)
            mutate(child2, child2.cfg.mu, randomfunc=randomfunc)

            if randomfunc() < self.cfg.chi:
                if child1.effect == child2.effect:
                    two_point_crossover(child1, child2, samplefunc=samplefunc)

                    # Update quality and reward
                    # TODO: check if needed
                    child2.q = float(sum([child1.q, child2.q]) / 2)
                    child2.r = float(sum([child1.r, child2.r]) / 2)

            child1.q /= 2
            child2.q /= 2

            # Fully general children are not inserted
            children = [child for child in [child1, child2]
                        if child.condition.specificity > 0]

            # if two classifiers are identical, leave only one
            unique_children = set(children)

            self.delete_ga_classifiers(population, match_set,
                                       len(unique_children),
                                       randomfunc=randomfunc)

            # check for subsumers / similar classifiers
            for child in unique_children:
                self.add_ga_classifier(child, match_set, population)

    def add_ga_classifier(self,
                          child: Classifier,
                          match_set: ClassifiersList,
                          population: ClassifiersList):
        """
        Find subsumer/similar classifier, if present - increase its numerosity,
        else add this new classifier

        :param child: new classifier to add
        :param match_set:
        :param population:
        :return:
        """
        old_cl = self.find_old_classifier(child)
        if old_cl is None:
            self.append(child)
            population.append(child)
            if match_set is not None:
                match_set.append(child)
        else:
            if not old_cl.is_marked():
                old_cl.num += 1

    def add_alp_classifier(self,
                           child: Classifier,
                           new_list: ClassifiersList) -> None:
        """
        Looks for subsuming / similar classifiers in the current set and
        those created in the current ALP run.

        If a similar classifier was found it's quality is increased,
        otherwise `child_cl` is added to `new_list`.

        Parameters
        ----------
        child: Classifier
            New classifier to examine
        new_list: ClassifiersList
            A list of newly created classifiers in this ALP run
        """
        # TODO: p0: write tests
        old_cl = None
        # Look if there is a classifier that subsumes the insertion candidate
        for cl in self:
            if cl.does_subsume(child):
                if old_cl is None or cl.is_more_general(old_cl):
                    old_cl = cl
        # Check if any similar classifier was in this ALP run
        if old_cl is None:
            for cl in new_list:
                if cl == child:
                    old_cl = cl
        # Check if there is similar classifier already
        if old_cl is None:
            for cl in self:
                if cl == child:
                    old_cl = cl
        if old_cl is None:
            new_list.append(child)
        else:
            old_cl.increase_quality()

    def get_similar(self, other: Classifier) -> Optional[Classifier]:
        """
        Searches for the first similar classifier `other` and returns it.

        Parameters
        ----------
        other: Classifier
            classifier to compare

        Returns
        -------
        Optional[Classifier]
            classifier (with the same condition, action, effect),
            None otherwise
        """
        return next(filter(lambda cl: cl == other, self), None)

    def should_apply_ga(self, time: int):
        """
        Checks the average last GA application to determine if a GA
        should be applied. If no classifier is in the current set,
        no GA is applied!

        :param time:
        :return:
        """
        overall_time = sum(cl.tga * cl.num for cl in self)
        overall_num = self.overall_numerosity()
        if overall_num == 0:
            return False
        if time - overall_time / overall_num > self.cfg.theta_ga:
            return True
        return False

    def overall_numerosity(self):
        """Return the total numerosity (sum of micro-classifiers)."""
        return sum(cl.num for cl in self)

    def set_ga_timestamp(self, time: int):
        """
        Sets the GA time stamps to the current time to control
        the GA application frequency.

        :param time:
        :return:
        """
        for cl in self:
            cl.tga = time

    def delete_ga_classifiers(self,
                              population: ClassifiersList,
                              match_set: ClassifiersList,
                              child_no: int,
                              randomfunc=random):
        """
        Deletes classifiers in the set to keep the size THETA_AS.
        Also considers that still childNo classifiers are added by the GA.

        :param randomfunc:
        :param population:
        :param match_set:
        :param child_no: number of classifiers that will be inserted
        :return:
        """
        del_no = self.overall_numerosity() + child_no - self.cfg.theta_as
        if del_no <= 0:
            # There is still room for more classifiers
            return
        # print("GA: requested to delete: %d classifiers", del_no)
        for _ in range(0, del_no):
            self.delete_a_classifier(
                match_set, population, randomfunc=randomfunc)

    def delete_a_classifier(self,
                            match_set: ClassifiersList,
                            population: ClassifiersList,
                            randomfunc=random):
        """ Delete one classifier from a population """
        if len(population) == 0:  # Nothing to remove
            return None
        cl_del = self.select_classifier_to_delete(randomfunc=randomfunc)
        if cl_del is not None:
            if cl_del.num > 1:
                cl_del.num -= 1
            else:
                # Removes classifier from population, match set
                # and current list
                lists = [x for x in [population, match_set, self] if x]
                for lst in lists:
                    lst.safe_remove(cl_del)

    def select_classifier_to_delete(self, randomfunc=random) -> \
            Optional[Classifier]:
        """Pick a deletion candidate by random tournament over the
        expanded (micro-classifier) population."""
        if len(self) == 0:
            return None
        cl_del = None
        while cl_del is None:  # We must delete at least one
            for cl in self.expand():
                if randomfunc() < 1. / 3.:
                    if cl_del is None:
                        cl_del = cl
                    else:
                        cl_del = self.select_preferred_to_delete(cl, cl_del)
        return cl_del

    @staticmethod
    def select_preferred_to_delete(cl: Classifier,
                                   cl_to_delete: Classifier) -> \
            Classifier:
        """Return whichever of the two classifiers is the better
        deletion candidate (lower quality, marked, older tav)."""
        if cl.q - cl_to_delete.q < -0.1:
            cl_to_delete = cl
            return cl_to_delete
        if abs(cl.q - cl_to_delete.q) <= 0.1:
            if cl.is_marked() and not cl_to_delete.is_marked():
                cl_to_delete = cl
            # BUG FIX: the original tested `cl.is_marked` (the bound
            # method object, which is always truthy) instead of calling
            # it, so this branch was taken even for unmarked classifiers.
            elif cl.is_marked() or not cl_to_delete.is_marked():
                if cl.tav > cl_to_delete.tav:
                    cl_to_delete = cl
        return cl_to_delete

    def find_old_classifier(self, cl: Classifier):
        """Return a subsumer (if subsumption is enabled) or an identical
        classifier already present in the set, else None."""
        old_cl = None
        if self.cfg.do_subsumption:
            old_cl = self.find_subsumer(cl)
        if old_cl is None:
            old_cl = self.get_similar(cl)
        return old_cl

    def find_subsumer(self, cl: Classifier, choice_func=choice) -> \
            Classifier:
        """Return a random choice among the most general classifiers
        subsuming `cl`, or None when there is no subsumer."""
        subsumer = None
        most_general_subsumers: List[Classifier] = []
        for classifier in self:
            if classifier.does_subsume(cl):
                if subsumer is None:
                    subsumer = classifier
                    most_general_subsumers = [subsumer]
                elif classifier.is_more_general(subsumer):
                    subsumer = classifier
                    most_general_subsumers = [subsumer]
                elif not subsumer.is_more_general(classifier):
                    most_general_subsumers.append(classifier)  # !
        return choice_func(most_general_subsumers) \
            if most_general_subsumers else None
|
989,645 | a66f7deaa574e51b941f2105138bc786c06647e8 | # -*- coding: utf-8 -*-
"""
Created on Tue Jul 9 18:29:53 2019
@author: user
"""
#66. Plus One
class Solution(object):
def plusOne(self, digits):
"""
:type digits: List[int]
:rtype: List[int]
"""
for i in reversed(range(len(digits))): #數列倒序
if digits[i]==9: #9進位為0
digits[i]=0
else:
digits[i]+=1
return digits
digits[0]=1 #0進位為1
digits.append(0) #尾數加一個0
#input 999
#output 000 => 100 => 1000
return digits |
989,646 | 0473ef52de3251d74c5f94cb498a031cd5e04a5a | # coding: utf-8
from django.core.paginator import Paginator, InvalidPage, EmptyPage
from django.core.urlresolvers import reverse
from django.http import Http404, HttpResponse, HttpResponseForbidden, HttpResponseServerError, HttpResponseRedirect
from django.shortcuts import render_to_response, get_object_or_404, render
from django.template import RequestContext
from django.template.defaultfilters import slugify
from django.utils import simplejson
from django.utils.html import escape
from datetime import date, datetime, timedelta
import csv
import MySQLdb
from django.conf import settings
from contacts.models import Person, Course
from contacts.forms import PersonCreateForm, PersonFilterForm, ImportCSVForm, PersonIdentificationForm,PersonRegistrationForm, PersonAddressForm, PersonLaboralForm, SynchronizeSPIPForm, PersonLaboralLevelsForm
from contacts.tables import PersonTable, ExportPersonTable
import sys, ast, urllib2, simplejson
def check_pending_sync():
    """Ask the remote SPIP service how many registrations are newer than
    the latest one stored locally.

    :return: number of pending records, -1 when the service reports an
        error, or the raw 'pending' payload when it is not an integer.
    """
    lastperson = Person.objects.latest('external_id')
    req = urllib2.Request(settings.SPIP_SYNC_URL + '?action=check&last_id=' + str(lastperson.external_id) , None, {'user-agent':settings.SPIP_SYNC_AGENT})
    opener = urllib2.build_opener()
    json_data = simplejson.load(opener.open(req))
    if json_data['error']:
        regs_pending = -1
    else:
        try:
            regs_pending = int(json_data['pending'])
        # Bug fix: the original caught `exceptions.ValueError`, but the
        # `exceptions` name is never imported, so a non-numeric payload
        # raised NameError instead of being handled.
        except ValueError:
            regs_pending = json_data['pending']
    return regs_pending
def list(request, page=1, template='contacts/person/list.html'):
    """List of all the people.

    Applies the optional GET filters, renders a paginated table and
    shows how many SPIP registrations are pending synchronisation.
    NOTE(review): the view name shadows the builtin ``list``; kept
    because URLconfs reference it by name.

    :param template: Add a custom template.
    """
    if not request.user.is_authenticated():
        return HttpResponseRedirect('/login/?next=%s' % request.path)
    person_list = Person.objects.all()
    if request.method == 'GET':
        form = PersonFilterForm(request.GET)
        if form.is_valid():
            # Each populated filter narrows the queryset further
            if form.cleaned_data['last_name']:
                person_list = person_list.filter(last_name__istartswith=form.cleaned_data['last_name'])
            if form.cleaned_data['id_card']:
                person_list = person_list.filter(id_card__istartswith=form.cleaned_data['id_card'])
            if form.cleaned_data['contact_type']:
                person_list = person_list.filter(contact_type=form.cleaned_data['contact_type'])
            if form.cleaned_data['status']:
                person_list = person_list.filter(status=form.cleaned_data['status'])
            if form.cleaned_data['mailnotpaid_unsent']:
                person_list = person_list.filter(date_mailnotpaid__isnull = True).exclude(status='ok_all')
            if form.cleaned_data['mailregister_unsent']:
                person_list = person_list.filter(date_mailregister__isnull = True, status='ok_all')
            if form.cleaned_data['course']:
                person_list = person_list.filter(courses__in = form.cleaned_data['course'])
    else:
        form = PersonFilterForm()
    table = PersonTable(person_list, order_by = request.GET.get("sort",'-date_registration') )
    table.paginate(page=request.GET.get("page", 1))
    # Check whether there are new records to synchronise; done once per session.
    if not request.session:
        request.session={}
    regs_not_sync = request.session.get('regs_not_sync',-1)
    if regs_not_sync == -1:
        regs_not_sync = check_pending_sync()
        request.session['regs_not_sync'] = regs_not_sync
    kwvars = {
        'table' : table,
        'form': form,
        'regs_not_sync': regs_not_sync,
    }
    return render_to_response(template, kwvars, RequestContext(request))
def map(request, template='contacts/person/map.html'):
    """Map with google maps

    Renders the (optionally filtered) people for a map view.
    NOTE(review): the view name shadows the builtin ``map``; kept
    because URLconfs reference it by name.

    :param template: Add a custom template.
    """
    if not request.user.is_authenticated():
        return HttpResponseRedirect('/login/?next=%s' % request.path)
    person_list = Person.objects.all()
    if request.method == 'POST':
        form = PersonFilterForm(request.POST)
        if form.is_valid():
            if form.cleaned_data['last_name']:
                person_list = person_list.filter(last_name__istartswith=form.cleaned_data['last_name'])
            if form.cleaned_data['id_card']:
                person_list = person_list.filter(id_card__istartswith=form.cleaned_data['id_card'])
            if form.cleaned_data['contact_type']:
                person_list = person_list.filter(contact_type=form.cleaned_data['contact_type'])
    else:
        form = PersonFilterForm()
    kwvars = {
        'person_list' : person_list,
        'form': form,
    }
    return render_to_response(template, kwvars, RequestContext(request))
def export(request):
    """ Export people to csv

    Streams the full person table as a UTF-8 CSV attachment named
    ``export-inscrits<yy-mm-dd>.csv``, sortable via the ``sort`` GET
    parameter.
    """
    if not request.user.is_authenticated():
        return HttpResponseRedirect('/login/?next=%s' % request.path)
    filename = 'export-inscrits%s.csv' % date.today().strftime("%y-%m-%d")
    person_list = Person.objects.all()
    table = ExportPersonTable(person_list)
    table.order_by = request.GET.get("sort",'last_name')
    response = HttpResponse(mimetype='text/csv')
    response['Content-Disposition'] = 'attachment; filename=%s' % filename
    writer = csv.writer(response)
    # Write headers to CSV file
    headers = []
    for column in table.columns:
        headers.append(column.header.encode('utf8'))
    writer.writerow(headers)
    # Write data to CSV file
    for obj in table.rows:
        row = []
        for value in obj:
            row.append(value.encode('utf8'))
        writer.writerow(row)
    # Return CSV file to browser as download
    return response
def importCSV(request, template='contacts/person/import.html'):
    """Import people from an uploaded CSV file.

    Expected columns: 0 first name, 1 last name, 3 contact type,
    5 id card (the remaining columns are ignored).

    :param template: A custom template.
    """
    if not request.user.is_authenticated():
        return HttpResponseRedirect('/login/?next=%s' % request.path)
    registres = 0
    if request.method == 'POST':
        form = ImportCSVForm(request.POST, request.FILES)
        if form.is_valid():
            uploaded_file = request.FILES['fitxer']
            # Bug fix: the original called uploaded_file.read() first,
            # leaving the stream at EOF so csv.reader yielded no rows.
            reader = csv.reader(uploaded_file, delimiter=',', quotechar='"')
            for row in reader:
                person = Person()
                person.first_name = row[0]
                person.last_name = row[1]
                person.contact_type = row[3]
                person.id_card = row[5]
                # Bug fix: the slug was built from an undefined name `p`
                # (NameError on the first row). Reuse the shared slug
                # uniqueness helper instead of duplicating the probe loop.
                person.slug = calculaSlugPersona(person)
                person.save()
                registres = registres + 1
    else:
        form = ImportCSVForm()
    return render_to_response(template, {'registres': registres, 'form': form}, RequestContext(request))
def calculaSlugPersona(person):
    """Return a slug for *person* that is unique among Person records.

    Starts from ``slugify("<first> <last>")`` and appends ``-1``, ``-2``,
    ... until no existing Person has the candidate slug.
    """
    base_slug = slugify("%s %s" % (person.first_name, person.last_name))
    candidate = base_slug
    suffix = 0
    while True:
        try:
            # Probe for a collision; DoesNotExist means the slug is free
            Person.objects.get(slug__iexact=candidate)
        except Person.DoesNotExist:
            return candidate
        suffix = suffix + 1
        candidate = "%s-%s" % (base_slug, suffix)
def synchronizeSPIPForm(request, template='contacts/person/synchronize.html'):
    """ Import inscriptions from spip form

    Fetches all registrations newer than the latest local Person from the
    SPIP endpoint. The payload is a flat list of (id_donnee, champ, valeur)
    rows; rows sharing an id_donnee describe one person, so a change of id
    triggers saving the accumulated person and starting a new one.
    """
    if not request.user.is_authenticated():
        return HttpResponseRedirect('/login/?next=%s' % request.path)
    registres = 0
    user = request.user
    if request.method == 'POST':
        form = SynchronizeSPIPForm(request.POST)
        if form.is_valid():
            if form.cleaned_data['confirma'] == True:
                lastperson = Person.objects.latest('external_id')
                req = urllib2.Request(settings.SPIP_SYNC_URL + '?action=get&last_id=' + str(lastperson.external_id) , None, {'user-agent':settings.SPIP_SYNC_AGENT})
                opener = urllib2.build_opener()
                json_data = simplejson.load(opener.open(req))
                person = Person()
                person.external_id = 0
                laboral_levels = []
                courses = []
                for row in json_data['list']:
                    # A new id_donnee means the previous person is complete
                    if row['id_donnee'] != str(person.external_id) and person.external_id > 0:
                        # register the previous person
                        person.slug = calculaSlugPersona(person)
                        person.user_add = user
                        person.user_modify = user
                        person.save()
                        # create the person-course relations
                        for course_id in courses:
                            person.courses.add(Course.objects.get(id=course_id))
                        registres = registres + 1
                        # start a new person
                        person = Person()
                        laboral_levels = []
                        courses = []
                    person.external_id = row['id_donnee']
                    person.date_registration = row['date']
                    # Map each SPIP form field onto the Person attribute
                    if row['champ'] == 'ligne_1':
                        person.first_name = row['valeur'] #.decode("utf8", "ignore")
                    elif row['champ'] == 'ligne_2':
                        person.last_name = row['valeur'] #.decode("utf8", "ignore")
                    elif row['champ'] == 'ligne_3':
                        person.id_card = row['valeur']
                    elif row['champ'] == 'ligne_4':
                        person.home_address = row['valeur'] #.decode("utf8", "ignore")
                    elif row['champ'] == 'ligne_5':
                        person.home_postalcode = row['valeur']
                    elif row['champ'] == 'ligne_6':
                        person.home_town = row['valeur'] #.decode("utf8", "ignore")
                    elif row['champ'] == 'select_5':
                        person.home_province = row['titre'] #.decode("utf8", "ignore")
                    elif row['champ'] == 'email_1':
                        person.email_address = row['valeur']
                    elif row['champ'] == 'ligne_8':
                        person.phone_number = row['valeur']
                    elif row['champ'] == 'ligne_9':
                        person.mobile_number = row['valeur']
                    elif row['champ'] == 'ligne_19':
                        person.twitter = row['valeur'] #.decode("utf8", "ignore")
                    elif row['champ'] == 'select_1':
                        person.laboral_category = row['rang']
                    elif row['champ'] == 'multiple_1':
                        # Multi-valued field: accumulate and re-serialise
                        laboral_levels.append(row['rang'])
                        person.laboral_levels = ",".join("'%s'" % str(level) for level in laboral_levels)
                        # print >> sys.stderr, 'Laboral levels = %s' % person.laboral_levels
                    elif row['champ'] == 'ligne_10':
                        person.laboral_nrp = row['valeur']
                    elif row['champ'] == 'num_1':
                        person.laboral_years = float(row['valeur']) if '.' in row['valeur'] else int(row['valeur'])
                    elif row['champ'] == 'select_2':
                        person.laboral_cuerpo = row['rang']
                    elif row['champ'] == 'ligne_11':
                        person.laboral_degree = row['valeur'] #.decode("utf8", "ignore")
                    elif row['champ'] == 'ligne_12':
                        person.laboral_centername = row['valeur'] #.decode("utf8", "ignore")
                    elif row['champ'] == 'ligne_13':
                        person.laboral_centercode = row['valeur']
                    elif row['champ'] == 'ligne_16':
                        person.laboral_centerpostalcode = row['valeur']
                    elif row['champ'] == 'ligne_14':
                        person.laboral_centertown = row['valeur'] #.decode("utf8", "ignore")
                    elif row['champ'] == 'select_4':
                        person.laboral_centerprovince = row['titre'] #.decode("utf8", "ignore")
                    elif row['champ'] == 'select_3':
                        person.math_society = row['rang']
                    elif row['champ'] == 'texte_1':
                        person.remarks = row['valeur'] #.decode("utf8", "ignore")
                    elif row['champ'] == 'select_6':
                        person.lang = row['rang']
                    # new: courses
                    elif row['champ'] == 'multiple_2':
                        courses.append(row['rang'])
                # Register the last accumulated person
                if person.external_id > 0:
                    person.slug = calculaSlugPersona(person)
                    person.user_add = user
                    person.user_modify = user
                    person.save()
                    # create the person-course relations
                    for course_id in courses:
                        person.courses.add(Course.objects.get(id=course_id))
                    registres = registres + 1
                # Reset the not-synchronised counter to 0
                if not request.session:
                    request.session={}
                request.session['regs_not_sync'] = 0
    else:
        form = SynchronizeSPIPForm()
        registres = -1
    return render_to_response(template, {'registres': registres, 'form': form}, RequestContext(request))
def detail(request, slug, template='contacts/person/detail.html'):
    """Detail of a person.

    Also records the person at the front of the per-session
    'recently viewed' history (capped length).

    :param template: Add a custom template.
    """
    if not request.user.is_authenticated():
        return HttpResponseRedirect('/login/?next=%s' % request.path)
    try:
        person = Person.objects.get(slug__iexact=slug)
        if not request.session:
            request.session={}
        viewed_list = request.session.get('viewed',[])
        if person in viewed_list:
            viewed_list.remove(person)
        viewed_list.insert(0,person)  # most recently viewed first
        del viewed_list[8:10]  # cap the history length
        request.session['viewed'] = viewed_list
    except Person.DoesNotExist:
        raise Http404
    kwvars = {
        'object': person,
    }
    return render_to_response(template, kwvars, RequestContext(request))
def create(request, template='contacts/person/create.html'):
    """Create a person.

    :param template: A custom template.
    https://docs.djangoproject.com/en/dev/topics/forms/modelforms/#more-than-one-foreign-key-to-the-same-model
    """
    if not request.user.is_authenticated():
        return HttpResponseRedirect('/login/?next=%s' % request.path)
    user = request.user
    if not user.has_perm('add_person'):
        return HttpResponseForbidden()
    if request.method == 'POST':
        form = PersonCreateForm(request.POST)
        if form.is_valid():
            p = form.save(commit=False)
            # Consistency: delegate slug uniqueness to the shared helper
            # instead of duplicating the probe loop inline (same logic as
            # synchronizeSPIPForm).
            p.slug = calculaSlugPersona(p)
            p.user_add = user
            p.user_modify = user
            p.date_registration = datetime.today()
            p.save()
            return HttpResponseRedirect(p.get_update_url())
    else:
        form = PersonCreateForm()
    kwvars = {
        'form': form
    }
    return render_to_response(template, kwvars, RequestContext(request))
def calculaStatus(person):
    """Derive the registration status for *person*.

    Cancelled records stay cancelled. Registered attendees ('R') need
    both validated data and payment to be fully OK; unpaid ones become
    late after 15 days. Other contact types (organisers, sponsors,
    guests) only need validated data.
    """
    if person.status == 'cancelled':
        return 'cancelled'
    if person.contact_type != 'R':
        # Organisers, sponsors and guests: data validation only, no payment
        return 'ok_all' if person.revision == 'dataok' else 'pendent'
    has_paid = bool(person.date_paid and person.paid)
    if person.revision == 'dataok':
        if has_paid:
            return 'ok_all'
        if (datetime.now() - person.date_registration).days > 15:
            return 'notpaid_late'
        return 'ok_notpaid'
    if has_paid:
        return 'nook_paid'
    if (datetime.now() - person.date_registration).days > 15:
        return 'notpaid_late'
    return 'pendent'
def updateStatus(request,template='contacts/person/update_status.html'):
    """ Update status of pending records

    Recomputes the status of every person currently 'pendent' or
    'ok_notpaid' and persists only those whose status changed.
    """
    if not request.user.is_authenticated():
        return HttpResponseRedirect('/login/?next=%s' % request.path)
    person_list = Person.objects.filter(status__in = ['pendent','ok_notpaid'])
    registres = 0
    for person in person_list:
        status = calculaStatus(person)
        if status != person.status:
            person.status = status
            person.save()
            registres = registres + 1
    return render_to_response(template, {'registres': registres}, RequestContext(request))
def update(request, slug, template='contacts/person/update.html'):
    """Update a person.

    Edits a person through four model forms (identification,
    registration, address, laboral) plus the laboral-levels form, then
    recomputes the derived status. Also maintains the per-session
    'recently viewed' history.

    :param template: A custom template.
    """
    if not request.user.is_authenticated():
        return HttpResponseRedirect('/login/?next=%s' % request.path)
    user = request.user
    if not user.has_perm('change_person'):
        # TODO: tell the user the action cannot be performed without permission
        return detail(request,slug)
        # return HttpResponseForbidden()
    try:
        person = Person.objects.get(slug__iexact=slug)
    except Person.DoesNotExist:
        raise Http404
    if request.method == 'POST':
        formId = PersonIdentificationForm(request.POST, instance=person)
        formReg = PersonRegistrationForm(request.POST, instance=person)
        formAdr = PersonAddressForm(request.POST, instance=person)
        formLab = PersonLaboralForm(request.POST, instance=person)
        formLevels = PersonLaboralLevelsForm(request.POST)
        # formLab.data['laboral_levels'] = [int(x) for x in formLab.data['laboral_levels']]
        if formId.is_valid() and formReg.is_valid() and formAdr.is_valid() and formLab.is_valid() and formLevels.is_valid():
            person.user_modify = user
            formId.save()
            formReg.save()
            formAdr.save()
            person.laboral_levels = formLevels.cleaned_data.get('laboral_levels')
            # Derived status must be recomputed before the final save
            person.status = calculaStatus(person)
            formLab.save()
            return HttpResponseRedirect(person.get_absolute_url())
    else:
        formId = PersonIdentificationForm(instance=person)
        formReg = PersonRegistrationForm(instance=person)
        formAdr = PersonAddressForm(instance=person)
        formLab = PersonLaboralForm(instance=person)
        formLevels = PersonLaboralLevelsForm(initial={'laboral_levels': person.laboral_levels})
        # print >> sys.stderr, 'Laboral levels = %s' % person.laboral_levels
    # list of recently viewed people (session history)
    if not request.session:
        request.session={}
    viewed_list = request.session.get('viewed',[])
    if person in viewed_list:
        viewed_list.remove(person)
    viewed_list.insert(0,person)  # most recently viewed first
    del viewed_list[8:10]  # cap the history length
    request.session['viewed'] = viewed_list
    kwvars = {
        'id_form': formId,
        'reg_form': formReg,
        'adr_form': formAdr,
        'lab_form': formLab,
        'level_form': formLevels,
        'object': person,
    }
    return render_to_response(template, kwvars, RequestContext(request))
def delete(request, slug, template='contacts/person/delete.html'):
    """Delete a person after an explicit confirmation POST.

    :param slug: Slug identifying the person (case-insensitive).
    :param template: A custom template.
    """
    if not request.user.is_authenticated():
        return HttpResponseRedirect('/login/?next=%s' % request.path)
    if not request.user.has_perm('delete_person'):
        return HttpResponseForbidden()
    try:
        person = Person.objects.get(slug__iexact=slug)
    except Person.DoesNotExist:
        raise Http404
    if request.method == 'POST':
        answer = request.POST.copy()['delete_person']
        if answer == 'Yes':
            person.delete()
            return HttpResponseRedirect(reverse('contacts_person_list'))
        return HttpResponseRedirect(person.get_absolute_url())
    return render_to_response(template, {'object': person}, RequestContext(request))
def cancel(request, slug, template='contacts/person/cancel.html'):
    """Cancel an inscription, or undo a previous cancellation.

    :param slug: Slug identifying the person (case-insensitive).
    :param template: A custom template.
    """
    if not request.user.is_authenticated():
        return HttpResponseRedirect('/login/?next=%s' % request.path)
    if not request.user.has_perm('cancel_person'):
        return HttpResponseForbidden()
    try:
        person = Person.objects.get(slug__iexact=slug)
    except Person.DoesNotExist:
        raise Http404
    if request.method == 'POST':
        if request.POST.copy()['cancel_person'] == 'Yes':
            if person.status == 'cancelled':
                # Undo the cancellation: reset and recompute from scratch.
                person.status = 'pendent'
                person.status = calculaStatus(person)
            else:
                person.status = 'cancelled'
            person.user_modify = request.user
            person.save()
        return HttpResponseRedirect(person.get_absolute_url())
    return render_to_response(template, {'object': person}, RequestContext(request))
def lookup(request):
    """Autocomplete endpoint: people whose last name starts with ``term``.

    Returns a JSON list of ``{'label': full name, 'value': id}`` dicts
    (the shape expected by jQuery UI autocomplete).  Queries shorter than
    3 characters yield an empty list.
    """
    # Default return list
    results = []
    if request.method == "GET":
        # FIX: QueryDict.has_key() is the removed Python-2 dict API; the
        # ``in`` operator is the supported equivalent on every version.
        if u'term' in request.GET:
            value = request.GET[u'term']
            # Ignore queries shorter than length 3
            if len(value) > 2:
                model_results = Person.objects.filter(last_name__istartswith=value)
                results = [{'label': x.fullname, 'value': x.id} for x in model_results]
    json = simplejson.dumps(results)
    return HttpResponse(json, mimetype='application/json')
def revision(request, slug, template='contacts/person/revision.html'):
    """Display the revision form for a person.

    On POST simply redirects back to the person's detail page.

    :param slug: Slug identifying the person (case-insensitive).
    :param template: A custom template.
    """
    if not request.user.is_authenticated():
        return HttpResponseRedirect('/login/?next=%s' % request.path)
    if not request.user.has_perm('revision_person'):
        return HttpResponseForbidden()
    try:
        person = Person.objects.get(slug__iexact=slug)
    except Person.DoesNotExist:
        raise Http404
    if request.method == 'POST':
        return HttpResponseRedirect(person.get_absolute_url())
    context = {
        'object': person,
        'form': RevisionCreateForm(),
    }
    return render_to_response(template, context, RequestContext(request))
|
989,647 | 7ffc6671635a9c6dbb70efaf83baf7caee9bff95 | import datetime
# Print the current timestamp, first raw and then formatted.
current = datetime.datetime.now()
print("current date and time is :", current)
print("in y:m:d h:m:s format : ")
print(current.strftime("%Y-%m-%d %H:%M:%S "))
989,648 | f5068cb674c62aedd39e412ccfb09d3f9d426ffd | # https://www.reddit.com/r/dailyprogrammer/comments/5llkbj/2017012_challenge_298_easy_too_many_parentheses/
# [298] [Easy] [Too many parenthesis]
# ((a((bc)(de)))f)
# (((zbcd)(((e)fg))))
# ab((c))
# ((a((bc)(de)))f)
# ((zbcd)((e)fg))
# ab(c)
def sol(exp):
    """Strip redundant parentheses from *exp* and return the simplified string.

    Two reductions are applied repeatedly until neither fires:
      * an empty pair "()" is removed entirely;
      * a doubled pair "((X))" is collapsed to "(X)".

    The input is assumed to be a balanced parenthesis expression.
    """
    if exp == "()":
        return ""
    # FIX: the original iterated over range(len(exp) - 1), so a ')' in the
    # final position was never examined and e.g. "a()" was left unsimplified.
    for i in range(len(exp)):
        if exp[i] != ')':
            continue
        ind = matching(exp, i)
        # Case 1: "()" encloses nothing -> drop both characters and recurse.
        if exp[ind + 1:i] == '':
            return sol(exp[:ind] + exp[i + 1:])
        # Case 2: "((X))" -> drop the outer pair.  The i + 1 < len(exp)
        # bound check replaces the old truncated loop range.
        if i + 1 < len(exp) and exp[i + 1] == ')' and exp[ind - 1] == '(':
            return sol(exp[:ind - 1] + exp[ind:i + 1] + exp[i + 2:])
    return exp
def matching(exp, ind):
    """
    Given an expression and the index of a ')' in it, return the index of
    the matching '(' (or None, with a warning, if exp[ind] is not a ')').
    """
    stack = []
    if exp[ind] != ')':
        print("Not a )")
        return None
    # Scan everything before `ind`; unmatched '(' positions remain on the
    # stack, and the top of the stack is the partner of exp[ind].
    for i in range(0, ind):
        if exp[i] == '(':
            stack.append(i)
        elif exp[i] == ')':
            stack.pop()
    return stack[-1]
sol("()")
|
989,649 | 2248348304ee631407e525abaa1660fbdca61048 | """
Core routines for controlling the display board with a controller.
"""
|
989,650 | 3045c05da7797579443249f639b843387598ed19 | #!/usr/bin/env python3
import os
import atexit
import logging
def exitLogCleanup(*args):
    """Delete the given log file(s); registered to run at interpreter exit."""
    for log_path in args:
        os.unlink(log_path)
    return None
# `snakemake` is the object injected when this file runs as a Snakemake
# script rule (presumably provides .log/.input/.output/.params — confirm).
atexit.register(exitLogCleanup, snakemake.log[0])
logging.basicConfig(filename=snakemake.log[0], filemode="w", level=logging.DEBUG)
# the index position for each tax level in the bigtable.tsv - off by one because we capture the range as l[21:i]
# NOTE(review): the comment above says l[21:i] but the code slices
# range(idxStart, i) with idxStart = 23 — verify which is intended.
tsvIndex = {
    "kingdom": 24,
    "phylum": 25,
    "class": 26,
    "order": 27,
    "family": 28,
    "genus": 29,
    "species": 30,
}
idxStart = 23
# One-letter prefixes used to build "k_...,p_...,g_..." style taxon paths.
short = {"23": "k", "24": "p", "25": "c", "26": "o", "27": "f", "28": "g", "29": "s"}
logging.debug(f"Opening {snakemake.output[0]} for writing\n")
out = open(snakemake.output[0], "w")
out.write("sampleID\ttaxonLevel\ttaxonPath\ttaxonName\tcount\tpercent\n")
# re-read the file for each sample to keep the memory happy - this is probably not necessary
for sample in snakemake.params.samples:
    logging.debug(f"parsing {snakemake.input[0]} for sample {sample}\n")
    counts = {} # counts[taxlevel][taxname] = int
    cpm = {} # normalised counts, same structure as counts
    infh = open(snakemake.input[0], "r")
    infh.readline() # skip header
    for line in infh:
        l = line.split("\t")
        # Column 1 holds the sample id; only aggregate rows for this sample.
        if l[1] == sample:
            for t, i in tsvIndex.items():
                try:
                    # Skip rows with an empty name at this taxonomic level.
                    if len(l[i].strip()) == 0:
                        continue
                except IndexError:
                    continue
                try:
                    # EAFP existence probe: create the per-level dicts lazily.
                    counts[t]
                    cpm[t]
                except KeyError:
                    counts[t] = {}
                    cpm[t] = {}
                taxPath = []
                for o in range(idxStart, i):
                    taxPath.append(
                        f"{short[str(o)]}_{l[o]}"
                    ) # taxon path = k_kingName,p_phylName etc.
                outPath = ",".join(taxPath)
                try:
                    # Accumulate raw count (col 2) and normalised count (col 3).
                    counts[t][outPath] += int(l[2])
                    cpm[t][outPath] += float(l[3])
                except KeyError:
                    counts[t][outPath] = int(l[2])
                    cpm[t][outPath] = float(l[3])
    infh.close()
    # Emit one output row per (sample, level, path) aggregate.
    for taxLevel in counts.keys():
        for taxPath in counts[taxLevel].keys():
            taxName = taxPath.split("_")[-1]
            out.write(
                f"{sample}\t{taxLevel}\t{taxPath}\t{taxName}\t{counts[taxLevel][taxPath]}\t{cpm[taxLevel][taxPath]}\n"
            )
out.close()
|
989,651 | 8175bf2f6d68e427f70bb8bedebae59bdaca4408 | # Kmp failure function
# NOTE(review): written for Python 2 (`raw_input`); under Python 3 this
# would need `input()` instead.
a = raw_input()
b = raw_input()
# Work on the concatenation; its borders are what we report below.
pattern = a + b
matched, n = 0, len(pattern)
pi = [0]*n
# Standard KMP prefix-function computation: pi[i] is the length of the
# longest proper border (prefix == suffix) of pattern[:i+1].
for i in range(1, n):
    while matched > 0 and pattern[i] != pattern[matched]:
        matched = pi[matched-1]
    if pattern[i] == pattern[matched]:
        matched += 1
    pi[i] = matched
# Follow the border chain down from the full length to collect every
# border length of the whole string (including n itself), then print
# them in increasing order.
l = []
while n > 0:
    l.append(n)
    n = pi[n - 1]
print(" ".join([str(i) for i in reversed(l)]))
|
989,652 | 8a212c64dfea9859cd41cea1386e1802076720e3 | from math import *
def fk(x, h, n, p):
    """Richardson extrapolation of the finite-difference rule ``df``.

    Recursively combines evaluations at step h and h/2; ``n`` is the number
    of extrapolation levels (n == 1 calls the base rule ``df`` directly) and
    ``p`` the order exponent of the underlying formula.
    """
    if n == 1:
        return df(x, h)
    level = n - 1
    scale = 2 ** (level * p)
    return (scale * fk(x, h / 2, level, p) - fk(x, h, level, p)) / (scale - 1)
# Candidate functions to differentiate numerically at x0 (evaluated via
# eval(); the strings are hard-coded here, so no untrusted input reaches eval).
fs = ["cos(x**x)",
      "sin(x)",
      "x**(cos(x))",
      "e**(-x**2)"]
# Finite-difference stencils: backward O(h), central O(h^2), five-point O(h^4).
dfs = ["(f(p)-f(p-h))/h",
       "(f(p+h)-f(p-h))/(2*h)",
       "(f(p-2*h)-8*f(p-h)+8*f(p+h)-f(p+2*h))/(12*h)"]
hs = [0.1, 0.05, 0.025, 0.0125]
ns = [4, 2, 1] # extrapolation levels per stencil ("ordem do erro")
x0 = 1 # evaluation point ("ponto")
p = 1 # order exponent passed to fk — author's original comment was "b ???" (unverified)
for q in range(len(fs)):
    # Redefine f for this question; eval() sees the loop's current fs[q].
    def f(x):
        return eval(fs[q])
    print(f"### Questão {q+1}:")
    print("|h|erro O(h)|erro O(h^2)|erro O(h^4)|")
    print("|--|--|--|--|")
    for h in hs:
        r = []
        for i in range(len(dfs)):
            # df's parameter `p` shadows the global p inside the eval'd stencil.
            def df(p, h):
                return eval(dfs[i])
            r.append(fk(x0, h, ns[i], p))
        # Markdown table row: |h|err1|err2|err3|
        print("", h, *r, "", sep="|")
    print()
# import sympy as sy
# x = sy.Symbol('x')
# f = sy.sympify('x ** x')
# df = sy.diff(f, x, 2).subs(x, x0).evalf()
# print('exact:', df)
989,653 | 6cd39e9aaf4d3ae3a298c9205c800cc5abd57f83 | '''
Given a non-empty list of words, return the k most frequent elements.
Your answer should be sorted by frequency from highest to lowest. If two words have the same frequency, then the word with the lower alphabetical order comes first.
Example 1:
Input: ["i", "love", "leetcode", "i", "love", "coding"], k = 2
Output: ["i", "love"]
Explanation: "i" and "love" are the two most frequent words.
Note that "i" comes before "love" due to a lower alphabetical order.
Example 2:
Input: ["the", "day", "is", "sunny", "the", "the", "the", "sunny", "is", "is"], k = 4
Output: ["the", "is", "sunny", "day"]
Explanation: "the", "is", "sunny" and "day" are the four most frequent words,
with the number of occurrence being 4, 3, 2 and 1 respectively.
Note:
1. You may assume k is always valid, 1 ≤ k ≤ number of unique elements.
2. Input words contain only lowercase letters.
Follow up:
1. Try to solve it in O(n log k) time and O(n) extra space.
'''
#BF: (Use Counter)
from collections import Counter
class Solution:
    def topKFrequent(self, words: List[str], k: int) -> List[str]:
        """Return the k most frequent words, ties broken alphabetically.

        Sorting by the key (-count, word) puts higher frequencies first and,
        within equal frequency, lexicographically smaller words first.
        Assumes 1 <= k <= number of unique words.
        """
        # FIX/cleanup: the original left a debug print() inside the result
        # loop (polluting stdout) and emulated slicing with a manual break;
        # Counter plus a slice expresses the same thing directly.
        counts = Counter(words)
        ordered = sorted(counts.items(), key=lambda kv: (-kv[1], kv[0]))
        return [word for word, _ in ordered[:k]]
|
989,654 | 8ca9e5b1f74e7c5ad93c305ef679841a0b9375d3 | from django import forms
from .models import chef
class chef_form(forms.Form):
    """Plain (non-model) form mirroring the fields of the ``chef`` model."""
    # NOTE(review): class name is not PEP 8 (ChefForm); kept as-is because
    # renaming would break existing imports.
    chef_code = forms.IntegerField()
    chef_name = forms.CharField()
    chef_age = forms.IntegerField()
    chef_area = forms.CharField()
class chefFormSet(forms.ModelForm):
    """ModelForm over every field of ``chef``.

    NOTE(review): despite the name this is a single ModelForm, not a
    formset; kept as-is because renaming would break existing imports.
    """
    class Meta:
        model = chef
        fields = '__all__'
989,655 | c4b899a045f6833bca62ac04de4308875202228e | import mysql.connector
# SECURITY NOTE(review): database host and credentials are hard-coded below;
# move them to environment variables or a config file before sharing/deploying.
mysqlConnector = mysql.connector.connect(
    host="13.209.50.185",
    user="root",
    passwd="cww1003",
    database="choi"
)
mycursor = mysqlConnector.cursor()
latest_product_name = "test11"
before_latestProd_name = "b-test11"
# Parameterized INSERT: values are bound by the driver, not string-formatted.
sql = "INSERT INTO yj_mall_update_record (mall_name, kor_mall_name, latest_product_name, product_before_update, createdDate) VALUES (%s, %s, %s, %s, NOW())"
mycursor.execute(sql, ("realv", "리얼브이", latest_product_name, before_latestProd_name))
# mysqlConnector.commit()
# Persist the insert.
mysqlConnector.commit()
|
989,656 | 4c4cd9316a1b7757427c17e246ed324c1a2bf8e0 | import torch.nn as nn
class SimpleDNN(nn.Module):
    """Fully connected feed-forward net: in_dim -> hidden*(depth-1) -> out_dim.

    ``act_fn`` is applied after every layer except the last; ``depth`` is the
    total number of Linear layers and must be positive.
    """
    def __init__(self, in_dim, hidden_dim, out_dim, depth, act_fn):
        super(SimpleDNN, self).__init__()
        assert depth > 0
        self.act_fn = act_fn
        # Chain of layer widths; depth == 1 degenerates to [in_dim, out_dim].
        dims = [in_dim] + [hidden_dim] * (depth - 1) + [out_dim]
        self.linears = nn.ModuleList(
            nn.Linear(fan_in, fan_out)
            for fan_in, fan_out in zip(dims[:-1], dims[1:])
        )
        # Plain list of weight tensors, kept for external regularizers.
        self.kernel_weights = [layer.weight for layer in self.linears]

    def forward(self, x):
        # Activation on every hidden layer; final layer stays linear.
        for layer in self.linears[:-1]:
            x = self.act_fn(layer(x))
        return self.linears[-1](x)
|
989,657 | e5fde9354494255bdbd7a87694071ae6bdf2c7e5 | import os
# Reports false error on Linux as LnkParse3 is Windows-only dependency
import LnkParse3 # type: ignore
from typing import List
from pcbdraw.convert_common import chooseInkscapeCandidate
def detectInkscape() -> str:
    """
    Return path to working Inkscape >v1.0 executable
    """
    candidates: List[str] = []
    override = os.environ.get("PCBDRAW_INKSCAPE")
    if override is not None:
        # Ensure there is the .com extension needed for the CLI interface.
        candidates.append(os.path.splitext(override)[0] + ".com")
    candidates.append("inkscape")  # Inkscape found via PATH
    candidates.extend(readInkscapeFromStartMenu())
    return chooseInkscapeCandidate(candidates)
def readInkscapeFromStartMenu() -> List[str]:
    """Collect Inkscape CLI candidates from Windows Start Menu shortcuts.

    Looks for Inkscape.lnk under the all-users and current-user profiles;
    profiles without the shortcut are silently skipped.
    """
    found: List[str] = []
    profiles = [os.environ.get("ALLUSERSPROFILE", ""),
                os.environ.get("USERPROFILE", "")]
    for profile in profiles:
        lnk_path = os.path.join(profile, "Microsoft", "Windows", "Start Menu",
                                "Programs", "Inkscape", "Inkscape.lnk")
        try:
            with open(lnk_path, "rb") as handle:
                shortcut = LnkParse3.lnk_file(handle)
                target = os.path.realpath(shortcut.string_data.relative_path())
                # The .com version provides the CLI interface.
                found.append(os.path.splitext(target)[0] + ".com")
        except FileNotFoundError:
            continue
    return found
if __name__ == "__main__":
    # Manual smoke test: print the Inkscape executable that would be chosen.
    print(detectInkscape())
|
989,658 | 8eb8908eb1381b5ca68bf3adce2075a055db7acf | import sys
from PyQt5.QtWidgets import QApplication,QWidget,QToolTip,QPushButton
from PyQt5.QtGui import QFont,QIcon
class Example(QWidget):
    """Demo window showing rich-text tooltips on the widget and a button."""
    def __init__(self):
        super().__init__()
        self.initUI()
    def initUI(self):
        """Build the UI: tooltip font, widget/button tooltips, geometry, icon."""
        QToolTip.setFont(QFont('SansSerif',10))  # 10pt SansSerif for all tooltips
        # Tooltips created via setToolTip() may contain rich-text markup.
        self.setToolTip('This is a <b>QWidget</b> widget')
        # Create a button and attach a tooltip to it.
        btn = QPushButton('Button',self)
        btn.setToolTip('This is a <b>QPushButton</b> widget')
        # Resize the button; sizeHint() supplies a sensible default size.
        btn.resize(btn.sizeHint())
        btn.move(50,50)
        self.setGeometry(300,300,300,220)
        self.setWindowTitle('Tool Tips')
        self.setWindowIcon(QIcon('dog.png'))
        self.show()
if __name__=='__main__':
    app = QApplication(sys.argv)
    ex = Example()
    # FIX: the original called app.exit(app.exec_()).  QApplication.exit()
    # asks a *running* event loop to stop, so invoking it after exec_() has
    # already returned is a no-op and the process always exited with code 0.
    # The conventional pattern propagates Qt's return code to the interpreter:
    sys.exit(app.exec_())
|
989,659 | 042af55c26d350b03084a4aa9be3b1b08caf7523 | import unittest
import os
import boto3
import responses
import json
import urllib.parse as urlparse
import decimal
from moto import mock_dynamodb2
from lambdas.aqi_GET import lambda_function as aqi_route
def decimal_default(obj):
    """json.dumps ``default`` hook: serialize Decimal values as floats."""
    if not isinstance(obj, decimal.Decimal):
        raise TypeError
    return float(obj)
class TestCase(unittest.TestCase):
    """Shared fixtures for the AQI lambda tests.

    Provides a mocked DynamoDB table (intended to run under moto's
    ``@mock_dynamodb2``) and HTTP routes stubbed with ``responses``.
    """
    def given_dynamo_table_exists(self):
        """Create the AQI DynamoDB table named by DYNAMODB_AQI_TABLE."""
        dynamodb = boto3.resource("dynamodb", os.environ.get("DYNAMODB_REGION"))
        table = dynamodb.create_table(
            TableName=os.environ.get("DYNAMODB_AQI_TABLE"),
            KeySchema=[
                {
                    "AttributeName": "PartitionKey",
                    "KeyType": "HASH"
                },
            ],
            AttributeDefinitions=[
                {
                    "AttributeName": "PartitionKey",
                    "AttributeType": "S"
                },
            ],
            ProvisionedThroughput={
                "ReadCapacityUnits": 1,
                "WriteCapacityUnits": 1
            }
        )
    def given_api_routes_mocked(self):
        """Wire GET <AIR_QUALITY_API_URL>/aqi to the real lambda handler."""
        def _aqi_request_callback(request):
            # Translate the HTTP query string into the lambda event shape.
            parsed = urlparse.urlparse(request.url)
            event = {
                "zipCode": urlparse.parse_qs(parsed.query)["zipCode"][0]
            }
            return (200, {}, json.dumps(aqi_route.lambda_handler(event, {}), default=decimal_default))
        responses.add_callback(
            responses.GET, "{}/aqi".format(os.environ.get("AIR_QUALITY_API_URL").lower()),
            callback=_aqi_request_callback
        )
    def given_airnow_routes_mocked(self):
        """Stub the AirNow API and website with canned per-ZIP responses."""
        def _airnow_api_request_callback(request):
            parsed = urlparse.urlparse(request.url)
            zip_code = urlparse.parse_qs(parsed.query)["zipCode"][0]
            # Canned observations: 94501 has data, 52328 is empty.
            data = {
                "94501": [{"DateObserved":"2018-12-02 ","HourObserved":14,"LocalTimeZone":"PST","ReportingArea":"Oakland","StateCode":"CA","Latitude":37.8,"Longitude":-122.27,"ParameterName":"O3","AQI":30,"Category":{"Number":1,"Name":"Good"}},{"DateObserved":"2018-12-02 ","HourObserved":14,"LocalTimeZone":"PST","ReportingArea":"Oakland","StateCode":"CA","Latitude":37.8,"Longitude":-122.27,"ParameterName":"PM2.5","AQI":15,"Category":{"Number":1,"Name":"Good"}}],
                "52328": []
            }[zip_code]
            return (200, {}, json.dumps(data))
        def _airnow_request_callback(request):
            # The website endpoint uses lowercase "zipcode", unlike the API.
            parsed = urlparse.urlparse(request.url)
            zip_code = urlparse.parse_qs(parsed.query)["zipcode"][0]
            map_url = {
                "94501": "https://files.airnowtech.org/airnow/today/cur_aqi_sanfrancisco_ca.jpg"
            }[zip_code]
            data = "<html><img src=\"{}\" width=\"525\" height=\"400\" border=\"0\" style=\"position:relative\" usemap=\"#CurMap\"/></html>".format(map_url)
            return (200, {}, data)
        responses.add_callback(
            responses.GET, "http://www.airnowapi.org/aq/observation/zipCode/current/",
            callback=_airnow_api_request_callback
        )
        responses.add_callback(
            responses.GET, "https://airnow.gov/index.cfm",
            callback=_airnow_request_callback
        )
    def given_airnow_api_server_error(self):
        """Make the AirNow API answer 500 for every request."""
        def _airnow_api_request_callback(request):
            return (500, {}, "Internal Server Error")
        responses.add_callback(
            responses.GET, "http://www.airnowapi.org/aq/observation/zipCode/current/",
            callback=_airnow_api_request_callback
        )
    def given_airnow_api_bad_response(self):
        """Make the AirNow API return its XML error payload (bad API key)."""
        def _airnow_api_request_callback(request):
            return (200, {}, "<WebServiceError><Message>Invalid API key</Message></WebServiceError>")
        responses.add_callback(
            responses.GET, "http://www.airnowapi.org/aq/observation/zipCode/current/",
            callback=_airnow_api_request_callback
        )
    def load_resource(self, filename):
        """Load and parse a JSON fixture from the tests' resources directory."""
        example_file = open(os.path.join(os.path.dirname(__file__), "resources", filename), "rb")
        json_str = example_file.read().decode("utf-8")
        event = json.loads(json_str)
        example_file.close()
        return event
|
989,660 | d72b3614115433e2c7c7ebe07b7d4886a3faa46f | #!/usr/bin/env python3
import sys
try:
from OSC import OSCServer
except ImportError:
sys.path.append("/home/ubiuser/deploy/pyosc/")
from OSC import OSCServer
from time import sleep
# UDP OSC server listening on all interfaces, port 3333.
server = OSCServer( ("0.0.0.0", 3333) )
server.timeout = 0
run = True
# this method of reporting timeouts only works by convention
# that before calling handle_request() field .timed_out is
# set to False
def handle_timeout(self):
    """Timeout hook: mark the server so the polling loop can stop."""
    self.timed_out = True
# Attach handle_timeout as a bound method on this server instance.
import types
server.handle_timeout = types.MethodType(handle_timeout, server)
# We use TUIO events just to detect clicks.  `clicks` maps a TUIO /
# multitaction cursor id to a (source, x, y) tuple; only the first sample
# per id is kept - as in 'touchbegin' / 'mousedown'.
clicks = {}
def user_callback(path, tags, args, source):
    """Handle a /user/N message by reporting who sent what.

    The user id is derived from the OSC address by stripping every slash
    (e.g. "/user/1" -> "user1").  ``tags`` would be 'fff'; ``source`` is
    the peer address in case a reply were needed.
    """
    user = path.replace("/", "")
    print("Now do something with", user, args[2], args[0], 1 - args[1])
def tuio_callback(path, tags, args, source):
    """Debug handler: dump every raw /tuio message to stdout.

    FIX: these were Python 2 ``print path`` statements, which are a
    SyntaxError under the python3 interpreter named in the shebang.
    """
    print(path)
    print(tags)
    print(args)
    print(source)
def tuio2Dcur_callback(path, tags, args, source):
    """Record the first reported position of each TUIO 2D cursor.

    Stores (source, x, y) in the module-level ``clicks`` dict, keyed by
    cursor id; later samples for the same id are ignored, so an entry
    behaves like a touch-begin / mouse-down event.
    """
    kind = args[0]
    # 'alive' and 'fseq' are bookkeeping messages, not cursor updates.
    if kind in ('alive', 'fseq'):
        return
    _, cursor_id, x, y, _a, _b, _c = args
    if cursor_id not in clicks:
        clicks[cursor_id] = (source, x, y)
def default_handler(addr, tags, stuff, source):
    """Silently swallow any OSC message without a dedicated handler."""
    return None
def quit_callback(path, tags, args, source):
    """Handler for /quit: clear the global ``run`` flag to stop the main loop."""
    # don't do this at home (or it'll quit blender)
    global run
    run = False
# Route the per-user OSC addresses to the shared user callback.
server.addMsgHandler( "/user/1", user_callback )
server.addMsgHandler( "/user/2", user_callback )
server.addMsgHandler( "/user/3", user_callback )
server.addMsgHandler( "/user/4", user_callback )
server.addMsgHandler( "/user/5", user_callback )
server.addMsgHandler( "/user/6", user_callback )
server.addMsgHandler( "/tuio/2Dcur", tuio2Dcur_callback )
# "default" catches every address without an explicit handler above.
server.addMsgHandler("default", default_handler)
server.addMsgHandler( "/quit", quit_callback )
def each_frame():
    """Per-frame hook for the host loop: drain all pending OSC requests.

    Resets the server's ``timed_out`` flag, then keeps calling
    ``handle_request()`` until the timeout hook flips the flag back on
    (i.e. no more messages are waiting).
    """
    server.timed_out = False
    while True:
        server.handle_request()
        if server.timed_out:
            break
if __name__ == '__main__':
    # simulate a "game engine": poll until quit_callback clears `run`
    while run:
        # do the game stuff:
        #sleep(1)
        # call user script
        each_frame()
    server.close()
|
989,661 | dd4a0453f7ee96b8cf5a1a0bf78de65d50629707 | """Base class for Tasks."""
from oslo_log import log as logging
from asciipic.tasks import base
from asciipic.common import exception
LOG = logging.getLogger(__name__)
class ExampleTask(base.BaseTask):
    """Minimal concrete task showing the BaseTask lifecycle hooks."""

    def _prologue(self):
        """Runs once before the task body."""
        LOG.info("We could create a database connection.")

    def _work(self):
        """The actual task body; override with real work."""
        return "Task Done"

    def _epilogue(self):
        """Runs once after the task body."""
        LOG.info("We could clean up the database connection.")

    def _on_task_done(self, result):
        """Invoked after the task finished successfully."""
        LOG.info("We could save this in oracle database is required.")
        LOG.info("Result %s", result)

    def _on_task_fail(self, exc):
        """Invoked when processing raised an AsciipicException; re-raises."""
        LOG.info("We could cleanup some resources or log the error")
        raise exc

    def __call__(self):
        """Run the full lifecycle and return the task result."""
        result = None
        try:
            self._prologue()
            result = self._work()
            self._epilogue()
        except exception.AsciipicException as error:
            self._on_task_fail(error)
        else:
            self._on_task_done(result)
        return result
|
989,662 | 8e51bfed97b7040e00ca4cad11512d55237ece7c | """
Definition of ListNode
class ListNode(object):
def __init__(self, val, next=None):
self.val = val
self.next = next
"""
class Solution:
    """
    @param: head: ListNode head is the head of the linked list
    @param: m: An integer
    @param: n: An integer
    @return: The head of the reversed ListNode
    """
    def reverseBetween(self, head, m, n):
        if not head:
            return
        # Number of extra links to reverse beyond the m-th node.
        span = n - m
        # Walk `before` to the (m-1)-th node; the dummy guards m == 1.
        before = dummy = ListNode(-1, head)
        for _ in range(m - 1):
            before = before.next
        # `section_tail` is the m-th node; after reversal it becomes the
        # last node of the reversed section and must point at the rest.
        section_tail = node = before.next
        prev = None
        # Standard in-place pointer reversal over span + 1 nodes.
        for _ in range(span + 1):
            nxt = node.next
            node.next = prev
            prev = node
            node = nxt
        # Splice the reversed section back: prev is its new head, node is
        # the (n+1)-th node of the original list.
        before.next = prev
        section_tail.next = node
        return dummy.next
|
989,663 | 267d9bedd292039dea2fb0b1c5f08c4fa1a2f292 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys, os
# DATA packaged
import numpy as np
import pandas as pd
from pandas import Series, DataFrame
# ML packages
import tensorflow as tf
from keras import backend as K
K.clear_session
# Word embedding
import gensim
#from gensim.models import Word2Vec
from gensim.models.keyedvectors import KeyedVectors
# Text tockenization
from nltk.tokenize import sent_tokenize, word_tokenize
# Miscellaneous
from random import sample
from functools import reduce
from collections import Counter
import itertools # itertools.repeat(x, 3)
###############################################################################
# NOTE(review): `global` at module level is a no-op; these two statements
# only document intent, they have no effect.
global FOLDER_NN_MODELS, DATA_FOLDER
# Directory of the folder where data and word embeddings are located
PROJECT_FOLDER = "./"
DATA_FOLDER = PROJECT_FOLDER + "data/"
FOLDER_NN_MODELS = PROJECT_FOLDER + "nn_models/"
global NUM_FOR_TEST # How many batches to use for testing
NUM_FOR_TEST = 64*5
# READ AND PREPROCESS LOCAL FILES
# NOTE(review): exec() runs the sibling script in this module's namespace
# (presumably defines vocab_dict and the encoded dataframes used below —
# confirm); consider importing it as a module instead.
exec(open(PROJECT_FOLDER + "read_sentences.py").read())
###############################################################################
# Network parameters (TensorFlow 1.x tf.app.flags style)
flags = tf.app.flags
FLAGS = flags.FLAGS
# General Model Hyperparameters
tf.flags.DEFINE_integer("embedding_dim", 100, "Dimensionality of word embedding (default: 300)")
tf.flags.DEFINE_integer("vocab_size", 20000, "Vocabulary")
tf.flags.DEFINE_integer("sent_len", 30, "Maximum sentence length")
# Training parameters
tf.flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
tf.flags.DEFINE_integer("clip_gradient", 5, "Clip the norm of the gradients to 5")
tf.flags.DEFINE_float("learning_rate", 0.001, "Default Adam learning rate")
# RNN hyperparameters
tf.flags.DEFINE_integer("hidden_units", 512, "The size of the hidden cell layer")
tf.flags.DEFINE_integer("hidden_units_large", 1024, "The size of the hidden cell layer")
#tf.flags.DEFINE_float('learning_rate', 0.01, 'Learning rate for the optimization algorithms')
# Session Configuraion parameters
tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
# Thread-pool sizes for the cluster and single-threaded test sessions.
tf.flags.DEFINE_integer("intra_op_parallelism_threads", 4, "Nodes that can use multiple threads to parallelize their execution will schedule the individual pieces into this pool.")
tf.flags.DEFINE_integer("inter_op_parallelism_threads", 4, "All ready nodes are scheduled in this pool.")
tf.flags.DEFINE_integer("intra_op_parallelism_threads_test", 1, "Nodes that can use multiple threads to parallelize their execution will schedule the individual pieces into this pool.")
tf.flags.DEFINE_integer("inter_op_parallelism_threads_test", 1, "All ready nodes are scheduled in this pool.")
# Session config used for training on the cluster.
session_conf_cluster = tf.ConfigProto(
    allow_soft_placement = FLAGS.allow_soft_placement,
    log_device_placement = FLAGS.log_device_placement,
    intra_op_parallelism_threads = FLAGS.intra_op_parallelism_threads,
    inter_op_parallelism_threads = FLAGS.inter_op_parallelism_threads,
)
# Session config used for (single-threaded) testing.
session_conf_test = tf.ConfigProto(
    allow_soft_placement = FLAGS.allow_soft_placement,
    log_device_placement = FLAGS.log_device_placement,
    intra_op_parallelism_threads = FLAGS.intra_op_parallelism_threads_test,
    inter_op_parallelism_threads = FLAGS.inter_op_parallelism_threads_test,
)
###############################################################################
def prepare_batch(df_inp,
                  batch_size = FLAGS.batch_size,
                  sent_len = FLAGS.sent_len,
                  null_elem = vocab_dict["<pad>"]):
    """
    Pad *df_inp* with <pad>-filled rows up to a full batch.

    :param df_inp: 2-D array of encoded sentences (rows = examples).
    :param batch_size: target number of rows per batch.
    :param sent_len: width (tokens per sentence) of the padding rows.
    :param null_elem: encoding of the <pad> token used to fill new rows.
    :return: (df_out, added) where ``added`` is the number of padding rows.

    Example:
        df_inp = train_df_enc[: 46,:]
        df_out, added = prepare_batch(df_inp)
    """
    df_out, added = df_inp, 0
    if len(df_inp) < batch_size:
        added = batch_size - len(df_inp)
        # FIX: use the `sent_len` parameter here; the original read
        # FLAGS.sent_len directly, silently ignoring a caller-supplied width.
        tmp = null_elem * np.ones((added, sent_len))
        df_out = np.concatenate((df_inp, tmp), axis=0)
    return (df_out, added)
|
989,664 | 8833564fd07a266800c0a92875b0474c6e90d3d1 | # draw a triangle
import turtle
t = turtle.Turtle()
t.forward(100)
t.left(120)
t.forward(100)
t.left(120)
t.forward(100)
t.left(120) |
989,665 | 63ad82c283fa7a98ffdc917a60bdae13f7025207 | import os
from datetime import datetime, timedelta
from django.test import TestCase
from mock import patch, call
from oauthlib.oauth2 import TokenExpiredError, InvalidClientError
from hgw_common.utils.mocks import MockOAuth2Session
os.environ['DJANGO_SETTINGS_MODULE'] = 'hgw_common.test.settings'
from hgw_common.models import OAuth2SessionProxy, AccessToken
class OAuthProxyTest(TestCase):
    """Tests for OAuth2SessionProxy: token creation, persistence, reuse,
    and refresh on 401 / TokenExpiredError for both GET and POST."""
    def setUp(self):
        # Coordinates of the fake OAuth2 provider used by every test.
        self.service_url = 'https://oauth2service'
        self.client_id = 'id'
        self.client_secret = 'secret'
    def test_create_proxy(self):
        """
        Tests that when the proxy is instantiated a token is created.
        """
        with patch('hgw_common.models.OAuth2Session', new_callable=MockOAuth2Session) as mock:
            m = mock(200)
            OAuth2SessionProxy(self.service_url, self.client_id, self.client_secret)
            # The datetime object has a precision to 10e-6 seconds while the timestamp 10e-7.
            # This precision is irrelevant in this case but we need to modify the original value
            m.token['expires_at'] = datetime.fromtimestamp(m.token['expires_at']).timestamp()
            mock.assert_called()
            self.assertEqual(AccessToken.objects.count(), 1)
            self.assertDictEqual(AccessToken.objects.first().to_python(), mock().token)
    def test_access_token_creation_fail(self):
        """Token-fetch errors from the provider must propagate to the caller."""
        with patch('hgw_common.models.OAuth2Session', MockOAuth2Session):
            MockOAuth2Session.RAISES = InvalidClientError
            self.assertRaises(InvalidClientError, OAuth2SessionProxy, self.service_url,
                              self.client_id, self.client_secret)
            MockOAuth2Session.RAISES = None
    def test_access_token_from_db(self):
        """
        Tests that, when the proxy is instantiated and an access token is found in the db, the db token is used
        """
        token_data = {'access_token': 'OUfprCnmdJbhYAIk8rGMex4UBLXyf3',
                      'token_type': 'Bearer',
                      'expires_in': 36000,
                      'expires_at': (datetime.now() + timedelta(hours=10)).isoformat(),
                      'scope': 'read write'}
        AccessToken.objects.create(token_url=self.service_url, **token_data)
        with patch('hgw_common.models.OAuth2Session', new_callable=MockOAuth2Session) as mock:
            mock(200)
            OAuth2SessionProxy(self.service_url, self.client_id, self.client_secret)
            mock.assert_called()
            # Still exactly one token and it is the pre-existing one.
            self.assertEqual(AccessToken.objects.count(), 1)
            self.assertEqual(AccessToken.objects.first().access_token, token_data['access_token'])
    def test_access_token_reused(self):
        """
        Tests that, if the token has already been created and two subsequent calls returns 200, it is used the same token
        """
        with patch('hgw_common.models.OAuth2Session', MockOAuth2Session):
            MockOAuth2Session.RESPONSES = [200, 200]
            proxy = OAuth2SessionProxy(self.service_url, self.client_id, self.client_secret)
            m = proxy._session
            first_token = m.token['access_token']
            proxy.get("/fake_url/1/")
            second_token = m.token['access_token']
            proxy.get("/fake_url/2/")
            third_token = m.token['access_token']
            self.assertEqual(len(m.get.call_args_list), 2)  # Number of calls
            m.get.assert_has_calls([call('/fake_url/1/'), call('/fake_url/2/')])
            m.fetch_token.assert_called_once()
            self.assertEqual(AccessToken.objects.count(), 1)
            # FIX: the old code called assertEqual(first, second, third), which
            # passes third_token as the failure *message*, so it was never
            # compared.  Compare all three tokens explicitly instead.
            self.assertEqual(first_token, second_token)
            self.assertEqual(second_token, third_token)
    def test_access_token_refreshed_for_401_response_with_get_method(self):
        """
        Tests that, when the response is 401 (Unauthorized), another token is created and the GET is performed again
        """
        with patch('hgw_common.models.OAuth2Session', MockOAuth2Session):
            MockOAuth2Session.RESPONSES = [401]
            proxy = OAuth2SessionProxy(self.service_url, self.client_id, self.client_secret)
            session = proxy._session
            first_token = session.token['access_token']
            proxy.get("/fake_url/1/")
            second_token = session.token['access_token']
            self.assertEqual(len(session.get.call_args_list), 2)  # Number of calls
            self.assertEqual(len(session.fetch_token.call_args_list), 2)  # Number of calls
            session.get.assert_has_calls([call('/fake_url/1/'), call('/fake_url/1/')])
            self.assertEqual(AccessToken.objects.count(), 1)
            # assertNotEqual: assertNotEquals is a deprecated alias.
            self.assertNotEqual(first_token, second_token)
    def test_access_token_refreshed_for_401_response_with_post_method(self):
        """
        Tests that, when the response is 401 (Unauthorized), another token is created and the POST is performed again
        """
        with patch('hgw_common.models.OAuth2Session', MockOAuth2Session):
            MockOAuth2Session.RESPONSES = [401]
            proxy = OAuth2SessionProxy(self.service_url, self.client_id, self.client_secret)
            session = proxy._session
            first_token = session.token['access_token']
            proxy.post("/fake_url/1/")
            second_token = session.token['access_token']
            self.assertEqual(len(session.post.call_args_list), 2)  # Number of calls
            self.assertEqual(len(session.fetch_token.call_args_list), 2)  # Number of calls
            session.post.assert_has_calls([call('/fake_url/1/'), call('/fake_url/1/')])
            self.assertEqual(AccessToken.objects.count(), 1)
            self.assertNotEqual(first_token, second_token)
    def test_access_token_refreshed_for_token_expired_with_get_method(self):
        """
        Tests that, when the session raises TokenExpiredError, another token is created and the GET is performed again
        """
        with patch('hgw_common.models.OAuth2Session', MockOAuth2Session):
            MockOAuth2Session.RESPONSES = [TokenExpiredError(), 200]
            proxy = OAuth2SessionProxy(self.service_url, self.client_id, self.client_secret)
            session = proxy._session
            first_token = session.token['access_token']
            proxy.get("/fake_url/1/")
            second_token = session.token['access_token']
            self.assertEqual(len(session.get.call_args_list), 2)  # Number of calls
            self.assertEqual(len(session.fetch_token.call_args_list), 2)  # Number of calls
            session.get.assert_has_calls([call('/fake_url/1/'), call('/fake_url/1/')])
            self.assertEqual(AccessToken.objects.count(), 1)
            self.assertNotEqual(first_token, second_token)
    def test_access_token_refreshed_for_token_expired_with_post_method(self):
        """
        Tests that, when the session raises TokenExpiredError, another token is created and the POST is performed again
        """
        with patch('hgw_common.models.OAuth2Session', MockOAuth2Session):
            MockOAuth2Session.RESPONSES = [TokenExpiredError(), 200]
            proxy = OAuth2SessionProxy(self.service_url, self.client_id, self.client_secret)
            session = proxy._session
            first_token = session.token['access_token']
            proxy.post("/fake_url/1/")
            second_token = session.token['access_token']
            self.assertEqual(len(session.post.call_args_list), 2)  # Number of calls
            self.assertEqual(len(session.fetch_token.call_args_list), 2)  # Number of calls
            session.post.assert_has_calls([call('/fake_url/1/'), call('/fake_url/1/')])
            self.assertEqual(AccessToken.objects.count(), 1)
            self.assertNotEqual(first_token, second_token)
989,666 | 5c2fce7452b63c8ea76f0179906610ce8d55730f | """This tests passing local_ns and global_ns (for backwards compatibility only)
at activation of an embedded shell."""
from IPython.terminal.embed import InteractiveShellEmbed
user_ns = dict(cookie='monster')
ISE = InteractiveShellEmbed(
banner1='check cookie in locals, and globals empty')
ISE(local_ns=user_ns, global_ns={})
|
989,667 | ea49a1099fd97a88aab8931911064af206c352bd | from distutils.core import setup
import os
PKG_VERSION = os.environ.get('PACKAGE_VERSION') or '1.9.0'
setup(
name='python3-indy',
version=PKG_VERSION,
packages=['indy'],
url='https://github.com/hyperledger/indy-sdk',
license='MIT/Apache-2.0',
author='Vyacheslav Gudkov',
author_email='vyacheslav.gudkov@dsr-company.com',
description='This is the official SDK for Hyperledger Indy (https://www.hyperledger.org/projects), which provides a distributed-ledger-based foundation for self-sovereign identity (https://sovrin.org). The major artifact of the SDK is a c-callable library.',
install_requires=['pytest<3.7', 'pytest-asyncio', 'base58'],
tests_require=['pytest<3.7', 'pytest-asyncio', 'base58']
)
|
989,668 | 3bf3deb17c2da6a5f496ced029636ed2857bf555 | # main model
import pickle
import numpy as np
from numpy.random import random
import math
import spams
from spams import spams.omp
from keras.models import Sequential
from keras.layers import LSTM
from sklearn.decomposition import SparseCoder
print 'Starting up...'
# load preprocessed data
U = pickle.load(open('U.pkl', 'r')) # common embedding matrix
vocab = pickle.load(open('vocab.pkl', 'r')) # vocabulary
codes = pickle.load(open('word.codes', 'r')) # sparse codes for common words
w = []
#print U
#def sparse_loss(y_pred, w):
# '''
# Helper function. Custom loss function to be used in model.
# '''
# v = np.dot(U, y_pred) - w
# v = v + alpha * np.sum(y_pred)
# v = v + beta * abs(np.sum(y_pred) - 1)
#
# return math.sqrt(np.dot(v, v))
print 'Initializing parameters...'
# initialize parameters
alpha = .002
beta = .2
max_len = 24
# compute codes
coder = SparseCoder(U, transform_algorithm='lasso_cd', transform_alpha = alpha, split_sign=True)
X = []
y = []
keys = []
for word in vocab:
# if word.startswith('http://'):
# continue
#
# if word.startswith('https://'):
# continue
keys.append(word)
X.append(random(2000))
y.append(vocab[word])
print np.shape(U)
print np.shape(X)
X = coder.fit_transform(X, y)
print np.shape(X)
codes = {}
for k in range(len(keys)):
codes[keys[k]] = X[k]
pickle.dump(codes, open('word.codes', 'w'))
#print codes['m.v.p.']
print keys[-1] + ': ' + str(codes[keys[-1]])
#print vocab[keys[-1]]
|
989,669 | 7e9117726dc946e693e96663129b56e4edcd0a1c | import pandas as pd
df = pd.read_csv('data/enrollment.csv')
df["status"] = "allowed"
info = df["course name"] == "information technology"
freshmen = df["year"] == 1
df.loc[info & freshmen, "status"] = "not allowed"
commerce = df["course name"] == "commerce"
senior = df["year"] == 4
df.loc[commerce & senior, "status"] = "not allowed"
allowed = df["status"] == "allowed"
course_counts = df.loc[allowed, "course name"].value_counts()
closed_courses = list(course_counts[course_counts < 5].index)
for course in closed_courses:
df.loc[df["course name"] == course, "status"] = "not allowed"
df
|
989,670 | 63fffa3ef0136d5d160d39032eba9ac181e71378 | import tensorflow as tf
import os
def brain_dataset(path, image_size=None, augment_function=None, num_parallel_calls=tf.data.experimental.AUTOTUNE):
    """Build a shuffled tf.data.Dataset of (image, labels) pairs from *path*.

    Expects PNG files under path/images/images and path/labels/labels; the two
    sorted file lists are paired index-by-index.

    Args:
        path: dataset root directory.
        image_size: optional (height, width); images are resized bilinearly,
            labels with nearest-neighbour so class ids are not interpolated.
        augment_function: optional fn(image, labels) -> (image, labels)
            applied before any resizing.
        num_parallel_calls: parallelism for the decode/augment map stage.

    Returns:
        (dataset, number_of_examples).
    """
    def get_subdirectory_files(subdir):
        # Recursively collect .png paths under path/<subdir>, sorted so the
        # image and label lists line up.
        sub_path = os.path.join(path, subdir)
        return sorted([os.path.join(dp, f) for dp, dn, fn in
                       os.walk(os.path.expanduser(sub_path), followlinks=True) for f in
                       fn if f.endswith('.png')])

    def parse_function(image_path, labels_path):
        image = _read_image(image_path)
        labels = _read_labels(labels_path)
        if augment_function:
            image, labels = augment_function(image, labels)
        if image_size:
            image = tf.image.resize(image, image_size)
            # nearest-neighbour keeps label values exact
            labels = tf.image.resize(labels, image_size, method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
        return image, labels

    data_files = get_subdirectory_files('images/images')
    target_files = get_subdirectory_files('labels/labels')
    assert len(data_files) == len(target_files)

    # Shuffle over the full file list, then decode in parallel.
    ds = tf.data.Dataset.from_tensor_slices((data_files, target_files)) \
        .shuffle(len(data_files)) \
        .map(parse_function, num_parallel_calls)
    return ds, len(data_files)
@tf.function
def _read_image(path):
    """Load the PNG at *path* as a float32 RGB tensor scaled to [0, 1]."""
    raw = tf.io.read_file(path)
    rgb = tf.image.decode_png(raw, 3)
    return tf.image.convert_image_dtype(rgb, tf.float32)
@tf.function
def _read_labels(path):
    """Load the single-channel PNG label map at *path* as an int32 tensor."""
    raw = tf.io.read_file(path)
    label_map = tf.image.decode_png(raw, 1)
    return tf.cast(label_map, tf.int32)
|
989,671 | d7d8d600a2a6df3c81c27b6c97ad81882fda6d55 | #!/usr/bin/python
import os
import re
import nltk
import sys
import getopt
import codecs
import struct
import math
import io
import collections
import timeit
RECORD_TIME = False # toggling for recording the time taken for indexer
BYTE_SIZE = 4 # docID is in int: each posting is one 4-byte unsigned integer
"""
conducts boolean queries from queries_file and writes outputs to output_file
params:
dictionary_file: dictionary file produced by indexer
postings_file: postings file produced by indexer
queries_file: file of boolean queries
output_file: responses to boolean queries
"""
def search(document_directory, dictionary_file, postings_file, queries_file, output_file):
    """Run every boolean query in queries_file and write results to output_file."""
    # Map positional docIDs (0..N-1) back to file names in the corpus directory.
    # NOTE(review): assumes the indexer enumerated the directory in the same
    # os.listdir() order — confirm against the indexer.
    docs_list = [docs for docs in os.listdir(document_directory)]
    n = [i for i in range(len(docs_list))]
    doc_dict = dict(zip(n, docs_list))
    # open files
    dict_file = codecs.open(dictionary_file, encoding='utf-8')
    post_file = io.open(postings_file, 'rb')
    query_file = codecs.open(queries_file, encoding='utf-8')
    out_file = open(output_file, 'w')
    # load dictionary to memory
    loaded_dict = load_dictionary(dict_file)
    dictionary = loaded_dict[0] # dictionary map
    indexed_docIDs = loaded_dict[1] # list of all docIDs indexed in sorted order
    dict_file.close()
    # process each query (one query per input line)
    queries_list = query_file.read().splitlines()
    for i in range(len(queries_list)):
        query = queries_list[i]
        result = process_query(query, dictionary, post_file, indexed_docIDs)
        # write each result to output as ", "-separated document names
        for j in range(len(result)):
            docID = result[j]
            out_file.write(doc_dict[docID])
            if (j != len(result) - 1):
                out_file.write(', ')
        if (i != len(queries_list) - 1):
            out_file.write('\n')
    # close files
    post_file.close()
    query_file.close()
    out_file.close()
"""
returns 2-tuple of loaded dictionary and total df
params:
dict_file: opened dictionary file
"""
def load_dictionary(dict_file):
    """Read the dictionary file into memory.

    Returns a 2-tuple: a term -> (df, postings byte offset) map, and the
    sorted list of every indexed docID taken from the file's first line.
    """
    term_map = {}
    all_docIDs = []
    header_done = False
    for line in dict_file.read().split('\n'):
        # skip the trailing empty line at the end of the file
        if not line:
            continue
        if not header_done:
            # first line: all indexed docIDs live between a 20-char prefix
            # and a closing ']'
            all_docIDs = [int(doc) for doc in line[20:-1].split(',')]
            header_done = True
        else:
            # subsequent lines: "<term> <df> <offset>"
            parts = line.split(" ")
            term_map[parts[0]] = (int(parts[1]), int(parts[2]))
    return (term_map, all_docIDs)
"""
returns the list of docIDs in the result for the given query
params:
query: the query string e.g. 'bill OR Gates AND (vista OR XP) AND NOT mac'
dictionary: the dictionary in memory
indexed_docIDs: the list of all docIDs indexed (used for negations)
"""
def process_query(query, dictionary, post_file, indexed_docIDs):
    """Evaluate one boolean query string and return the matching docIDs."""
    stemmer = nltk.stem.porter.PorterStemmer() # instantiate stemmer
    # make parentheses standalone tokens, then tokenise on spaces
    tokens = query.replace('(', '( ').replace(')', ' )').split(' ')
    operand_stack = []  # evaluated posting lists
    rpn = collections.deque(shunting_yard(tokens))  # query in postfix order
    while rpn:
        tok = rpn.popleft()
        if tok == 'AND':
            rhs = operand_stack.pop()
            lhs = operand_stack.pop()
            outcome = boolean_AND(lhs, rhs)
        elif tok == 'OR':
            rhs = operand_stack.pop()
            lhs = operand_stack.pop()
            outcome = boolean_OR(lhs, rhs)
        elif tok == 'NOT':
            outcome = boolean_NOT(operand_stack.pop(), indexed_docIDs)
        else:
            # operand: stem it and fetch its posting list (empty if unknown)
            stem = stemmer.stem(tok)
            outcome = []
            if stem in dictionary:
                df, offset = dictionary[stem]
                outcome = load_posting_list(post_file, df, offset)
        operand_stack.append(outcome)
    # exactly one evaluated result should remain for a well-formed query
    if len(operand_stack) != 1: print ("ERROR: results_stack. Please check valid query")
    return operand_stack.pop()
"""
returns posting list for term corresponding to the given offset
params:
post_file: opened postings file
length: length of posting list (same as df for the term)
offset: byte offset which acts as pointer to start of posting list in postings file
"""
def load_posting_list(post_file, length, offset):
    """Read the posting list of `length` docIDs stored at byte `offset`.

    Each posting is one BYTE_SIZE-byte unsigned integer ('I', native order).
    """
    post_file.seek(offset)
    return [struct.unpack('I', post_file.read(BYTE_SIZE))[0] for _ in range(length)]
"""
returns the list of postfix tokens converted from the given infix expression
params:
infix_tokens: list of tokens in original query of infix notation
"""
def shunting_yard(infix_tokens):
    """Convert an infix boolean query into postfix (RPN) token order.

    Operators are NOT > AND > OR; operands are lower-cased; parentheses
    group sub-expressions and never appear in the output.
    """
    precedence = {'NOT': 3, 'AND': 2, 'OR': 1, '(': 0, ')': 0}
    postfix = []
    pending_ops = []
    for tok in infix_tokens:
        if tok == '(':
            pending_ops.append(tok)
        elif tok == ')':
            # flush operators down to (and discarding) the matching bracket
            top = pending_ops.pop()
            while top != '(':
                postfix.append(top)
                top = pending_ops.pop()
        elif tok in precedence:
            # pop strictly-higher-precedence operators before pushing this one
            while pending_ops and precedence[pending_ops[-1]] > precedence[tok]:
                postfix.append(pending_ops.pop())
            pending_ops.append(tok)
        else:
            postfix.append(tok.lower())
    # drain whatever operators remain
    while pending_ops:
        postfix.append(pending_ops.pop())
    # print ('postfix:', postfix) # check
    return postfix
"""
returns the list of docIDs which is the compliment of given right_operand
params:
right_operand: sorted list of docIDs to be complimented
indexed_docIDs: sorted list of all docIDs indexed
"""
def boolean_NOT(right_operand, indexed_docIDs):
    """Return every indexed docID that is absent from *right_operand*.

    Both inputs must be sorted; right_operand is assumed to be a subset of
    indexed_docIDs.
    """
    # the complement of nothing is everything
    if not right_operand:
        return indexed_docIDs
    complement = []
    pos = 0  # cursor into right_operand
    for doc_id in indexed_docIDs:
        if doc_id != right_operand[pos]:
            complement.append(doc_id)
        elif pos + 1 < len(right_operand):
            # matched: move on to the next excluded docID
            pos += 1
    return complement
"""
returns list of docIDs that results from 'OR' operation between left and right operands
params:
left_operand: docID list on the left
right_operand: docID list on the right
"""
def boolean_OR(left_operand, right_operand):
    """Merge two sorted docID lists into their sorted union."""
    merged = []
    i = j = 0
    # standard two-pointer merge while both lists still have elements
    while i < len(left_operand) and j < len(right_operand):
        l_doc = left_operand[i]
        r_doc = right_operand[j]
        if l_doc == r_doc:
            # common docID: emit once, advance both sides
            merged.append(l_doc)
            i += 1
            j += 1
        elif l_doc < r_doc:
            merged.append(l_doc)
            i += 1
        else:
            merged.append(r_doc)
            j += 1
    # at most one of these tails is non-empty
    merged.extend(left_operand[i:])
    merged.extend(right_operand[j:])
    return merged
"""
returns list of docIDs that results from 'AND' operation between left and right operands
params:
left_operand: docID list on the left
right_operand: docID list on the right
"""
def boolean_AND(left_operand, right_operand):
    """Intersect two sorted docID lists using sqrt(n) skip pointers."""
    intersection = []
    i = j = 0
    # skip distances; 0 only when a list is empty (loop never runs then)
    i_skip = int(math.sqrt(len(left_operand)))
    j_skip = int(math.sqrt(len(right_operand)))
    while i < len(left_operand) and j < len(right_operand):
        l_doc = left_operand[i]
        r_doc = right_operand[j]
        if l_doc == r_doc:
            # common docID belongs to the intersection
            intersection.append(l_doc)
            i += 1
            j += 1
        elif l_doc > r_doc:
            # right side is behind: take the skip pointer when it stays <= l_doc
            if j + j_skip < len(right_operand) and right_operand[j + j_skip] <= l_doc:
                j += j_skip
            else:
                j += 1
        else:
            # left side is behind: take the skip pointer when it stays <= r_doc
            if i + i_skip < len(left_operand) and left_operand[i + i_skip] <= r_doc:
                i += i_skip
            else:
                i += 1
    return intersection
if __name__ == '__main__':
    # NOTE(review): these Windows paths contain backslashes in non-raw string
    # literals; none of '\P','\F','\d','\p','\q','\o' is an escape today, but
    # raw strings (r'...') or forward slashes would be safer.
    search(document_directory = 'D:\Python_projects\Files',
           dictionary_file = 'D:\Python_projects\dictionary.json',
           postings_file = 'D:\Python_projects\postings_file.json',
           queries_file = 'D:\Python_projects\queries.json' ,
           output_file = 'D:\Python_projects\output.json')
"""
prints the proper command usage
"""
# def print_usage():
# print ("usage: " + sys.argv[0] + " -d dictionary-file -p postings-file -q file-of-queries -o output-file-of-results")
# dictionary_file = postings_file = queries_file = output_file = None
# try:
# opts, args = getopt.getopt(sys.argv[1:], 'd:p:q:o:')
# except (getopt.GetoptError, err):
# usage()
# sys.exit(2)
# for o, a in opts:
# if o == '-d':
# dictionary_file = a
# elif o == '-p':
# postings_file = a
# elif o == '-q':
# queries_file = a
# elif o == '-o':
# output_file = a
# else:
# assert False, "unhandled option"
# if (dictionary_file == None or postings_file == None or queries_file == None or output_file == None):
# print_usage()
# sys.exit(2)
# if (RECORD_TIME): start = timeit.default_timer() # start time
# search(dictionary_file, postings_file, queries_file, output_file) # call the search engine on queries
# if (RECORD_TIME): stop = timeit.default_timer() # stop time
# if (RECORD_TIME): print ('Querying time:' + str(stop - start)) # print time taken
|
989,672 | 8deab702e2ddcdba89da00388e181cd61a88e7fd | # _*_coding:utf-8_*_
# 创建用户 :chenzhengwei
# 创建日期 :2019/7/15 下午3:59
"""
"""
# Classes can be created dynamically: type(name, bases, attrs_dict).
Student = type('Student', (object,), {'name': "chenzecc"})
stu1 = Student()
print(getattr(stu1, 'name', None))


# Plain functions become methods when placed in the attrs dict.
def __init__(self, name):
    self.name = name


def test(self):
    print('0000')


People = type('People', (object,), {"__init__": __init__, "test": test})
people = People("chenzw")
print(people.name)
people.test()


class Base(object):
    pass


class NewBase(Base):
    # Inherited by subclasses unless shadowed.
    foo = True


# Foo shadows NewBase.foo with False via the attrs dict.
Foo = type('Foo', (NewBase,), {'foo': False})
foo = Foo()
print(foo.foo)
class ModelMetaClass(type):
    """Metaclass turning tuple-valued class attributes into an ORM mapping.

    Every attribute whose value is a tuple is treated as a column definition:
    it is removed from the class namespace and collected into ``__mapping__``,
    and the class name is stored as ``__table__``.
    """

    def __new__(cls, class_name, class_parents, class_attrs):
        print(class_name, class_parents, class_attrs)
        # collect (attribute -> column tuple) pairs first
        columns = {name: value for name, value in class_attrs.items()
                   if isinstance(value, tuple)}
        for name, value in columns.items():
            print("Found mapping %s --> %s" % (name, value))
        # then strip them from the namespace so they don't shadow row values
        for name in columns:
            class_attrs.pop(name)
        class_attrs["__mapping__"] = columns
        class_attrs["__table__"] = class_name
        return type.__new__(cls, class_name, class_parents, class_attrs)
class User(metaclass=ModelMetaClass):
    # Column definitions as (column name, SQL type). The metaclass moves these
    # into __mapping__, so they are not plain class attributes afterwards.
    uid = ('uid', "int unsigned")
    name = ('username', "varchar(30)")
    email = ('email', "varchar(30)")
    password = ('password', "varchar(30)")

    def __init__(self, **kwargs):
        # Accept arbitrary column values as keyword arguments.
        for name, value in kwargs.items():
            setattr(self, name, value)

    def save(self):
        """Build and print (not execute) the INSERT statement for this row."""
        fields = []
        args = []
        for k, v in self.__mapping__.items():
            fields.append(v[0])
            args.append(getattr(self, k, None))
        # NOTE(review): values are interpolated unquoted/unescaped — fine for a
        # print-only demo, unsafe as real SQL generation.
        sql = "insert into %s(%s) values(%s)" % (self.__table__, ",".join(fields), ",".join([str(i) for i in args]))
        print("SQL: %s" % sql)
# u = User(uid=123, name="zfx", email="zfx@zfx.com", password="6666")
# u.save()
class A(type):
    # Metaclass that logs every class created through it.
    def __new__(cls, class_name, class_parents, class_attrs):
        print(class_name, class_parents, class_attrs)
        return type.__new__(cls, class_name, class_parents, class_attrs)


class B(metaclass=A):
    def __init__(self, **kwargs):
        # Store each keyword argument as an instance attribute.
        for k, v in kwargs.items():
            setattr(self, k, v)


# Demonstration: creating B above already triggered A.__new__'s print.
b = B(x=1)
989,673 | d2ff70cdacd1ad5ec4482484b609e60c4b0b1cfa | #!/usr/bin/env python3
# -*- coding: latin-1 -*-
from flask import jsonify
def url_parser(url):
    """Extract the query-string parameters of *url* into a dict.

    Returns {} when the URL has no query string. Only the first '=' in each
    chunk splits key from value, so values may themselves contain '='.
    (The stdlib urllib.parse.parse_qsl offers the same, plus percent-decoding.)
    """
    split_index = url.find('?')  # first '?', in case there are '?' in the body
    if split_index == -1:
        # BUG FIX: previously the whole URL was treated as the query string
        # when no '?' was present.
        return {}
    query = url[split_index + 1:]
    if not query:
        return {}
    params = {}
    for chunk in query.split('&'):
        # BUG FIX: maxsplit=1 — 'k=a=b' used to raise ValueError.
        key, value = chunk.split('=', 1)
        params[str(key)] = value
    return params
def serialize_get_query(connection, query):
    """Serialize a DB result proxy into a Flask JSON success payload.

    Closes *connection* on success; on any failure returns error_json()
    (which is a plain dict, not a jsonify Response).
    """
    jsonish = dict()
    jsonish["success"] = True
    jsonish["result"] = []
    try:
        for t in query.fetchall(): # returns a tuple per row
            tmp_dict = {}
            for index, key in enumerate(query.keys()):
                tmp_dict[key] = t[index]
            jsonish["result"].append(tmp_dict)
        # NOTE(review): close() returns None, so `connection` is rebound to
        # None here — confirm the rebinding is intentional.
        connection = connection.close()
        return jsonify(jsonish)
    except:
        # NOTE(review): bare except hides the real error and leaks the
        # connection when fetchall() fails — consider narrowing and logging.
        return error_json()
def error_json(custom=None):
    """Build a failure payload.

    NOTE(review): unlike success_json this returns a plain dict (no jsonify),
    and when *custom* is given "result" becomes a string instead of a list —
    confirm both inconsistencies are intentional before relying on them.
    """
    jsonish = dict()
    jsonish["success"] = False
    jsonish["result"] = []
    if custom:
        jsonish["result"] = str(custom)
    return (jsonish)
def success_json(**kwargs):
    """Build a Flask JSON response with success=True.

    Any keyword arguments are appended as one dict inside "result".
    """
    payload = {"success": True, "result": []}
    if kwargs:
        payload["result"].append(kwargs)
    return jsonify(payload)
|
989,674 | 5fa51448fd07c1c955b7033a0ca00de013f8b917 | # create a new empty set
s = set()
# adding to set (value)
s.add(1)
s.add(3)
s.add(0)
s.add(42)
s.add(12)
# no element appears twice
s.add(3)
#remove elements (value)
s.remove(12)
print(s)
# len()
print(f"length of s: {len(s)}") |
989,675 | fb2e4271df60fd9c868c36e3caa9266a8a9498b1 | x,y = input().split()
numHayBales = int(x)
farmersPurchasing = int(y)
prices = []
for i in range (farmersPurchasing):
prices.append(int(input()))
prices.sort()
totals = []
for i in range (len(prices)):
if farmersPurchasing-i > numHayBales:
totals.append([(prices[i]*(numHayBales)),prices[i]])
else:
totals.append([(prices[i]*(farmersPurchasing-i)),prices[i]])
totals.sort(reverse=True)
print (totals[0][1],end=" ")
print (totals[0][0],end=" ")
|
989,676 | 5a750c71a7ae32f4aed4dd0878861582f0289f5c | from test_plus import TestCase
from ..factories import BlogCommentFactory
class BlogCommentTestCase(TestCase):
    def test_get_descendants_reversely(self):
        """A parent with 10 children reports all 10 via get_descendants_reversely().

        NOTE(review): the queryset is compared against batch-creation order —
        assumes the method's "reverse" ordering matches it; confirm.
        """
        parent = BlogCommentFactory()
        descendants = BlogCommentFactory.create_batch(10, parent=parent)
        self.assertEqual(parent.get_descendants_reversely().count(), 10)
        self.assertQuerysetEqual(parent.get_descendants_reversely(), map(repr, descendants))
|
989,677 | 49e7c40869ee10a8dcbbe866fedf9458dd7b0b04 | import os
from Bio import SeqIO
from snapgene_reader import snapgene_file_to_seqrecord
def load_records(path):
    """Load Biopython SeqRecords from one file path or a list/tuple of paths.

    Supported extensions: .fa (FASTA), .gb/.gbk (GenBank), .dna (SnapGene);
    matching is now case-insensitive (.GB works too). Records without a usable
    id get one derived from the file path, with an index suffix when the file
    holds several records. Unknown extensions raise KeyError, as before.
    """
    if isinstance(path, (list, tuple)):
        # flatten: load each path recursively
        return [record for p in path for record in load_records(p)]
    # only the extension matters; '_' marks the unused stem
    _, extension = os.path.splitext(path)
    fmt = {".fa": "fasta", ".gb": "genbank", ".gbk": "genbank", ".dna": "snapgene"}[
        extension.lower()
    ]
    if fmt == "snapgene":
        records = [snapgene_file_to_seqrecord(path)]
    else:
        records = list(SeqIO.parse(path, fmt))
    for i, record in enumerate(records):
        # replace placeholder ids with a path-derived, filesystem-safe id
        if str(record.id) in ["None", "", "<unknown id>", ".", " "]:
            record.id = path.replace("/", "_").replace("\\", "_")
            if len(records) > 1:
                record.id += "_%04d" % i
    return records
|
989,678 | b6050a0a2455b8c2cc99e4ac175f927d6af64be5 | #!/usr/bin/python3
"""
Pascal Triangle
"""
def pascal_triangle(n):
    """Return Pascal's triangle as a list of integer rows.

    Args:
        n: number of rows to build (always an integer).

    Returns:
        An empty list if n <= 0; otherwise n rows, where row i (0-based) has
        i + 1 entries and each interior entry is the sum of the two above it.
    """
    triangle = []
    # FIX: the old inner loop rebound the parameter `n` for interior values,
    # shadowing the argument; a distinct name keeps the code honest.
    for size in range(1, n + 1):
        row = [1] * size  # edges are always 1
        prev = triangle[-1] if triangle else []
        for j in range(1, size - 1):
            row[j] = prev[j - 1] + prev[j]
        triangle.append(row)
    return triangle
|
989,679 | 87b8189a0d4a78553e3467bc6437c4cd8fe3721c | a = float(input(" Digite um numero: "))
b = float(input(" Digite um numero: "))
c = float(input(" Digite um numero: "))
x = (a ** 2 + b ** 2 + c ** 2) / (a + b + c)
print (round(x , 7)) |
989,680 | 1b3d6bd994058bb280bf5c64953d715cf45774a0 | # F strings
import math
me = "JITU"
a1 = 3
a12 = "this is %s %s"%(me, a1)
a = "This is {1} {0}"
b = a.format(me, a1)
print(b)
print(a1)
a = f"this is {me} {a1} {math.cos(65)}"
# time
print(a)
|
989,681 | 650528d43328c1beac7b98961885734564296653 | #Faça um Programa que receba
# uma quantidade pré determinada de notas e mostre as notas e a média na tela
# a = False
# while a == False:
# qtd = input('Informe quantas notas você gostaria de analisar: ')
# try:
# qtd = int(qtd)
# if qtd > 0:
# a == True
# break
# else:
# a = False
# except:
# a = False
#None === valor vazio
# Keep asking until a positive integer count of grades is provided.
qtd = 0
while qtd <= 0:
    try:
        qtd = int(input('Informe quantas notas você gostaria de analisar: '))
    except:
        qtd = 0
#print(qtd)

a = 0
nota = 0
nota2 = []
# Collect exactly `qtd` non-negative grades, re-prompting on invalid input.
while a < qtd:
    try:
        nota = float(input('Insira a nota: '))
        # nota = float(nota)
        if nota >= 0:
            nota2.append(nota)
            a += 1
        # else:
        #     nota = 0
    except:
        # nota = 0
        pass
print(nota2)

# Sum the grades and compute the average.
soma = 0
for i in nota2:
    soma += i
media = soma/qtd

for i in nota2:
    print('Nota: %.2f' % (i))
print('Média das notas: %.2f ' % (media))

# import math
# print(math.pi)
989,682 | 86a30d544a908e7a0a7565e8417d970962f06335 |
from bs4 import BeautifulSoup
from catalog.models import Product
import requests
from decimal import Decimal
from abc import ABC, abstractmethod
class AbstractParser(ABC):
    """Interface that every site parser must implement.

    NOTE(review): `_scrap_product_attribute_value` is declared twice below and
    `_scrap_product_desription` is misspelled; both are kept byte-for-byte to
    preserve the existing interface.
    """

    @abstractmethod
    def parse(self):
        pass

    @abstractmethod
    def get_soup(self):
        pass

    @abstractmethod
    def subscribe_for_parsed_product(self, subscriber):
        pass

    @abstractmethod
    def unsubscribe(self, subscriber):
        pass

    @abstractmethod
    def _get_product_dict(self, product):
        pass

    @abstractmethod
    def _scrap_product_attribute_value(self, product):
        pass

    @abstractmethod
    def _scrap_product_page(self, href):
        pass

    @abstractmethod
    def _scrap_product_desription(self, item):
        pass

    @abstractmethod
    def _scrap_product_attribute_value(self, item):
        pass

    @abstractmethod
    def _scrap_product_files(self, item):
        pass

    @abstractmethod
    def _scrap_images(self, soup):
        pass
class BaseParser(AbstractParser):
    """Base parser implementation.

    Holds the shared fields and helper methods. Subclasses must override the
    class-level constants and ``parse``; if the required fields/methods were
    not overridden, ``__init__`` raises an exception.
    """

    URL = ""                   # catalogue page to scrape
    URL_BODY = ""              # site root, used to resolve relative links
    PRODUCT_ID_DATA_ATTR = ""  # data-attribute carrying the product id

    def __init__(self):
        self._soup = self._get_base_soup()
        # Subscribers that must be notified when a new product appears.
        self._subs = list()
        # Dict handed to subscribers on each notification.
        self._product_for_import = dict()
        self._validate_required_fields()

    def parse(self):
        """Entry point every class derived from Base must implement."""
        raise NotImplementedError("Метод parse должен быть переопределен")

    def get_soup(self, url):
        """Fetch *url* and return a BeautifulSoup instance ready for parsing.

        Raises:
            ConnectionError: when the response status is not 200.
        """
        request = requests.get(url)
        if request.status_code != 200:
            raise ConnectionError(f"Не был получен ответ по адресу: {url}")
        # NOTE(review): no explicit parser is given, so bs4 picks the "best"
        # one installed — parse trees may differ between environments.
        return BeautifulSoup(request.content.decode('utf-8','ignore'))

    def subscribe_for_parsed_product(self, subscriber):
        """Subscribe *subscriber* to parsed-product updates.

        Args:
            subscriber: object that must define an ``on_notify`` method.
        """
        # Verify the subscriber exposes the required callback.
        import_product = getattr(subscriber, "on_notify", None)
        if not import_product:
            raise NotImplementedError("Каждый подписчик должен обладать методом on_notify!")
        self._subs.append(subscriber)

    def unsubscribe(self, subscriber):
        self._subs.remove(subscriber)

    def _notify(self):
        # Each subscriber receives its own copy so one cannot mutate
        # another's data.
        for subscriber in self._subs:
            subscriber.on_notify(self._product_for_import.copy())

    def _validate_required_fields(self):
        """Raise unless the subclass overrode every required field."""
        is_valid = True
        if self.URL is None or self.URL == "":
            is_valid = False
        if self.URL_BODY is None or self.URL_BODY == "":
            is_valid = False
        if self.PRODUCT_ID_DATA_ATTR is None or self.PRODUCT_ID_DATA_ATTR == "":
            is_valid = False
        if self._soup is None:
            is_valid = False
        if self._product_for_import is None:
            is_valid = False
        # _subs may legitimately be empty right after construction.
        if not is_valid:
            raise NotImplementedError("Не все обязательные поля класса были переопределены")

    def _get_id(self, product):
        """Return the product id from its data attribute, or a fresh DB id."""
        id_ = product.attrs.get(self.PRODUCT_ID_DATA_ATTR, None)
        if not id_:
            # Block carries no identifier — allocate the next id from the DB.
            return Product.get_next()
        try:
            result = int(id_)
        except ValueError:
            result = Product.get_next()
        return result

    def _get_price(self, value):
        """Parse a price string (digit groups possibly space-separated) into Decimal.

        Raises:
            TypeError: when the cleaned string is not a valid price.
        """
        # Strip spaces used as thousand separators.
        value = value.replace(" ", "")
        try:
            return Decimal(value)
        except (ValueError, ArithmeticError):
            # BUG FIX: Decimal raises decimal.InvalidOperation (a subclass of
            # ArithmeticError, not ValueError) on malformed input, so the old
            # `except ValueError` never fired and the TypeError below was
            # unreachable.
            raise TypeError("Товар не содержит цены")

    def _clear(self, value: str) -> str:
        """Remove newline and tab characters from *value*.

        Args:
            value (str): string possibly containing whitespace characters.

        Returns:
            str: the string without newline/tab characters.
        """
        return value.replace("\n", "").replace("\t", "")

    def _get_base_soup(self):
        # Soup of the configured catalogue start page.
        return self.get_soup(self.URL)
|
989,683 | 9f8a50a8785f39b87c56107127b8fb7e789bb2bb | #!/usr/bin/env python3
import subprocess, re, sys, glob
from collections import defaultdict
path = 'hfo2/**/*.rs'
if len(sys.argv) > 1:
path = sys.argv[1]
if subprocess.call('which count-unsafe > /dev/null 2>&1', shell=True) != 0:
print('''Please install count-unsafe by\n
`rustup update nightly && cargo +nightly install --git https://github.com/Medowhill/count-unsafe`''')
exit(-1)
if subprocess.call('which cloc > /dev/null 2>&1', shell=True) != 0:
print('''Please install cloc by `apt install cloc`''')
exit(-1)
space = re.compile(r'\s+')
unsafes = defaultdict(lambda: 0)
slocs = {}
for file in glob.glob(path, recursive=True):
for line in subprocess.check_output(['count-unsafe', file], universal_newlines=True).splitlines()[1:]:
file, begin, end, cnt, ty = line.split(',')
unsafes[file] += int(cnt)
stat = subprocess.check_output(['cloc', file], universal_newlines=True).splitlines()[-2].strip()
sloc = int(space.split(stat)[-1])
slocs[file] = sloc
print(f'{file}: {unsafes[file]}/{sloc} = {unsafes[file]*100//sloc}%')
print('Total:')
unsafe_total = sum(unsafes.values())
sloc_total = sum(slocs.values())
print(f'{unsafe_total}/{sloc_total} = {unsafe_total*100//sloc_total}%')
|
989,684 | 63ec33b83c89f75b121372a7b4390db14e3fb8a3 | import pypboy
import pygame
import game
import config
class Module(pypboy.SubModule):
label = " Weapons "
def __init__(self, *args, **kwargs):
super(Module, self).__init__((config.WIDTH, config.HEIGHT), *args, **kwargs)
handlers = []
item_names = []
INVENTORY = [
Weapon('Ranger Sequoia','images/inventory/RangerSequoia.png',62,30,104,100,''),
Weapon('Anti-materiel rifle','images/inventory/flamer.png',0,0,0,0,''),
Weapon('Pulse Grenade (2)','images/inventory/flamer.png',0,0,0,0,'')
]
selected = 0
for i in INVENTORY:
handlers.append(self.change_items)
item_names.append(i.name)
self.menu = pypboy.ui.Menu(200, item_names, handlers, selected, 15)
self.menu.rect[0] = 4
self.menu.rect[1] = 60
self.add(self.menu)
#show weapon image
weapon_to_display = INVENTORY[selected]
weapon_to_display.rect = weapon_to_display.image.get_rect()
weapon_to_display.image = weapon_to_display.image.convert()
weapon_to_display.rect[0] = 189
weapon_to_display.rect[1] = 40
print "RECTANGLE %s %s %s %s" % (weapon_to_display.rect[0],weapon_to_display.rect[1],weapon_to_display.rect[2],weapon_to_display.rect[3])
#Show Weapon stats - Value
#text = config.FONTS[14].render("%s" %(weapon_to_display.value), True, (95, 255, 177), (0, 0, 0))
#pygame.draw.line(weapon_to_display.image, (95, 255, 177), (config.WIDTH - 13, weapon_to_display.rect[1] + weapon_to_display.image.get_rect()[3] + 5 ), (config.WIDTH - 13, weapon_to_display.rect[1] + weapon_to_display.image.get_rect()[3] + 25), 2) #End of title Verticle bar
#weapon_to_display.image.blit(text, (config.WIDTH - (text.get_width() + 5), weapon_to_display.rect[1] + weapon_to_display.image.get_rect()[3] + 9))
#pygame.draw.line(weapon_to_display.image, (95, 255, 177), (config.WIDTH - 50, 15), (config.WIDTH - 13, weapon_to_display.rect[1] + weapon_to_display.image.get_rect()[3] + 5 ), 2) # Horizontal Bar
#pygame.draw.line(weapon_to_display.image, (95, 255, 177), (config.WIDTH - 13, 200 ), (config.WIDTH - 13, 215), 2) #End of title Verticle bar
#weapon_to_display.image.blit(text, (config.WIDTH - (text.get_width() + 5), weapon_to_display.rect[1] + weapon_to_display.image.get_rect()[3] + 9))
#pygame.draw.line(weapon_to_display.image, (95, 255, 177), (config.WIDTH - 50, 15), (config.WIDTH - 13, weapon_to_display.rect[1] + weapon_to_display.image.get_rect()[3] + 5 ), 2) # Horizontal Bar
#Test starts here
#Value
pygame.draw.line(weapon_to_display.image, (95, 255, 177), (weapon_to_display.rect[2] - 2, 200-weapon_to_display.rect[1]), (weapon_to_display.rect[2] -2, 220-weapon_to_display.rect[1]), 2)#Verticle Bar
pygame.draw.line(weapon_to_display.image, (95, 255, 177), (weapon_to_display.rect[2] - 85, 200-weapon_to_display.rect[1]), (weapon_to_display.rect[2], 200-weapon_to_display.rect[1]), 2) # Horizontal Bar
text = config.FONTS[14].render("25", True, (95, 255, 177), (0, 0, 0))
weapon_to_display.image.blit(text, (weapon_to_display.rect[2] - 0 - (text.get_width() + 5), 204-weapon_to_display.rect[1]))
text = config.FONTS[14].render("VAL", True, (95, 255, 177), (0, 0, 0))
weapon_to_display.image.blit(text, (weapon_to_display.rect[2] - 0 - 85 + 2, 204-weapon_to_display.rect[1]))
#Weight
pygame.draw.line(weapon_to_display.image, (95, 255, 177), (weapon_to_display.rect[2] - 95, 200-weapon_to_display.rect[1]), (weapon_to_display.rect[2] - 95, 220-weapon_to_display.rect[1]), 2)#Verticle Bar
pygame.draw.line(weapon_to_display.image, (95, 255, 177), (weapon_to_display.rect[2] - 95 - 85, 200-weapon_to_display.rect[1]), (weapon_to_display.rect[2] - 95, 200-weapon_to_display.rect[1]), 2) # Horizontal Bar
text = config.FONTS[14].render("4", True, (95, 255, 177), (0, 0, 0))
weapon_to_display.image.blit(text, (weapon_to_display.rect[2] - 95 - (text.get_width() + 5), 204-weapon_to_display.rect[1]))
text = config.FONTS[14].render("WG", True, (95, 255, 177), (0, 0, 0))
weapon_to_display.image.blit(text, (weapon_to_display.rect[2] - 95 - 85 + 2, 204-weapon_to_display.rect[1]))
#Damage
pygame.draw.line(weapon_to_display.image, (95, 255, 177), (weapon_to_display.rect[2] - 190, weapon_to_display.rect[3] - 80 - weapon_to_display.rect[1]), (weapon_to_display.rect[2] - 190, weapon_to_display.rect[3] - 60 - weapon_to_display.rect[1]), 2)#Verticle Bar
pygame.draw.line(weapon_to_display.image, (95, 255, 177), (weapon_to_display.rect[2] - 190 - 85, 200-weapon_to_display.rect[1]), (weapon_to_display.rect[2] - 190, 200-weapon_to_display.rect[1]), 2) # Horizontal Bar
text = config.FONTS[14].render("%s" %(weapon_to_display.damage), True, (95, 255, 177), (0, 0, 0))
weapon_to_display.image.blit(text, (weapon_to_display.rect[2] - 190 - (text.get_width() + 5), 204-weapon_to_display.rect[1]))
text = config.FONTS[14].render("DAM", True, (95, 255, 177), (0, 0, 0))
weapon_to_display.image.blit(text, (weapon_to_display.rect[2] - 190 - 85 + 2, 204-weapon_to_display.rect[1]))
#Row 2
pygame.draw.line(weapon_to_display.image, (95, 255, 177), (weapon_to_display.rect[2] - 2, 230-weapon_to_display.rect[1]), (weapon_to_display.rect[2] - 2, 250-weapon_to_display.rect[1]), 2)
text = config.FONTS[14].render("-- --", True, (95, 255, 177), (0, 0, 0))
weapon_to_display.image.blit(text, (weapon_to_display.rect[2] - 95 - 85 + 2, 234-weapon_to_display.rect[1]))
pygame.draw.line(weapon_to_display.image, (95, 255, 177), (weapon_to_display.rect[2] - 95 - 85, 230-weapon_to_display.rect[1]), (weapon_to_display.rect[2], 230-weapon_to_display.rect[1]), 2) # Horizontal Bar
#Condition
pygame.draw.line(weapon_to_display.image, (95, 255, 177), (weapon_to_display.rect[2] - 190, 230-weapon_to_display.rect[1]), (weapon_to_display.rect[2] - 190, 250-weapon_to_display.rect[1]), 2)#Verticle Bar
pygame.draw.line(weapon_to_display.image, (95, 255, 177), (weapon_to_display.rect[2] - 190 - 85, 230-weapon_to_display.rect[1]), (weapon_to_display.rect[2] - 190, 230-weapon_to_display.rect[1]), 2) # Horizontal Bar
cndlength = 50
pygame.draw.rect(weapon_to_display.image, (95, 255, 177), (weapon_to_display.rect[2] - 190 - 55,237-weapon_to_display.rect[1],40,12)) #Condition bar
pygame.draw.rect(weapon_to_display.image, (0, 70, 0), (weapon_to_display.rect[2] - 190 - 55 + 40,237-weapon_to_display.rect[1],10,12))#Filler bar
text = config.FONTS[14].render("CND", True, (95, 255, 177), (0, 0, 0))
weapon_to_display.image.blit(text, (weapon_to_display.rect[2] - 190 - 85 + 2, 234-weapon_to_display.rect[1]))
#Test ends here
self.add(weapon_to_display)
    def change_items(self):
        # TODO: stub — intended to cycle which inventory item is displayed;
        # currently only logs to stdout (Python 2 print statement).
        print "Changing"
class Weapon(game.Entity):
    """A weapon item rendered on a full-screen entity surface.

    The surface spans the whole window (config.WIDTH x config.HEIGHT) so the
    stat overlay (VAL/WG/DAM/CND boxes) can be drawn anywhere on it.
    """
    def __init__(self, name, imageloc, damage, weight, value, condition, notes):
        super(Weapon, self).__init__((config.WIDTH, config.HEIGHT))
        self.name = name            # display name of the weapon
        self.imageloc = imageloc    # path to the sprite image on disk
        self.image = pygame.image.load(self.imageloc)
        self.damage = damage        # shown in the DAM box of the overlay
        self.weight= weight         # weight stat (overlay WG box still hard-codes "4" — TODO wire up)
        self.value = value          # trade value (overlay VAL box still hard-codes "25" — TODO wire up)
        self.condition = condition  # condition value; presumably drives the CND bar — confirm
        self.notes = notes          # freeform description text
|
989,685 | e21f2181ee3f9c71277cd65fc5b6ac7c5d5e44cd | # coding=utf-8
# -*- conding=utf-8 -*-
# encoding: utf-8
# --==================================================================
# By 函数的属性:
# ID = "id"
# XPATH = "xpath"
# LINK_TEXT = "link text"
# PARTIAL_LINK_TEXT = "partial link text"
# NAME = "name"
# TAG_NAME = "tag name"
# CLASS_NAME = "class name"
# CSS_SELECTOR = "css selector"
# --==================================================================
from test_case.models import settings
from selenium.webdriver.common.by import By
from .base import Page
import time
class NewContractPage(Page):
    """Page object for the contract-management screens.

    Covers opening the contract tab, filling the new-contract form
    (contract type, customer, amounts, fee rates, auto-debit agreement,
    memo), uploading the attachment files, submitting, and the
    "convert BPO contract to VIP member" flow.
    """
    # Contract-management tab at the bottom of the main page.
    contract_tab_loc = (By.XPATH,'//*[@id="bottomTabs_htgl"]/a')
    botom_frame_loc = 'bottomTabs_Content_Iframe'
    def clickContractTab(self):
        # Open the contract tab, scroll down to it and enter its iframe.
        self.find_element(*self.contract_tab_loc).click()
        self.scrollToElement('id',self.botom_frame_loc) # scroll the page down to the bottom "customer details" area (acts as the scrollbar)
        self.switchToOneFrame(self.botom_frame_loc) # switch into the bottom "customer details" iframe
    # Create a contract and enter its details.
    newContract_loc = (By.ID,'span4') # "new contract" tab
    createNewContract_loc = (By.ID,'btnCreateContract') # "create contract" button
    contract_frame_loc = '//*[@id="layui-layer-iframe3"]' # frame of the create-contract dialog
    def openNewContractPage(self):
        self.find_element(*self.newContract_loc).click()
        self.setWaitTime(20)
        self.find_element(*self.createNewContract_loc).click()
        self.switchWindow()
        self.switchToOneFrameByXpath(self.contract_frame_loc)
    # Contract detail inputs.
    VIP_contract_type_loc = (By.ID,'contractType1') # creditor membership (VIP) contract
    BPO_contract_type_loc = (By.ID,'contractType2') # creditor outsourcing (BPO) contract
    contract_code_loc = 'selContract' # contract-number dropdown
    def inputContractDetail_VIP(self,index=0):
        self.index = index
        self.find_element(*self.VIP_contract_type_loc).click()
        self.getDropdownMenuById(self.contract_code_loc, index)
    def inputContractDetail_BPO(self,index=0):
        self.index = index
        self.find_element(*self.BPO_contract_type_loc).click()
        self.getDropdownMenuById(self.contract_code_loc, index)
    # Select the customer name on the contract.
    contract_name_loc = 'selType'
    def selectContractName(self,index=0):
        self.index = index
        self.getDropdownMenuById(self.contract_name_loc, index)
    # Estimated loan amount.
    predit_loanAmt_loc = (By.ID,'txtPreditAmount')
    moveTo_predit_loanAmt_loc = 'txtPreditAmount'
    def inputPreLoanAmt(self,value=100):
        self.value = value
        self.find_element(*self.predit_loanAmt_loc).clear()
        self.find_element(*self.predit_loanAmt_loc).send_keys(value)
    # Earnest (intention) money.
    intention_Amt_loc = (By.ID,'txtFee')
    def inputIntentionAmt(self,value=5000):
        self.value = value
        self.find_element(*self.intention_Amt_loc).clear()
        self.find_element(*self.intention_Amt_loc).send_keys(value)
    # Service fee rates per product line.
    house_rate_loc = (By.ID,'txtHouseRate') # house-mortgage loan
    car_rate_loc = (By.ID,'txtCarRate') # car-mortgage loan
    credit_rate_loc = (By.ID,'txtCreditRate') # credit loan
    creditCard_rate_loc = (By.ID,'txtCreditCardRate') # credit-card loan
    bridge_rate_loc = (By.ID,'txtBridgeRate') # bridge financing
    cmp_rate_loc = (By.ID,'txtCompanyRate') # corporate loan
    def inputServiceRate(self):
        # Fill all six product-line rates with fixed test data.
        self.input_value(self.house_rate_loc,8)
        self.input_value(self.car_rate_loc,10)
        self.input_value(self.credit_rate_loc,5)
        self.input_value(self.creditCard_rate_loc,15)
        self.input_value(self.bridge_rate_loc,0.05)
        self.input_value(self.cmp_rate_loc,1)
    # Contract start date.
    contract_startDate_loc = 'txtServiceStart'
    def selectConStartDate(self,value='2018-01-20'):
        self.value = value
        self.getDateTimePicker(self.contract_startDate_loc,value)
    # Auto-debit (withholding) agreement.
    isassigned_loc = '//*[@id="form1"]/table/tbody/tr[12]/th/span'
    bank1_loc = (By.ID,'bankCard') # bank card number 1
    phone1_loc = (By.ID,'bankCardPhone') # phone number bound to card 1
    accont_bank1_loc = (By.ID,'openBank1') # issuing bank 1
    bank2_loc = (By.ID,'bankCard2') # bank card number 2
    phone2_loc = (By.ID,'bankCardPhone2') # phone number bound to card 2
    accont_bank2_loc = (By.ID,'openBank2') # issuing bank 2
    bank3_loc = (By.ID,'bankCard3') # bank card number 3
    phone3_loc = (By.ID,'bankCardPhone3') # phone number bound to card 3
    accont_bank3_loc = (By.ID,'openBank3') # issuing bank 3
    def inputAgreementDetail(self):
        # Fill three card/phone/issuing-bank triples with fixed test data.
        self.scrollToElement('xpath',self.isassigned_loc)
        self.input_value(self.bank1_loc,'6225142536475869000')
        self.input_value(self.phone1_loc,'13247586921')
        self.input_value(self.accont_bank1_loc,'中国工商银行太仓支行')
        self.input_value(self.bank2_loc,'6225142536475869001')
        self.input_value(self.phone2_loc,'13247586922')
        self.input_value(self.accont_bank2_loc,'中国建设银行西湖支行')
        self.input_value(self.bank3_loc,'6225142536475869002')
        self.input_value(self.phone3_loc,'13247586923')
        self.input_value(self.accont_bank3_loc,'中国交通银行罗山支行')
    # Memo / remarks.
    contract_memo_loc = (By.ID,'txtMemo')
    moveTo_contract_memo_loc = 'txtMemo'
    def inputContractMo(self,value='测试创建新合同!'):
        self.value = value
        self.scrollToElement('id',self.moveTo_contract_memo_loc)
        self.find_element(*self.contract_memo_loc).clear()
        self.find_element(*self.contract_memo_loc).send_keys(value)
    # Upload contract attachments (WebUploader file pickers).
    get_elem_way = 'xpath'
    # elem1 = 'btnUpFile'
    # elem2 = 'btnUpFile2'
    # elem3 = 'btnUpFile3'
    elem1 =(By.XPATH, '//*[@id="picker1"]/div[2]/input')
    elem2 = (By.XPATH,'//*[@id="picker2"]/div[2]/input')
    elem3 = (By.XPATH,'//*[@id="picker3"]/div[2]/input')
    elem4 = (By.XPATH,'//*[@id="pickerSignFile"]/div[2]/input')
    elem5 = (By.XPATH,'//*[@id="pickerIdCardFile"]/div[2]/input')
    elem6 = (By.XPATH,'//*[@id="pickerBankCardFile"]/div[2]/input')
    elem7 = (By.XPATH,'//*[@id="pickerContractPaperFile"]/div[2]/input')
    elem8 = (By.XPATH,'//*[@id="pickerInfoServiceFile"]/div[2]/input')
    file_path = settings.Contract_file
    # uploadBtn = (By.ID,'btnUp')
    # uploadBtn2 = (By.ID, 'btnUp2')
    # uploadBtn3 = (By.ID, 'btnUp3')
    uploadBtn = (By.ID,'btnUpload_picker1')
    uploadBtn2 = (By.ID, 'btnUpload_picker2')
    uploadBtn3 = (By.ID, 'btnUpload_picker3')
    uploadBtn4 = (By.ID,'btnUpload_pickerSignFile')
    uploadBtn6 = (By.ID,'btnUpload_pickerBankCardFile')
    uploadBtn8 = (By.ID,'btnUpload_pickerInfoServiceFile')
    moveto_btn3_loc = 'btnUpload_picker3'
    # Older upload flow, kept for reference:
    # def uploadContractFile(self):
    #     self.uploadFile(self.getWay,self.elem1,self.file_path) # special-approval attachment
    #     self.find_element(*self.uploadBtn).click()
    #     self.setWaitTime(30)
    #     self.uploadFile(self.getWay,self.elem2,self.file_path) # business licence / legal-person attachment
    #     self.find_element(*self.uploadBtn2).click()
    #     self.setWaitTime(30)
    #     self.uploadFile(self.getWay,self.elem3,self.file_path) # original contract attachment
    #     self.find_element(*self.uploadBtn3).click()
    #     self.setWaitTime(30)
    def uploadContractFile(self):
        # Upload the same fixture file into every required picker, waiting
        # for each picker's upload button to appear between steps.
        self.uploadFile2(self.elem1,self.file_path)
        self.click_element(*self.uploadBtn)
        time.sleep(5)
        self.waitElmentUntill(50,self.elem2)
        self.uploadFile2(self.elem2,self.file_path)
        self.waitElmentUntill(20,self.uploadBtn2)
        time.sleep(1)
        self.click_element(*self.uploadBtn2)
        self.uploadFile2(self.elem3,self.file_path)
        self.waitElmentUntill(20,self.uploadBtn3)
        time.sleep(1)
        self.click_element(*self.uploadBtn3)
        self.scrollToElement('id',self.moveto_btn3_loc)
        self.uploadFile2(self.elem4,self.file_path)
        self.waitElmentUntill(20,self.uploadBtn4)
        time.sleep(1)
        self.click_element(*self.uploadBtn4)
        self.uploadFile2(self.elem6,self.file_path)
        self.waitElmentUntill(20,self.uploadBtn6)
        time.sleep(1)
        self.click_element(*self.uploadBtn6)
        self.uploadFile2(self.elem8,self.file_path)
        self.waitElmentUntill(20,self.uploadBtn8)
        time.sleep(1)
        self.click_element(*self.uploadBtn8)
        time.sleep(1)
    # Submit the contract.
    moveto_save_btn_loc = 'btnSave'
    save_loc = (By.ID,'btnSave')
    def submitContract(self):
        self.find_element(*self.save_loc).click()
    #=============================================================================================================================================
    # Convert to VIP member.
    popWinMax_loc = (By.XPATH,'//*[@id="layui-layer3"]/span[1]/a[2]')
    change_VIPContract_loc = (By.PARTIAL_LINK_TEXT,"转会员") # "convert to member" button
    pop_new_frame_loc = "//iframe[contains(@id, 'layui-layer-iframe')]"
    def openChangeToVIPContract(self):
        self.find_element(*self.newContract_loc).click()
        time.sleep(1)
        self.find_element(*self.change_VIPContract_loc).click()
        self.switchWindow()
        time.sleep(1)
        self.switchToOneFrameByXpath(self.pop_new_frame_loc)
    #==============================================================================================================================================
    # Create a VIP (membership) contract end-to-end.
    def createVIPContract(self):
        self.clickContractTab()
        self.openNewContractPage()
        time.sleep(1)
        self.inputContractDetail_VIP()
        self.selectContractName()
        self.selectConStartDate()
        self.inputPreLoanAmt()
        self.inputAgreementDetail()
        self.inputContractMo()
        self.uploadContractFile()
        self.submitContract()
        time.sleep(3)
        self.close_alert()
        time.sleep(1)
    # After creation, return to the contract-list page.
    def gobackToContractlist(self):
        self.switchToParentFrame()
    #=======================================================================================================================
    # Create an ordinary outsourcing (BPO) contract end-to-end.
    def createBPOContract(self):
        self.clickContractTab()
        self.openNewContractPage()
        time.sleep(1)
        self.inputContractDetail_BPO()
        self.selectContractName()
        self.selectConStartDate()
        self.inputPreLoanAmt()
        self.inputIntentionAmt()
        self.inputContractMo()
        self.inputAgreementDetail()
        self.scrollToElement('id',self.moveto_save_btn_loc)
        self.uploadContractFile()
        self.submitContract()
        time.sleep(3)
        self.close_alert()
        time.sleep(1)
        #self.close_alert()
    # Create a product-line outsourcing (BPO) contract end-to-end
    # (same as createBPOContract plus the per-product service rates).
    def create_productline_BPOContract(self):
        self.clickContractTab()
        self.openNewContractPage()
        time.sleep(1)
        self.inputContractDetail_BPO()
        self.selectContractName()
        self.selectConStartDate()
        self.inputPreLoanAmt()
        self.inputIntentionAmt()
        self.inputServiceRate()
        self.inputContractMo()
        self.inputAgreementDetail()
        self.scrollToElement('id',self.moveto_save_btn_loc)
        self.uploadContractFile()
        self.submitContract()
        time.sleep(3)
        self.close_alert()
        time.sleep(1)
        #self.close_alert()
    #====================================================================================================================
    # Convert an outsourcing (BPO) contract to a VIP contract.
    def BPOContractTransToVIP(self):
        self.clickContractTab()
        self.openChangeToVIPContract()
        self.scrollToElement('id',self.moveTo_predit_loanAmt_loc)
        self.inputPreLoanAmt()
        self.scrollToElement('id',self.moveTo_contract_memo_loc)
        self.inputContractMo()
        self.uploadContractFile()
        self.submitContract()
        time.sleep(3)
        self.close_alert()
    # ============================================================================================
    # Verify the test-case execution result (work in progress).
    fapo_btn_loc = (By.XPATH,'//*[@id="main"]/div[1]/div[2]/div[1]/div/div/div[2]/div[2]/table/tbody/tr/td[1]/div/div')
    def verifyContractCreateSucess(self):
        return self.find_element(*self.fapo_btn_loc).text
989,686 | 1ca080df75a07e3dc7b6d4219ac0d3de037e2c4c | import boto3
import logging
import os
import json
# AWS clients are created at module import time so warm Lambda invocations
# re-use the same connections.
dynamodb_client = boto3.client('dynamodb', region_name="us-east-1")
sns_client = boto3.client('sns', region_name="us-east-1")

CUSTOMERS_TABLE_NAME = os.getenv('CUSTOMERS_TABLE', "functions_table")
# NOTE(review): this looks like a topic *name*, but publish() passes it as
# TopicArn, which normally requires a full ARN — verify the deployment.
SNS_TOPIC_ARN_FUNCTION_MODIFIED = "function_modified"
# Feature switch: publish an SNS message for every new customer object.
with_sns_msg = True
def notify_new_customer(key):
    """Publish an SNS message announcing the newly uploaded customer object *key*."""
    payload = json.dumps({'newCustomer': key})
    sns_client.publish(TopicArn=SNS_TOPIC_ARN_FUNCTION_MODIFIED, Message=payload)
    logging.info('sns message published')
def lambda_handler(event, context):
    """Handle S3 ObjectCreated events for the customer bucket.

    For every record in the event, persists the object key into the
    customers DynamoDB table and, when ``with_sns_msg`` is set, publishes
    an SNS notification. Returns the event unchanged for chaining.
    """
    for record in event['Records']:
        key = record['s3']['object']['key']
        logging.debug("Got a new customer file")
        # BUG FIX: the low-level DynamoDB client requires typed attribute
        # values; a bare string item value raises ParamValidationError.
        dynamodb_client.put_item(
            TableName=CUSTOMERS_TABLE_NAME,
            Item={"key": {"S": key}},
        )
        if with_sns_msg:
            notify_new_customer(key)
    return event
# if __name__ == "__main__":
# lambda_namdler({})
|
def bubble(a):
    """Sort the list *a* in place with bubble sort and return it.

    Each pass bubbles the largest remaining element to the end.  A pass
    that performs no swaps means the list is already sorted, so we stop
    early (O(n) best case instead of always O(n^2)).
    """
    length = len(a)
    for i in range(length):
        swapped = False
        for j in range(length - i - 1):
            if a[j] > a[j + 1]:
                a[j + 1], a[j] = a[j], a[j + 1]
                swapped = True
        if not swapped:
            break
    return a
# Read whitespace-separated integers from stdin, bubble-sort them,
# and print the result one element per line.
print("Enter array elements")
numbers = [int(token) for token in input().split()]
print("Sorted array")
for number in bubble(numbers):
    print(number)
|
989,688 | b1b630b0a409a6ad11280e8c4c45d0fc05920f32 | # --------------
#Importing header files
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#Path of the file is stored in the variable path
#Code starts here
# --- Data loading ---
data = pd.read_csv(path)
data.rename(columns={'Total': 'Total_Medals'}, inplace=True)
data.head(10)

# --- Summer or Winter: event each country performed better in ---
data['Better_Event'] = np.where(data['Total_Summer'] > data['Total_Winter'], 'Summer', 'Winter')
data['Better_Event'] = np.where(data['Total_Summer'] == data['Total_Winter'], 'Both', data['Better_Event'])
better_event = data['Better_Event'].value_counts().idxmax()

# --- Top 10 ---
# .copy() so later column work never warns about chained assignment.
# (The former `top_countries.drop('Total_Medals', axis=1)` was a no-op whose
# return value was discarded — and the column is still needed by top_ten
# below — so the line is removed.)
top_countries = data[['Country_Name', 'Total_Summer', 'Total_Winter', 'Total_Medals']].copy()
top_countries = top_countries[:-1]  # drop the trailing grand-total row

def top_ten(top_countries, col):
    """Return the Country_Name values of the 10 largest entries in *col*."""
    return list(top_countries.nlargest(10, col)['Country_Name'])

top_10_summer = top_ten(top_countries, 'Total_Summer')
top_10_winter = top_ten(top_countries, 'Total_Winter')
top_10 = top_ten(top_countries, 'Total_Medals')
common = list(set(top_10_summer).intersection(top_10_winter).intersection(top_10))

# --- Plotting top 10 ---
fig, ax = plt.subplots(3)
summer_df = data[data['Country_Name'].isin(top_10_summer)].copy()
ax[0].bar(summer_df['Country_Name'], summer_df['Total_Summer'])
winter_df = data[data['Country_Name'].isin(top_10_winter)].copy()
ax[1].bar(winter_df['Country_Name'], winter_df['Total_Winter'])
top_df = data[data['Country_Name'].isin(top_10)].copy()
# BUG FIX: the third chart is labelled Total_Medals but plotted Total_Winter.
ax[2].bar(top_df['Country_Name'], top_df['Total_Medals'])
ax[0].set(xlabel='Country_Name', ylabel='Total_Summer')
ax[1].set(xlabel='Country_Name', ylabel='Total_Winter')
ax[2].set(xlabel='Country_Name', ylabel='Total_Medals')

# --- Top performing countries: share of golds among each one's medals ---
summer_df['Golden_Ratio'] = summer_df['Gold_Summer'] / summer_df['Total_Summer']
summer_max_ratio = summer_df['Golden_Ratio'].max()
summer_country_gold = summer_df['Country_Name'][summer_df['Golden_Ratio'].idxmax()]
winter_df['Golden_Ratio'] = winter_df['Gold_Winter'] / winter_df['Total_Winter']
winter_max_ratio = winter_df['Golden_Ratio'].max()
winter_country_gold = winter_df['Country_Name'][winter_df['Golden_Ratio'].idxmax()]
top_df['Golden_Ratio'] = top_df['Gold_Total'] / top_df['Total_Medals']
top_max_ratio = top_df['Golden_Ratio'].max()
top_country_gold = top_df['Country_Name'][top_df['Golden_Ratio'].idxmax()]

# --- Best in the world: weighted points (gold 3, silver 2, bronze 1) ---
data_1 = data[:-1].copy()
data_1['Total_Points'] = data_1['Gold_Total'] * 3 + data_1['Silver_Total'] * 2 + data_1['Bronze_Total']
most_points = data_1['Total_Points'].max()
best_country = data_1['Country_Name'][data_1['Total_Points'].idxmax()]

# --- Plot the best country's medal breakdown as a stacked bar ---
best = data[data['Country_Name'] == best_country]
best = best[['Gold_Total', 'Silver_Total', 'Bronze_Total']]
best.plot.bar(stacked=True)
locs, labels = plt.xticks()
plt.xticks(locs, [best_country for _ in locs], rotation=45, horizontalalignment='right')
plt.xlabel('United States')
plt.ylabel('Medals Tally')
|
989,689 | 49567b26eabfe372ab6b8a06b14270e427b413e8 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Time : 5/21/2018 11:05 AM
# @Author : Siqi
#! /usr/bin/env python
import tensorflow as tf
import numpy as np
import os
import time
import datetime
import data_helpers
from sklearn.metrics import precision_score, recall_score, f1_score
from Bi_LSTM_Model import BiLSTM
from tensorflow.contrib import learn
from gensim.models.keyedvectors import KeyedVectors
import sklearn.metrics
import time
np.set_printoptions(threshold=np.inf)

# Parameters
# ==================================================

# Data loading params
tf.flags.DEFINE_float("dev_sample_percentage", .1, "Percentage of the training data to use for validation")
tf.flags.DEFINE_string("train_label_0", "./train/label_0.txt", "Data source for label 0")
tf.flags.DEFINE_string("train_label_1", "./train/label_1.txt", "Data source for label 1")
tf.flags.DEFINE_string("train_label_2", "./train/label_2.txt", "Data source for label 2")
tf.flags.DEFINE_string("train_label_3", "./train/label_3.txt", "Data source for label 3")
tf.flags.DEFINE_string("train_label_4", "./train/label_4.txt", "Data source for label 4")
tf.flags.DEFINE_string("train_label_5", "./train/label_5.txt", "Data source for label 5")

# Model Hyperparameters
tf.flags.DEFINE_integer("embedding_dim", 128, "Dimensionality of character embedding (default: 128)")
tf.flags.DEFINE_float("dropout_keep_prob", 1, "Dropout keep probability (default: 0.5)")
tf.flags.DEFINE_float("l2_reg_lambda", 0.0, "L2 regularization lambda (default: 0.0)")
tf.flags.DEFINE_float("learning_rate", 0.001, "learning_rate")

# LSTM
tf.flags.DEFINE_integer("hidden_sizes", 128, "Number of hidden sizes (default: 128)")
#tf.flags.DEFINE_integer("attention_size", 300, "ATTENTION_SIZE")

# Training parameters
tf.flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
tf.flags.DEFINE_integer("num_epochs", 200, "Number of training epochs (default: 200)")
tf.flags.DEFINE_integer("evaluate_every", 100, "Evaluate model on dev set after this many steps (default: 100)")
tf.flags.DEFINE_integer("checkpoint_every", 100, "Save model after this many steps (default: 100)")
tf.flags.DEFINE_integer("num_checkpoints", 5, "Number of checkpoints to store (default: 5)")

# Misc Parameters
tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")

FLAGS = tf.flags.FLAGS
# BUG FIX: FLAGS.__flags is not accessible on TF >= 1.5 (raises
# AttributeError). flag_values_dict() both parses the flags and returns a
# plain {name: value} mapping, so iterate that instead.
print("\nParameters:")
for attr, value in sorted(FLAGS.flag_values_dict().items()):
    print("{}={}".format(attr.upper(), value))
print("")
# Load the six per-class text files and their one-hot labels.
x_text, y = data_helpers.load_data_and_labels(FLAGS.train_label_0, FLAGS.train_label_1,
                                              FLAGS.train_label_2, FLAGS.train_label_3,
                                              FLAGS.train_label_4, FLAGS.train_label_5
                                              )
# Map each text to a fixed-length sequence of vocabulary ids, padded to the
# longest document in the corpus.
max_document_length = max([len(x.split(" ")) for x in x_text])
vocab_processor = learn.preprocessing.VocabularyProcessor(max_document_length)
x = np.array(list(vocab_processor.fit_transform(x_text)))
# Randomly shuffle data (fixed seed so the split is reproducible).
np.random.seed(10)
shuffle_indices = np.random.permutation(np.arange(len(y)))
x_shuffled = x[shuffle_indices]
y_shuffled = y[shuffle_indices]
# Split train/dev set: the last dev_sample_percentage of the shuffled data
# becomes the dev set.
dev_sample_index = -1 * int(FLAGS.dev_sample_percentage * float(len(y)))
x_train, x_dev = x_shuffled[:dev_sample_index], x_shuffled[dev_sample_index:]
y_train, y_dev = y_shuffled[:dev_sample_index], y_shuffled[dev_sample_index:]
print("Vocabulary Size: {:d}".format(len(vocab_processor.vocabulary_)))
print("Train/Dev split: {:d}/{:d}".format(len(y_train), len(y_dev)))
# ==================================================
with tf.Graph().as_default():
    session_conf = tf.ConfigProto(
        allow_soft_placement=FLAGS.allow_soft_placement,
        log_device_placement=FLAGS.log_device_placement)
    sess = tf.Session(config=session_conf)
    with sess.as_default():
        # Fix the numpy and graph-level seeds so runs are repeatable.
        np.random.seed(1)
        tf.set_random_seed(2)
        cnn = BiLSTM(
            input_embedding_size = FLAGS.embedding_dim,
            sequence_length = x_train.shape[1],
            #hidden_size = FLAGS.num_filters * len(list(map(int, FLAGS.filter_sizes.split(",")))),
            hidden_size=FLAGS.hidden_sizes,
            output_size = y_train.shape[1],
            vocab_size = len(vocab_processor.vocabulary_),
            learning_rate = FLAGS.learning_rate)
        # Define Training procedure: Adam on the model loss, tracking a
        # global step counter.
        global_step = tf.Variable(0, name="global_step", trainable=False)
        optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)
        grads_and_vars = optimizer.compute_gradients(cnn.loss)
        train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)
        # Keep track of gradient values and sparsity (optional)
        grad_summaries = []
        for g, v in grads_and_vars:
            if g is not None:
                grad_hist_summary = tf.summary.histogram("{}/grad/hist".format(v.name), g)
                sparsity_summary = tf.summary.scalar("{}/grad/sparsity".format(v.name), tf.nn.zero_fraction(g))
                grad_summaries.append(grad_hist_summary)
                grad_summaries.append(sparsity_summary)
        grad_summaries_merged = tf.summary.merge(grad_summaries)
        # Output directory for models and summaries (one run per timestamp).
        timestamp = str(int(time.time()))
        out_dir = os.path.abspath(os.path.join(os.path.curdir, "runs", timestamp))
        print("Writing to {}\n".format(out_dir))
        # Summaries for loss and accuracy
        loss_summary = tf.summary.scalar("loss", cnn.loss)
        acc_summary = tf.summary.scalar("accuracy", cnn.accuracy)
        # Train Summaries
        train_summary_op = tf.summary.merge([loss_summary, acc_summary, grad_summaries_merged])
        train_summary_dir = os.path.join(out_dir, "summaries", "train")
        train_summary_writer = tf.summary.FileWriter(train_summary_dir, sess.graph)
        # Dev summaries
        dev_summary_op = tf.summary.merge([loss_summary, acc_summary])
        dev_summary_dir = os.path.join(out_dir, "summaries", "dev")
        dev_summary_writer = tf.summary.FileWriter(dev_summary_dir, sess.graph)
        # Checkpoint directory. Tensorflow assumes this directory already exists so we need to create it
        checkpoint_dir = os.path.abspath(os.path.join(out_dir, "checkpoints"))
        checkpoint_prefix = os.path.join(checkpoint_dir, "model")
        if not os.path.exists(checkpoint_dir):
            os.makedirs(checkpoint_dir)
        saver = tf.train.Saver(tf.global_variables(), max_to_keep=FLAGS.num_checkpoints)
        # Write vocabulary
        vocab_processor.save(os.path.join(out_dir, "vocab"))
        # Initialize all variables
        #sess.run(tf.global_variables_initializer())
        # Optional: initialise the embedding matrix from pre-trained word2vec
        # vectors (disabled; kept for reference).
        # print("Start Loading Embedding!")
        # word2vec = KeyedVectors.load_word2vec_format('GoogleNews-vectors-negative300.bin', binary=True)
        # print("Finish Loading Embedding!")
        # my_embedding_matrix = np.zeros(shape=(len(vocab_processor.vocabulary_), FLAGS.embedding_dim))
        # for word in vocab_processor.vocabulary_._mapping:
        #     id = vocab_processor.vocabulary_._mapping[word]
        #     if word in word2vec.vocab:
        #         my_embedding_matrix[id] = word2vec[word]
        #     else:
        #         my_embedding_matrix[id] = np.random.uniform(low=-0.0001, high=0.0001, size=FLAGS.embedding_dim)
        # W = tf.placeholder(tf.float32, [None, None], name="pretrained_embeddings")
        # set_x = cnn.W.assign(my_embedding_matrix)
        # sess.run(set_x, feed_dict={W: my_embedding_matrix})
        print("Finish transfer")
def train_step(x_batch, y_batch):
"""
A single training step
"""
feed_dict = {
cnn.input_x: x_batch,
cnn.input_y: y_batch,
cnn.dropout_keep_prob: FLAGS.dropout_keep_prob
}
_, step, summaries, loss, accuracy, predictions,y_actual = sess.run(
[train_op, global_step, train_summary_op, cnn.loss, cnn.accuracy, cnn.predictions,cnn.y],
feed_dict)
time_str = datetime.datetime.now().isoformat()
# print("train_f1_score:", f1_score(y_actual, predictions, average=None))
# print (predictions)
# print(y_actual)
print("{}: step {}, loss {:g}, acc {:g}".format(time_str, step, loss, accuracy))
return accuracy
train_summary_writer.add_summary(summaries, step)
def dev_step(x_batch, y_batch, writer=None):
"""
Evaluates model on a dev set
"""
feed_dict = {
cnn.input_x: x_batch,
cnn.input_y: y_batch,
cnn.dropout_keep_prob: 1.0
}
step, summaries, loss, accuracy ,predictions,y_actual= sess.run(
[global_step, dev_summary_op, cnn.loss, cnn.accuracy, cnn.predictions,cnn.y],
feed_dict)
time_str = datetime.datetime.now().isoformat()
if writer:
writer.add_summary(summaries, step)
return accuracy
        if __name__ == "__main__":
            # Train for up to num_epochs, evaluate on the dev split after
            # every epoch, and checkpoint only when dev accuracy improves.
            # Save the maximum accuracy value for validation data
            sess.run(tf.global_variables_initializer())
            max_acc_dev = 0.
            max_epoch = 0
            for epoch in range(FLAGS.num_epochs):
                time_start = time.time()
                epochs = data_helpers.epochs_iter(list(zip(x_train, y_train)), FLAGS.batch_size, FLAGS.num_epochs)
                for batch in epochs:
                    x_batch , y_batch = zip(*batch)
                    train_accuracy = train_step(x_batch , y_batch)
                    current_step = tf.train.global_step(sess, global_step)
                    # path = saver.save(sess, checkpoint_prefix, global_step=current_step)
                    # print("Saved model checkpoint to {}\n".format(path))
                print("\nEvaluation:")
                print("Epoch: %03d" % (epoch))
                dev_accuracy= dev_step(x_dev, y_dev, writer=dev_summary_writer)
                print ("Dev_accuracy:", dev_accuracy)
                if dev_accuracy > max_acc_dev:
                    max_acc_dev = dev_accuracy
                    max_epoch = epoch
                    path = saver.save(sess, checkpoint_prefix, global_step=current_step)
                print("max_acc_dev %f" %max_acc_dev)
                # Early stopping: give up after 5 epochs without improvement.
                if (epoch - max_epoch) > 5:
                    break
989,690 | 44410626c2341a860376b6a42d6d1d6ed7e56722 | #@String directory
from ij import IJ, WindowManager
from ij.plugin import Duplicator, Thresholder
from loci.plugins import BF
import os
def get_image_paths():
    """Return the names of all .oib files in the script's input *directory*."""
    entries = [entry.strip() for entry in os.listdir(directory)]
    return [entry for entry in entries if entry[-4:] == '.oib']
def run_analysis(img_name):
    # Open one .oib image via Bio-Formats, threshold a duplicate, run
    # Analyze Particles, and save the measurements as CSV into
    # <directory>/Results/.
    path = directory + img_name
    results_dir = directory + "Results/"
    if not os.path.exists(results_dir):
        os.makedirs(results_dir)
    img = BF.openImagePlus(path)[0]  # Bio-Formats returns a list; take the first series
    imgDup = img.duplicate()  # work on a copy so the original stays untouched
    #imgDup = img.Duplicator()
    imgDup.show()  # NOTE(review): Analyze Particles presumably acts on the active (shown) image — confirm
    IJ.setAutoThreshold(imgDup, "Default dark no-reset")
    # IJ.run(imgDup, "Threshold...", "")
    # IJ.run(imgDup, "Convert to Mask", "method=Default background=Dark calculate black")
    # IJ.run(imgDup, run("Close-", "stack"))
    IJ.run(imgDup, "Set Measurements...", "area mean min display decimal=3")
    IJ.run("Analyze Particles...", "size=2-Infinity circularity=0.15-1.00 display clear stack")
    IJ.saveAs("Results", results_dir + img_name + "_results.csv")
    IJ.selectWindow("Results")
    IJ.run("Close")
    img.close()
    imgDup.close()
# Fiji/Jython script parameters run with __name__ == '__builtin__', so accept
# both the script-runner and plain-interpreter entry points.
if __name__ in ['__builtin__','__main__']:
    if directory[-1] != '/':
        directory += '/'  # normalise so path concatenation below works
    imagePaths = get_image_paths()
    for path in imagePaths:
        run_analysis(path)
    IJ.run("Quit")
|
989,691 | 5fb82dfd918cdac7a4dfba3619c5c21250f36ec3 | import datetime
class Post:
    """A forum post that can persist itself into the sqlite ``posts`` table."""

    def __init__(self, content, author_id):
        self.content = content
        self.author_id = author_id
        # POSIX timestamp (float); kept numeric so ORDER BY sorts naturally.
        self.timestamp = datetime.datetime.now().timestamp()

    def insert(self, cursor):
        """Insert this post as a new row via *cursor*, using a parameterised query."""
        params = (self.content, self.author_id, self.timestamp)
        cursor.execute('''
            INSERT INTO posts
            ( content
            , author_id
            , timestamp
            )
            VALUES
            ( ?, ?, ?)
            ''', params
        )

    def __repr__(self):
        posted_at = datetime.datetime.fromtimestamp(self.timestamp)
        return "[Post by %s at %s: %s]" % (
            self.author_id, str(posted_at), self.content[:50])

    @classmethod
    def create_table(cls, cursor):
        """Drop any existing posts table and recreate it from scratch."""
        cursor.execute('DROP TABLE IF EXISTS posts')
        cursor.execute('''
            CREATE TABLE posts
            ( author_id TEXT NOT NULL
            , content TEXT
            , timestamp DOUBLE
            , FOREIGN KEY (author_id) REFERENCES users(email)
            )''')
class PostForDisplay:
    """Read-only view of a post joined with its author's display name."""

    def __init__(self, row):
        # *row* must support mapping access (e.g. sqlite3.Row).
        self.author_name = row['author_name']
        self.date = datetime.datetime.fromtimestamp(row['timestamp'])
        self.content = row['content']

    @classmethod
    def getAll(cls, cursor):
        """Return every post as PostForDisplay objects, newest first."""
        cursor.execute('''
            SELECT name AS author_name, content, timestamp
            FROM posts
            JOIN users ON author_id=email
            ORDER BY timestamp DESC
            ''')
        views = []
        for record in cursor.fetchall():
            views.append(cls(record))
        return views
989,692 | 88ed964382c720f23f10895566a4506978928dbf | #####################################################################
# #
# This program will automatically run 'run' and check #
# if any undocumented computers are online #
# #
# THIS VERSION WILL TAKE ABOUT 17 MINUTES #
# DO NOT USE UNLESS YOU ARE LEAVING THE PC ALONE #
# #
#####################################################################
# This tells the interpreter to use Python 3
#!python3
import pyautogui
import time
# This is a template for our workstation UNC admin shares.
cpuName = '\\\\wpsws{}\\c$'


def _check_host(path):
    """Probe one host by opening its admin share through the Run dialog.

    Opens Win+R, types the UNC path, clicks OK at the dialog's fixed screen
    position, then sleeps 30 seconds so the share lookup can succeed or fail
    before the next host is attempted.
    """
    # Open 'run'
    pyautogui.hotkey('winleft', 'r')
    # Type the intended PC name
    pyautogui.typewrite(path)
    # Click okay (hard-coded screen coordinates of the OK button)
    pyautogui.click(172, 997)
    # Wait 30 seconds for the folder to be found/not found
    time.sleep(30)


# Hosts to probe, in the original order. Integers are wpsws workstation
# numbers expanded through `cpuName`; strings are complete UNC paths.
_TARGETS = [
    '\\\\OXOFFICEADMIN\\c$',
    '\\\\TONYM-LT\\c$',
    '\\\\WPS-SAFE\\c$',
    15, 155, 17, 41, 59, 60, 10, 13, 14, 19, 25, 28, 29, 61, 73,
    '\\\\BARBARAK_LATITU\\c$',
    '\\\\RENATO-LAPTOP\\c$',
    26, 36, 37, 38, 39, 46, 69,
    '\\\\wpsws01\\c$',
    146,  # THIS IS MY PC, DON'T DELETE IT
    20, 47, 48, 49, 52,
]

for _target in _TARGETS:
    if isinstance(_target, int):
        _check_host(cpuName.format(_target))
    else:
        _check_host(_target)
|
989,693 | b34ff0a6d766334bcab43194f107b67c624509f1 | import base64
import hashlib
import hmac
import json
from datetime import datetime, timedelta
from time import time
import requests
from utils.configuration import agni_configuration
from utils.logger import getAgniLogger
_logger = getAgniLogger(__name__)
# Query-string / request-body parameter names used by the Zoom REST API.
PARAM_ACCESS_TOKEN = 'access_token'
PARAM_STATUS = 'status'
PARAM_PAGE_SIZE = 'page_size'
PARAM_PAGE_NUMBER = 'page_number'
PARAM_OCCURRENCE_ID = 'occurrence_id'
PARAM_NEXT_PAGE_TOKEN = 'next_page_token'
PARAM_ACTION = 'action'
# Actions accepted by the registrant status-update endpoint.
ACTION_ALLOW = 'allow'
ACTION_CANCEL = 'cancel'
ACTION_DENY = 'deny'
PARAM_REGISTRANTS = 'registrants'
# Batch size for status updates; longer registrant lists are chunked
# (see ZoomApi.updateWebinarRegistrantsStatus).
MAX_REGISTRANTS_PER_CALL = 30
# Endpoint paths, relative to the configured API base URL.
ENDPOINT_WEBINARS = 'webinars'
# NOTE: "REGISRANTS" spelling kept as-is -- it is only a local identifier.
ENDPOINT_WEBINAR_REGISRANTS = 'webinars/{zoomWebinarId}/registrants'
ENDPOINT_UPDATE_WEBINAR_REGISTRANTS_STATUS = ENDPOINT_WEBINAR_REGISRANTS + '/status'
class ZoomApiError(Exception):
    """Raised when the Zoom API returns a non-2xx response.

    Raised by ZoomApi.checkResponse with positional args
    (message, status_code, response_json).
    """
    pass
def generateJwtToken(api_key, api_secret, current_timestamp, expiry):
    """Hand-build an HS256-signed JWT for the Zoom API.

    api_key becomes the `iss` claim; api_secret is the HMAC-SHA256 signing
    key; current_timestamp / expiry are POSIX seconds for `iat` / `exp`.
    Returns the compact "header.claim.signature" token string.

    NOTE(review): this relies on Python 2 byte-strings -- base64/hmac are
    fed `str` directly and padding is stripped with .replace('=', '');
    under Python 3 these calls would need explicit encode/decode.
    """
    header = {
        'alg': 'HS256',
        'typ': 'JWT',
    }
    header_json = json.dumps(header)
    _logger.debug('Header json: %s', header_json)
    claim = {
        'iss': api_key,
        'iat': current_timestamp,
        'exp': expiry,
    }
    claim_json = json.dumps(claim)
    _logger.debug('Claim json: %s', claim_json)
    # JWT uses unpadded urlsafe base64, hence the '=' stripping.
    headerEncoded = base64.urlsafe_b64encode(header_json).replace('=', '')
    claimEncoded = base64.urlsafe_b64encode(claim_json).replace('=', '')
    segments = [headerEncoded, claimEncoded]
    # The signature covers "header.claim".
    signatureInput = '.'.join(segments)
    signature = base64.urlsafe_b64encode(
        hmac.new(api_secret, msg=signatureInput, digestmod=hashlib.sha256).digest()
    ).replace('=', '')
    segments.append(signature)
    return '.'.join(segments)
def decodeJwtToken(token):
    """Decode a compact JWT without verifying its signature.

    Returns (header_dict, claim_dict, raw_signature). Base64url padding is
    restored before decoding, since JWTs strip it.
    """
    decoded = []
    for part in token.split('.'):
        padding = '=' * ((4 - len(part) % 4) % 4)
        decoded.append(base64.urlsafe_b64decode(part + padding))
    return json.loads(decoded[0]), json.loads(decoded[1]), decoded[2]
# Interactive prompt shown by askAndMakeZoomApiToken (runtime string; the
# <number><unit> syntax is parsed against LIFETIME_SECONDS below).
TOKEN_LIFETIME_INPUT_PROMPT = '''
Need token lifetime in <number><unit> format.
Examples:
1d = 1 day
2h = 2 hours
3m = 3 minutes
4s = 4 seconds
Enter token lifetime>
'''.strip()
# Seconds per lifetime unit: day / hour / minute / second.
LIFETIME_SECONDS = {'d':86400, 'h':3600, 'm':60, 's':1}
def askAndMakeZoomApiToken():
    """Interactively mint a Zoom API JWT (Python 2: raw_input/print).

    Prompts for a lifetime such as '2h', then prints a token generated from
    the configured API key/secret plus its expiry time. Returns None (after
    logging) if the input cannot be parsed.
    """
    token_life_input = raw_input(TOKEN_LIFETIME_INPUT_PROMPT).strip().lower()
    try:
        # e.g. '2h' -> 2 * 3600; a bad number or unknown unit falls through.
        token_life = int(token_life_input[:-1])*LIFETIME_SECONDS[token_life_input[-1]]
    except:
        _logger.error('Invalid token lifetime: %s', token_life_input)
        return
    now = int(time())
    expDt = datetime.now() + timedelta(seconds=token_life)
    exp = now + token_life
    print "Here's your new token:\n\n%s\n"%(generateJwtToken(
        agni_configuration.getZoomApiKey(),
        agni_configuration.getZoomApiSecret(),
        now,
        exp
    ))
    print "The token is valid till %s\n"%(expDt)
class ZoomApi:
    """Minimal Zoom REST API client for webinar-registrant operations.

    Base URL and credentials come from agni_configuration; authentication
    uses a JWT (see generateJwtToken) passed as the `access_token` query
    parameter on every request.
    """
    def __init__(self):
        self._baseUrl = agni_configuration.getZoomApiBaseUrl()
        self._api_key = agni_configuration.getZoomApiKey()
        self._api_secret = agni_configuration.getZoomApiSecret()
        # Optional pre-issued token from configuration; may be empty.
        self._access_token = agni_configuration.getZoomApiToken()
        # POSIX expiry of the cached token; None until first inspected.
        self._token_expiry = None
    @property
    def accessToken(self):
        """Return a usable JWT, minting a fresh 5-minute one when needed."""
        now = int(time())
        if self._token_expiry is None:
            if self._access_token:
                # Lazily read the `exp` claim out of the configured token.
                h, c, s = decodeJwtToken(self._access_token)
                self._token_expiry = c.get('exp')
        if self._token_expiry:
            if self._token_expiry - now > 10:
                # More than 10 seconds of validity left: reuse it.
                return self._access_token
        _logger.info('No Zoom access token or it is nearing expiry. Generating new access token.')
        self._token_expiry = now + 300 # Five minutes expiry
        self._access_token = generateJwtToken(self._api_key, self._api_secret, now, self._token_expiry)
        return self._access_token
    def checkResponse(self, resp):
        """Raise ZoomApiError(message, status_code, json) unless resp is 2xx."""
        _logger.info('Zoom api returned status code %s', resp.status_code)
        if 200 <= resp.status_code < 300:
            return
        # Error bodies look like: {u'message': u'Invalid access token.', u'code': 124}
        resp_json = None
        try:
            resp_json = resp.json()
        except:
            pass
        raise ZoomApiError('Zoom api error: status_code=%s json=%s'%(resp.status_code, resp_json), resp.status_code, resp_json)
    def getWebinarRegistrants(self, zoomWebinarId, status='approved', page_size=300, page_number=1,
                              occurrence_id=None, next_page_token=None):
        """Fetch one page of webinar registrants and return the parsed JSON."""
        requestUrl = (self._baseUrl + ENDPOINT_WEBINAR_REGISRANTS).format(
            zoomWebinarId=zoomWebinarId
        )
        requestQuery = {
            PARAM_ACCESS_TOKEN: self.accessToken,
            PARAM_STATUS: status,
            PARAM_PAGE_SIZE: page_size,
            PARAM_PAGE_NUMBER: page_number,
        }
        requestBody = {
        }
        if occurrence_id:
            requestQuery[PARAM_OCCURRENCE_ID] = occurrence_id
        if next_page_token:
            requestQuery[PARAM_NEXT_PAGE_TOKEN] = next_page_token
        resp = requests.get(requestUrl, params=requestQuery, data=requestBody)
        self.checkResponse(resp)
        resp_json = resp.json()
        # _logger.debug('Got zoom api response: %s', resp_json)
        return resp.json()
    def getAllWebinarRegistrants(self, zoomWebinarId, status='approved', occurrence_id=None):
        """Fetch every page of registrants and return the combined list.

        NOTE(review): after the first page this follows `next_page_token`
        but counts iterations against `page_count` from the first response;
        assumes the two pagination schemes line up -- confirm against the
        Zoom API docs.
        """
        allRegistrants = []
        data = self.getWebinarRegistrants(zoomWebinarId, status=status, occurrence_id=occurrence_id)
        page_count = data['page_count']
        total_records = data['total_records']
        next_page_token = data['next_page_token']
        registrants = data['registrants']
        allRegistrants += registrants
        page_number = 1
        if len(registrants) < total_records:
            while page_number < page_count:
                data = self.getWebinarRegistrants(zoomWebinarId, status=status,
                                                  occurrence_id=occurrence_id, next_page_token=next_page_token)
                next_page_token = data['next_page_token']
                registrants = data['registrants']
                allRegistrants += registrants
                page_number += 1
        return allRegistrants
    def updateWebinarRegistrantsStatus(self, zoomWebinarId, action, registrants=None, occurrence_id=None):
        """Apply `action` ('allow'/'cancel'/'deny') to the given registrants.

        Registrant lists longer than MAX_REGISTRANTS_PER_CALL are split into
        batches of that size. Returns the list of HTTP responses (one per
        batch, or a single response for an empty/small list).
        """
        if not registrants:
            registrants = []
        requestUrl = (self._baseUrl + ENDPOINT_UPDATE_WEBINAR_REGISTRANTS_STATUS).format(
            zoomWebinarId=zoomWebinarId
        )
        requestQuery = {
            PARAM_ACCESS_TOKEN: self.accessToken,
        }
        if occurrence_id:
            requestQuery[PARAM_OCCURRENCE_ID] = occurrence_id
        requestBody = {
            PARAM_ACTION: action
        }
        respList = []
        if len(registrants) <= MAX_REGISTRANTS_PER_CALL:
            if registrants:
                requestBody[PARAM_REGISTRANTS] = registrants
            #json.dumps(requestBody)
            #_logger.debug('updateWebinarRegistrantsStatus: requestBody = %s', requestBody)
            #_logger.debug('updateWebinarRegistrantsStatus: json-requestBody = %s', requestBody)
            resp = requests.put(requestUrl, params=requestQuery, json=requestBody)
            self.checkResponse(resp)
            # resp_json = resp.json()
            # _logger.debug('Got zoom api response: %s', resp_json)
            respList.append(resp)
        else:
            # Chunk the list; the same requestBody dict is reused per batch.
            for i in xrange(0, len(registrants), MAX_REGISTRANTS_PER_CALL):
                startIdx = i
                endIdx = (i+MAX_REGISTRANTS_PER_CALL) if (i+MAX_REGISTRANTS_PER_CALL) < len(registrants) else len(registrants)
                requestBody[PARAM_REGISTRANTS] = registrants[startIdx:endIdx]
                #_logger.debug('updateWebinarRegistrantsStatus: requestBody = %s', requestBody)
                resp = requests.put(requestUrl, params=requestQuery, json=requestBody)
                self.checkResponse(resp)
                # resp_json = resp.json()
                # _logger.debug('Got zoom api response: %s', resp_json)
                respList.append(resp)
        return respList
def main():
    """CLI helper: mint a 5-minute token and round-trip it (Python 2).

    Usage: python thisfile.py api-key api-secret
    Prints the generated token and its decoded expiry; logs usage on error.
    """
    import sys
    try:
        now = int(time())
        genToken = generateJwtToken(sys.argv[1], sys.argv[2], now, now+300)
        print genToken
        # Decode our own token to show the expiry claim.
        header, claim ,sign = decodeJwtToken(genToken)
        print 'Expiry:', claim.get('exp')
    except:
        _logger.exception('Usage: python %s api-key api-secret', __file__)
if __name__ == '__main__':
    main()
|
989,694 | 03ff1ec7b54cbe04266236a63f3730629c6b7d83 |
# Exercises 1-9 from "Learn Python the Hard Way", lightly annotated.
# ex.1
print("I'd much rather you 'not'.")
print('I "said" do not.')
# ex.3
print("hens", 25 + 30 / 6)
print("roosters", 100 - 25 * 3 % 4)
# < has lower precedence than +/-. + first, - second, then comparison
# PEMDAS PE(M&D)(A&S)
print(3.2 + 2.5 < 5 - 7)
print(5 >= -2.45)
# ex.4
cars_driven = 100
space_in_a_car = 3.5
capacity = cars_driven * space_in_a_car
print('capacity: ', capacity)
# ex.5
name = 'zed'
age = 39
height = 1.8
print(f"Let's talk about {name}.")
print(f"he's {age} years old.")
print("he's ", round(height), "m height.")
print("he's " + str(round(height)) + "m height.")
# ex.6
print('\n##### ex. 6')
types_of_ppl = 10
x = f"there are {types_of_ppl} types of people"
binary = 'binary'
do_not = "don't"
y = f"those who know {binary} and those who {do_not}"
print(x)
print(y)
print(f"I said: {x}")
print(f"I also said: '{y}' ")
hilarious = True
joke_eval = "Isn't that funny? {}"
print(joke_eval.format(hilarious))
# ex.7
print('\n##### ex. 7')
print("Mary had a little lamb")
print("its fleece was white as {}.".format('snow'))
print("And everywhere that mary went.")
print("." * 10)
end1 = "C"
end2 = "h"
end3 = "e"
end4 = "e"
end5 = "s"
end6 = "e"
end7 = "B"
end8 = "u"
end9 = "r"
# BUG FIX: end9 was assigned twice ("r", then "g"), shifting the remaining
# letters so the line below printed "Bugher" instead of "Burger".
end10 = "g"
end11 = "e"
end12 = "r"
# end=' ' replaces print's trailing newline with a space
print(end1 + end2 + end3 + end4 + end5 + end6, end=' ')
print(end7 + end8 + end9 + end10 + end11 + end12)
# ex.8
print('\n##### ex. 8')
formatter = "{} {} {} {}"
print(formatter.format(1, 2, 3, 4))
print(formatter.format('one', 'two', 'three', 'four'))
print(formatter.format(True, False, True, False))
print(formatter.format(formatter, formatter, formatter, formatter))
print(formatter.format(
    "Try your",
    "Own text here",
    "Maybe a poem",
    "Or a song about fear."
))
# ex.9
print('\n##### ex. 9')
days = "Mon Tue Wed Thu Fri Sat Sun"
months = "Jan\nFeb\nMar\nApr\nMay\nJun\nJul\nAug"
print("Days: ", days)
print("Months: ", months)
print(
    '''
There's something going on here.
With the three double-quotes.
We'll be able to type as much as we like.
Even 4 lines if we want, or 5, or 6.
''')
989,695 | 8981a898331836099717aca640b8fcfbb518ad8d | from country_class import country
from player_class import player
from generate_countries import generateCountries
from random import shuffle
from drawer import drawMap
from setup import init_players,split_territories
from helper_functions import attackTerritory,fortify
# Build the board: every territory, indexed by its id for O(1) lookup.
countries=generateCountries()
id_to_country={}
for territory in countries:
    id_to_country[territory.idnumb]=territory
# Create players and deal the territories out between them.
players=init_players()
temp_countries=countries[:]
split_territories(players,temp_countries)
shuffle(players)
turn=0
rounds=0
# Main game loop: one iteration per player turn, until one player remains.
while len(players)>1:
    drawMap(countries,players)
    rounds+=1
    # Wrap the turn counter back to the first player.
    if turn>len(players)-1:
        turn=0
    current_player=players[turn]
    current_player.gotCard=False
    print(current_player.color.title(),"player's turn")
    print("You have the following cards: ")
    print(current_player.displayCards())
    current_player.bonusTroops=0
    # Card-playing phase (optional).
    a=input("Would you like to play a set of cards (y/n): ")
    if a[0].lower()=="y":
        playcards=input("Cards to play (space seperated): ")
        current_player.playCards(playcards)
    # Reinforcement phase.
    current_player.placeTroops(countries)
    # Attack phase: loop until the player enters 'e'.
    print("Input 'end' at any time to stop attacking")
    in2=input("Input e or the id/name of the attacking and defending country, along with the attacking troops (e.g. 13 14 5) ")
    print("Input 'showmap' to view the map")
    while in2!="e":
        if in2=='showmap':
            drawMap(countries,players)
        else:
            # Parse "<attacker id> <defender id> <troops>" and resolve combat.
            in2=in2.split()
            defender=id_to_country[int(in2[1])].ownedBy
            id_to_country[int(in2[0])],id_to_country[int(in2[1])],current_player,defender=attackTerritory(id_to_country[int(in2[0])],id_to_country[int(in2[1])],current_player,defender,int(in2[-1]))
        in2=input("Input e or the id/name of the attacking and defending country, along with the attacking troops (e.g. 13 14 5) ")
    # Optional fortification phase: move troops between owned territories.
    last_in=input("Would you like to fortify (y/n): ")
    if last_in[0].lower()=="y":
        z=id_to_country[int(input("Origin: "))]
        x=id_to_country[int(input("Destination: "))]
        y=int(input("Number of troops: "))
        z,x=fortify(current_player,z,x,y,countries)
        id_to_country[z.idnumb]=z
        id_to_country[x.idnumb]=x
    turn+=1
# Announce the surviving player.
print("Game over!")
for p in players:
    if p.alive==True:
        print("The",p.color,"has won!")
|
989,696 | 57b24de6bf23b0079cc998a03670b5f8795084b6 |
"""
This is from an internal project tracking application; it extends two
generic views as well as defining logic for various sorting operations.
"""
from django.http import HttpResponse, HttpResponseRedirect
from django.template import RequestContext, loader
from models import Project, Ticket, STATUS_CODES, PRIORITY_CODES
from django.contrib.auth.models import User
from django.views.generic.list_detail import object_list, object_detail
def ticket_detail(request, object_id, queryset):
    """Render a single ticket, adding human-readable status/priority labels.

    STATUS_CODES / PRIORITY_CODES are sequences of (code, label) pairs; the
    ticket's stored code is translated to its label for the template.
    """
    instance = queryset.get(id=object_id)
    # BUG FIX: the original looped over the code tables and shadowed the loop
    # variable, leaving a raw (code, label) tuple in the context whenever no
    # code matched. Use a dict lookup and fall back to the stored value.
    status = dict(STATUS_CODES).get(instance.status, instance.status)
    priority = dict(PRIORITY_CODES).get(instance.priority, instance.priority)
    extra_context = {
        'status': status,
        'priority': priority,
    }
    return object_detail(
        request,
        object_id=object_id,
        queryset=queryset,
        extra_context=extra_context
    )
def ticket_list(request):
    """List tickets filtered by status and, optionally, assignee/project.

    GET parameters: `status` (label, default 'Open'), `user` (username) and
    `project` (project name). Unknown or missing values fall back to the
    unfiltered defaults instead of erroring.
    """
    status = request.GET.get('status', 'Open')
    try:
        user = User.objects.get(username=request.GET['user'])
    except (KeyError, User.DoesNotExist):
        user = None
    try:
        project = Project.objects.get(name=request.GET['project'])
    except (KeyError, Project.DoesNotExist):
        project = None
    users = User.objects.all()
    projects = Project.on_site.all()
    extra_context = {
        'status_codes': [ pair[1] for pair in STATUS_CODES ],
        'current_status': status,
        'users': users,
        'current_user': user,
        'projects': projects,
        'current_project': project,
    }
    # Translate the status label back to its integer code.
    for code in STATUS_CODES:
        if status == code[1]:
            status = code[0]
    try:
        status = int(status)
    except (TypeError, ValueError):
        status = 1
    queryset = Ticket.on_site.filter(status=status)
    if user:
        queryset = queryset.filter(assignee=user)
    if project:
        # BUG FIX: `project` was parsed and shown in the context but never
        # applied to the queryset. NOTE(review): assumes Ticket has a
        # `project` foreign key -- confirm the field name against the model.
        queryset = queryset.filter(project=project)
    return object_list(request, queryset=queryset, extra_context=extra_context)
def index(request):
    """Render the site index page with the current site's projects."""
    # BUG FIX: `Project.objects.on_site()` called the `on_site` manager as if
    # it were a method of the default manager; the rest of this module uses
    # `Project.on_site.all()` (see ticket_list), so do the same here.
    projects = Project.on_site.all()
    tmpl = loader.get_template('base.html')
    context = RequestContext(request, {
        'projects': projects,
    })
    return HttpResponse(tmpl.render(context))
"""
This view retrieves a list of objects based on an arbitrary number of
user-specified tags, extending a generic list view to take advantage
of automatic pagination. It also handles user specification for tag
cloud view modes.
"""
from django.views.generic.list_detail import object_list
from django.template import RequestContext
from settings import PAGINATE_BY
from tagging.models import Tag
def tag_object_list(request, cls, tag_string='', template_name='tag_object_list.html'):
    """Paginated list of `cls` objects matching every tag in `tag_string`.

    `tag_string` is a '/'-separated list of tag names (case-insensitive);
    a path segment of 'rss' switches to the RSS template. The session key
    "view_mode" ('all' or '') controls how the tag cloud is built.
    """
    tags = []
    rss = False
    # Resolve each path segment to a Tag; 'rss' is a pseudo-segment.
    for tag_name in tag_string.split('/'):
        try:
            tag = Tag.objects.get(name__iexact=tag_name.strip())
            tags.append(tag)
        except Tag.DoesNotExist:
            if tag_name == 'rss':
                rss = True
            continue
    # Intersect: only objects carrying ALL requested tags survive.
    items = cls.objects.all()
    for tag in tags:
        items = items.filter(tags__id=tag.id)
    items = items.order_by('-id')
    try:
        page = int(request.GET.get('page', '1'))
    except ValueError:
        page = 1
    # A view_mode GET parameter, if present, updates the session setting.
    try:
        mode = request.GET['view_mode']
        if mode == 'all':
            request.session["view_mode"] = 'all'
        else:
            request.session["view_mode"] = ''
    except:
        pass
    # Read the effective mode back from the session, defaulting to ''.
    try:
        if request.session["view_mode"] == 'all':
            mode = 'all'
        else:
            mode = ''
    except:
        request.session["view_mode"] = ''
        mode = ''
    # 'all' shows the unlimited tag cloud; otherwise a truncated one.
    if mode == 'all':
        display_tags = Tag.getSubsetTags(cls, tags, limit=False)
    else:
        display_tags = Tag.getSubsetTags(cls, tags)
    extra_context = {
        'display_tags': display_tags,
        'viewing_tags': tags,
        'view_mode': mode,
    }
    if rss is True:
        template_name = 'tag_object_list_rss.html'
        # RSS needs a lastBuildDate; fall back to 0 when there are no items.
        if len(items):
            extra_context['last_build'] = items[0].date
        else:
            extra_context['last_build'] = 0
    return object_list(request, items, extra_context=extra_context, template_name=template_name, page=page, paginate_by=PAGINATE_BY)
|
989,697 | e6dd515872a7e49df7194d230e868ea1a418eda3 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 14 14:57:07 2019
@author: xam
"""
from PIL import Image
import numpy as np
import statistics
import getBlobLoc
import itertools
import math
import Kclus
import Squarepos
# Blob locations (groups of (x, y, r) tuples) for each source image.
b1 = getBlobLoc.getOrdBlob('1.png')
b2 = getBlobLoc.getOrdBlob('2.png')
b3 = getBlobLoc.getOrdBlob('3.png')
# Per-image pixel squares and their coordinate lists.
pix1 = Squarepos.getPixels('1.png')
cord1 = Squarepos.coList()
pix2 = Squarepos.getPixels('2.png')
cord2 = Squarepos.coList()
pix3 = Squarepos.getPixels('3.png')
cord3 = Squarepos.coList()
# Accumulators: square areas (a*), blob radii (blobs*r) and blob areas
# (blobs*ar) per image, plus selection bookkeeping.
a1 = []
a2 = []
a3 = []
blobs1r = []
blobs2r = []
blobs3r = []
blobs1ar = []
blobs2ar = []
blobs3ar = []
weirdBlobs = []
testVal = []
finPixList =[]
# Collect radii and circle areas (pi*r^2) per blob group of image 1.
for blobs in b1:
    tempr = []
    tempar = []
    for blob in blobs:
        x,y,r = blob
        tempr = tempr + [r]
        tempar = tempar+ [(math.pi)*(r*r)]
    blobs1ar = blobs1ar + [tempar]
    blobs1r = blobs1r + [tempr]
# Same for image 2.
for blobs in b2:
    tempr = []
    tempar = []
    for blob in blobs:
        x,y,r = blob
        tempr = tempr + [r]
        tempar = tempar+ [(math.pi)*(r*r)]
    blobs2r = blobs2r + [tempr]
    blobs2ar = blobs2ar + [tempar]
# Image 3.
# NOTE(review): unlike the two loops above, `tempar` is NOT reset per blob
# group here, so areas accumulate across groups (and initially carry over
# from the b2 loop) -- looks like a bug; confirm the intent.
for blobs in b3:
    tempr = []
    for blob in blobs:
        x,y,r = blob
        tempr = tempr + [r]
        tempar = tempar+ [(math.pi)*(r*r)]
    blobs3ar = blobs3ar + [tempar]
    blobs3r = blobs3r + [tempr]
#for cord in cord1:
#    xys = cord[0]
#    xs = xys[0]
#    ys = xys[1]
#    xye = cord[1]
#    xe = xye[0]
#    ye = xye[1]
# Measure the area of each square via Kclus, per image.
for pix in pix1:
    array = np.array(pix, dtype=np.uint8)
    image = Image.fromarray(array)
    area = Kclus.getArea(image)
    a1 = a1 + [area]
for pix in pix2:
    array = np.array(pix, dtype=np.uint8)
    image = Image.fromarray(array)
    area = Kclus.getArea(image)
    a2 = a2 + [area]
for pix in pix3:
    array = np.array(pix, dtype=np.uint8)
    image = Image.fromarray(array)
    area = Kclus.getArea(image)
    a3 = a3 + [area]
# Count, per blob group of image 1, the radii lying outside one standard
# deviation of the group mean ("weird" blobs).
for rads in blobs1r:
    count = 0
    x = statistics.mean(rads)
    sd = statistics.stdev(rads)
    for r in rads:
        if r > (x+sd) or r <(x-sd):
            count+=1
    weirdBlobs= weirdBlobs +[count]
# NOTE(review): this stores itertools.repeat iterator objects, not ints, so
# every `testVal[pos]==1/2/3` comparison below is always False, finPixList
# stays empty and Image.fromarray on the empty result will likely fail.
# Confirm what testVal was meant to hold.
for pos in range(0,96):
    it = itertools.repeat(0,3)
    testVal.append(it)
# Select each square from whichever image its testVal entry picks.
for pos in range(0,96):
    if testVal[pos]==1:
        finPixList = finPixList+ [pix1[pos]]
    if testVal[pos]==2:
        finPixList = finPixList+ [pix2[pos]]
    if testVal[pos]==3:
        finPixList = finPixList+ [pix3[pos]]
# Assemble the chosen squares into a composite image and save it.
array = np.array(finPixList, dtype=np.uint8)
image = Image.fromarray(array)
image.save('master.png')
|
989,698 | 002dddd665795003017bb9e91bb45c1b6253fa9c | import torch
import torch.nn as nn
import torch.nn.functional as F
class PANNsLoss(nn.Module):
    """Training criterion for PANNs models: BCE-with-logits on raw outputs.

    A CrossEntropyLoss instance is also constructed (kept for interface
    parity with earlier experiments) but forward() uses only the BCE term.
    An earlier NaN/inf-sanitising forward pass was disabled upstream.
    """

    def __init__(self):
        super().__init__()
        self.bce = nn.BCEWithLogitsLoss()
        self.cel = nn.CrossEntropyLoss()  # unused by forward(); kept as-is

    def forward(self, input, target):
        """Return BCEWithLogitsLoss(input, target)."""
        return self.bce(input, target)
989,699 | bb547879b6b4f0a566266086ac6815fa743eebdb | # Generated by Django 3.1.1 on 2020-09-11 00:38
import django.contrib.postgres.fields
import django.db.models.deletion
import django_enumfield.db.fields
from django.db import migrations, models
import cards.models
class Migration(migrations.Migration):
    """Initial, auto-generated schema for the cards app (Django 3.1.1).

    Creates the Card, CardEffects, ReleaseSet and CardArt models, then adds
    the Card -> ReleaseSet foreign key (added last because ReleaseSet is
    created after Card in the operations list). Auto-generated file: avoid
    hand-editing once this migration has been applied anywhere.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Card',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('number', models.CharField(max_length=16)),
                ('name', models.CharField(max_length=255)),
                ('card_type', django_enumfield.db.fields.EnumField(enum=cards.models.CardType)),
                ('color', django_enumfield.db.fields.EnumField(enum=cards.models.CardColor)),
                ('play_cost', models.IntegerField(default=0)),
                ('digivolve_cost', models.IntegerField(blank=True, null=True)),
                ('digivolve_source_color', django.contrib.postgres.fields.ArrayField(base_field=django_enumfield.db.fields.EnumField(enum=cards.models.CardColor), blank=True, null=True, size=None)),
                ('level', models.IntegerField(blank=True, null=True)),
                ('stage', django_enumfield.db.fields.EnumField(default=0, enum=cards.models.DigimonStage)),
                ('digimon_power', models.IntegerField(blank=True, null=True)),
                ('flavor_type', models.CharField(blank=True, max_length=255, null=True)),
                ('flavor_attribute', models.CharField(blank=True, max_length=255, null=True)),
                ('rarity', django_enumfield.db.fields.EnumField(enum=cards.models.CardRarity)),
            ],
        ),
        migrations.CreateModel(
            name='CardEffects',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('effect_type', django_enumfield.db.fields.EnumField(enum=cards.models.EffectType)),
            ],
        ),
        migrations.CreateModel(
            name='ReleaseSet',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('release_date', models.DateField(blank=True, null=True)),
                ('card_types', models.IntegerField(default=0)),
            ],
        ),
        migrations.CreateModel(
            name='CardArt',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('artist', models.CharField(max_length=255)),
                ('card', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cards.card')),
                ('release_set', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='cards.releaseset')),
            ],
        ),
        migrations.AddField(
            model_name='card',
            name='release_set',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='cards.releaseset'),
        ),
    ]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.