text stringlengths 38 1.54M |
|---|
from pydantic import BaseModel


class Jobs(BaseModel):
    """Schema for a single scraped job posting.

    All fields are kept as raw strings as delivered by the source site;
    no parsing/normalisation happens at this layer.
    """

    job_id: str                       # unique identifier of the posting
    job_title: str
    company: str
    job_post_date: str                # NOTE(review): raw string, not a date — confirm format upstream
    job_requirement_career_level: str
    company_size: str
    company_industry: str
    job_description: str
    job_employment_type: str          # e.g. full-time / part-time (exact vocabulary set by the source)
    job_function: str
|
from odoo import models, fields, api
from odoo import exceptions
from odoo.exceptions import ValidationError
import logging

_logger = logging.getLogger(__name__)


class crossoveredbudgetlines (models.Model):
    """Extension of budget lines exposing the budget position's code.

    Adds a related (read-through) field so the budget-position code
    ``x_bp_code`` defined on ``general_budget_id`` is directly available
    on each budget line.
    """
    _inherit = 'crossovered.budget.lines'

    # Related field: mirrors general_budget_id.x_bp_code on the line itself.
    x_bp_code = fields.Char(related='general_budget_id.x_bp_code')
|
"""
Name: Phan Tấn Đạt
ID: 18127078
Email: 18127078@student.hcmus.edu.vn
AI lab01 Project
"""
from Breadth_first_search import Breadth_first_search
from Uniform_cost_search import Uniform_cost_search
from Greedy_best_first_search import Greedy_best_first_search
from A_star_graph_search import A_star_graph_search
from Iterative_deepening_search import Iterative_deepening_search
from Classes import Maze
from file_tools import OutputData, ImportData, choose_input_files, printResult
import sys
# --------------------------------------
if __name__ == "__main__":
    # Entry point: load a maze, run each search algorithm from `start` to
    # `goal`, write results to ..\OUTPUT and print a summary.
    # BUGFIX: the path strings previously relied on "\I"/"\ " not being
    # escape sequences; use raw strings so Windows backslashes are explicit,
    # and drop the stray trailing space from the output directory.
    file_name = choose_input_files(r"..\INPUT")
    if file_name is not None:
        input_list = ImportData(file_name)
        if len(input_list) < 3:
            print("No data was imported")
            print(input_list)
            # BUGFIX: exit with a non-zero status so callers see the failure.
            sys.exit(1)
        # Input layout: first item = board size, last item = goal cell,
        # everything in between = the maze rows themselves.
        size = int(input_list.pop(0))
        goal = int(input_list.pop(-1))
        if (goal < 0) or (goal > size * size):
            print("\n[Warning]: Goal doesn't exist in Maze!\n->This might result in long runtime and uncompleted result!!\n")
        board = Maze(size, input_list, goal)
        # Starting point is fixed at cell 0 (interactive prompt disabled).
        start = 0
        print("Starting point:\t", start)
        print("Goal:\t\t\t", goal)
        algorithms = [Breadth_first_search,
                      Uniform_cost_search,
                      Iterative_deepening_search,
                      Greedy_best_first_search,
                      A_star_graph_search]
        for method in algorithms:
            result = method(board, start, goal)
            # One output file per algorithm, named after the function.
            OutputData(r"..\OUTPUT" + "\\", method.__name__, result)
            printResult(method.__name__, result)
|
import pandas_datareader.data as pdr
import datetime as dt
import pandas as pd
import numpy as np
# Fetch ~10 years of daily MSFT OHLCV data from Yahoo Finance.
start_date = dt.date.today() - dt.timedelta(3650)
end_date = dt.date.today()
tickers = ['MSFT']
ohlcv = pdr.get_data_yahoo(tickers[0],start_date,end_date)
df = ohlcv.copy()
# NOTE(review): BollBnd and RSI are called here but are only defined further
# down the file — executed top-to-bottom this raises NameError. These two
# plotting calls should be moved below the function definitions.
BollBnd(ohlcv,20).iloc[-200:,[6,7,8]].plot()   # plot MA / BB_up / BB_dn columns
RSI(ohlcv,14)['RSI'].plot()
def MACD(Df,a,b,c):
    """Return a copy of *Df* with MACD indicator columns appended.

    MA_Fast / MA_Slow are exponentially weighted means of 'Adj Close'
    with spans *a* and *b*; MACD is their difference and Signal is a
    span-*c* EWMA of MACD. Warm-up rows containing NaNs are dropped.
    """
    frame = Df.copy()
    fast = frame['Adj Close'].ewm(span=a, min_periods=a).mean()
    slow = frame['Adj Close'].ewm(span=b, min_periods=b).mean()
    frame['MA_Fast'] = fast
    frame['MA_Slow'] = slow
    frame['MACD'] = fast - slow
    frame['Signal'] = frame['MACD'].ewm(span=c, min_periods=c).mean()
    frame.dropna(inplace=True)
    return frame
def BollBnd(Df,n):
    """Return a copy of *Df* with Bollinger Band columns appended.

    MA       : n-period rolling mean of 'Adj Close'
    BB_up/dn : MA +/- 2 rolling standard deviations of the *price*
    BB_Width : distance between the two bands
    Warm-up rows containing NaNs are dropped.
    """
    df = Df.copy()
    ma = df['Adj Close'].rolling(n).mean()
    # BUGFIX: Bollinger Bands use the rolling std of the price itself.
    # The original took the std of the moving average, which badly
    # understates volatility (the MA is already smoothed).
    sd = df['Adj Close'].rolling(n).std()
    df['MA'] = ma
    df['BB_up'] = ma + 2 * sd
    df['BB_dn'] = ma - 2 * sd
    df['BB_Width'] = df['BB_up'] - df['BB_dn']
    df.dropna(inplace=True)
    return df
def RSI(Df,n):
    """Return a copy of *Df* with Wilder's RSI columns appended.

    The first *n* rows have no RSI (NaN). The average gain/loss is
    seeded with a simple n-period mean at i == n, then updated with
    Wilder's smoothing: avg = ((n-1)*prev + current) / n.
    """
    df = Df.copy()
    df['delta'] = df['Adj Close'] - df['Adj Close'].shift(1)
    df['gain'] = np.where(df['delta'] >= 0, df['delta'], 0)
    df['loss'] = np.where(df['delta'] < 0, abs(df['delta']), 0)
    avg_gain = []
    avg_loss = []
    gain = df['gain'].tolist()
    loss = df['loss'].tolist()
    for i in range(len(df)):
        if i < n:
            # BUGFIX: np.NaN was removed in NumPy 2.0 — use np.nan.
            avg_gain.append(np.nan)
            avg_loss.append(np.nan)
        elif i == n:
            # seed with a plain rolling mean
            avg_gain.append(df['gain'].rolling(n).mean().tolist()[n])
            avg_loss.append(df['loss'].rolling(n).mean().tolist()[n])
        else:
            # Wilder's smoothing for all subsequent rows
            avg_gain.append(((n - 1) * avg_gain[i - 1] + gain[i]) / n)
            avg_loss.append(((n - 1) * avg_loss[i - 1] + loss[i]) / n)
    df['avg_gain'] = np.array(avg_gain)
    df['avg_loss'] = np.array(avg_loss)
    df['RS'] = df['avg_gain'] / df['avg_loss']
    df['RSI'] = 100 - (100 / (1 + df['RS']))
    return df
#setup for starting the backtesting
# NOTE(review): this script-level code calls read_data() and the
# one-argument RSI() that are defined *below* it in the file — executed
# top-to-bottom it raises NameError. The definitions should precede it.
portfolio = 500
days = 70
stock_list = ['RELIANCE.NS']
prices = read_data(stock_list, days)
#nav dataframe has two columns leftover cash in hand, and stock which is value of stock that we own
nav = pd.DataFrame(index = prices.tail(days-14).index)
nav = nav.assign(leftover = np.zeros(days-14), stock = np.zeros(days-14))
nav.iloc[0,0] = portfolio
signal = 0
prev_signal = 0
for index, row in nav.iloc[1:].iterrows():
    # RSI() returns -1/0/1; np.sign keeps the running signal in {-1, 0, 1}
    signal = np.sign(signal + RSI(prices.loc[:index].tail(14)))
    # previous row's leftover cash
    leftover = nav.loc[:index].tail(2).head(1).iloc[0,0]
    if(signal == -1):
        # bearish: stay fully in cash
        nav.loc[index, 'leftover'] = leftover
        nav.loc[index, 'stock'] = 0
        continue
    if(prev_signal == 0 and signal == 1):
        #buy
        nav.loc[index, 'leftover'] = leftover - prices.loc[index][0]
        nav.loc[index, 'stock'] = prices.loc[index][0]
    if(prev_signal == 1 and signal == 1):
        #hold
        nav.loc[index, 'leftover'] = leftover
        nav.loc[index, 'stock'] = prices.loc[index][0]
    if(prev_signal == 1 and signal == 0):
        #sell
        nav.loc[index, 'leftover'] = leftover + prices.loc[index][0]
        nav.loc[index, 'stock'] = 0
    if(prev_signal == 0 and signal == 0):
        #wait
        # NOTE(review): the "wait" branch still books a stock value even
        # though nothing was bought — confirm this is intended.
        nav.loc[index, 'leftover'] = leftover
        nav.loc[index, 'stock'] = prices.loc[index][0]
    prev_signal = signal
# total account value (cash + stock) over time
nav.sum(axis =1).plot()
def read_data(stock_list, days):
    """Download daily 'Adj Close' history for each ticker from Yahoo.

    :param stock_list: iterable of ticker symbols
    :param days: number of leading rows to keep
    :return: DataFrame with one column per ticker, truncated to *days* rows
    """
    df = pd.DataFrame()
    for ticker in stock_list:
        # BUGFIX: the module is imported at the top of the file as
        # `import pandas_datareader.data as pdr`; the bare name `data`
        # was never defined and raised NameError.
        df[ticker] = pdr.DataReader(ticker, 'yahoo', start='1/1/2010')['Adj Close']
    return df.head(days)
def RSI(price_data):
    """Simple-mean RSI trading signal over a price window.

    :param price_data: single-column DataFrame of prices
    :return: -1 if overbought (RSI > 70, sell), 1 if oversold (RSI < 30,
             buy), 0 otherwise
    """
    delta = price_data.diff()
    up, down = delta.copy(), delta.copy()
    up[up < 0] = 0
    down[down > 0] = 0
    roll_up = up.mean()
    roll_down = down.abs().mean()
    RS = roll_up / roll_down
    # .iloc[0]: positional access to the single column's RSI value
    # (labeled `[0]` indexing is deprecated on non-integer indexes).
    RSI = (100.0 - (100.0 / (1.0 + RS))).iloc[0]
    if RSI > 70:
        return -1
    # BUGFIX: the original `else return 0` (missing colon) was a SyntaxError.
    if RSI < 30:
        return 1
    return 0
|
"""proj URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
#from book.views import author_list
from book import views
urlpatterns = [
    # Admin site mounted under a non-default prefix.
    path('s-admin/', admin.site.urls),
    # Author CRUD: mixed function-based views and class-based ("-cbv") twins.
    path('author/', views.AuthorList.as_view(), name="author-list"),
    path('author/<int:pk>/', views.author_detail, name="author-detail"),
    path('author-cbv/<int:pk>/', views.AuthorDetail.as_view(), name="author-detail-cbv"),
    path('author-delete/<int:pk>/', views.author_delete, name="author-delete"),
    path('author-delete-cbv/<int:pk>/', views.AuthorDelete.as_view(), name="author-delete-cbv"),
    path('author-create/', views.author_create, name="author-create"),
    path('author-create-cbv/', views.AuthorCreate.as_view(), name="author-create-cbv"),
    path('author-update/<int:pk>/', views.author_update, name="author-update"),
    path('author-update-cbv/<int:pk>/', views.AuthorUpdate.as_view(), name="author-update-cbv"),
    # Genre CRUD (class-based only).
    path('genre/', views.GenreList.as_view(), name="genre-list"),
    path('genre-cbv/<int:pk>/', views.GenreDetail.as_view(), name="genre-detail-cbv"),
    path('genre-delete-cbv/<int:pk>/', views.GenreDelete.as_view(), name="genre-delete-cbv"),
    path('genre-create-cbv/', views.GenreCreate.as_view(), name="genre-create-cbv"),
    path('genre-update-cbv/<int:pk>/', views.GenreUpdate.as_view(), name="genre-update-cbv"),
    # Series CRUD (class-based only).
    path('series/', views.SeriesList.as_view(), name="series-list"),
    path('series-cbv/<int:pk>/', views.SeriesDetail.as_view(), name="series-detail-cbv"),
    path('series-delete-cbv/<int:pk>/', views.SeriesDelete.as_view(), name="series-delete-cbv"),
    path('series-create-cbv/', views.SeriesCreate.as_view(), name="series-create-cbv"),
    path('series-update-cbv/<int:pk>/', views.SeriesUpdate.as_view(), name="series-update-cbv"),
    # Publisher ("izdatel") CRUD (class-based only).
    path('izdatel/', views.IzdatelList.as_view(), name="izdatel-list"),
    path('izdatel-cbv/<int:pk>/', views.IzdatelDetail.as_view(), name="izdatel-detail-cbv"),
    path('izdatel-delete-cbv/<int:pk>/', views.IzdatelDelete.as_view(), name="izdatel-delete-cbv"),
    path('izdatel-create-cbv/', views.IzdatelCreate.as_view(), name="izdatel-create-cbv"),
    path('izdatel-update-cbv/<int:pk>/', views.IzdatelUpdate.as_view(), name="izdatel-update-cbv"),
]
|
from django.shortcuts import render
# Create your views here.
from axf.models import Wheel, Nav, Mustbuy, Shop, Mainshow, Foodtypes, Goods
def home(request):
    """Render the home page with carousel, nav, must-buy and shop sections."""
    # top carousel banners
    wheels = Wheel.objects.all()
    # navigation bar entries
    navs = Nav.objects.all()
    # daily must-buy items
    mustbuys = Mustbuy.objects.all()
    # Shop rows are partitioned by primary key:
    # pk 1 = header, pk 2-3 = tabs, pk 4-7 = classes, pk 8+ = recommendations.
    shophead = Shop.objects.get(pk=1)
    shoptabs = Shop.objects.filter(pk__gt=1, pk__lt=4)
    shopclasses = Shop.objects.filter(pk__gt=3,pk__lt=8)
    shopcommends = Shop.objects.filter(pk__gt=7)
    # main content area
    mainshow = Mainshow.objects.all()
    data = {
        'wheels':wheels,
        'navs':navs,
        'mustbuys':mustbuys,
        'shophead': shophead,
        'shoptabs':shoptabs,
        'shopclasses':shopclasses,
        'shopcommends':shopcommends,
        'mainshow': mainshow,
    }
    return render(request,'home/home.html',data)
def market(request,categoryid,childid,sortid):
    """Render the flash-sale supermarket listing.

    :param categoryid: category from the URL (overwritten below — see NOTE)
    :param childid: sub-category id, '0' meaning "all goods in the category"
    :param sortid: '1' = sales desc, '2' = price asc, '3' = price desc
    """
    # all top-level food categories
    foodtypes = Foodtypes.objects.all()
    # NOTE(review): the categoryid URL argument is immediately overwritten
    # from the 'typeIndex' cookie — confirm this shadowing is intended.
    typeIndex = int(request.COOKIES.get('typeIndex',0))
    categoryid = foodtypes[typeIndex].typeid
    # child type names are encoded as "name:id#name:id#..."
    childtypename =foodtypes.get(typeid=categoryid).childtypenames
    childList = []
    dir1 = {}
    for item in childtypename.split('#'):
        arr1 = item.split(':')
        dir1 = {
            'childname':arr1[0],
            'childid':arr1[1]
        }
        childList.append(dir1)
    # childid '0' selects the whole category; otherwise filter by sub-category
    if childid == '0':
        goods = Goods.objects.filter(categoryid=categoryid)
    else:
        goods = Goods.objects.filter(categoryid=categoryid,childcid=childid)
    # apply the requested sort order
    if sortid == '1':
        goods = goods.order_by('-productnum')
    elif sortid == '2':
        goods = goods.order_by('price')
    elif sortid == '3':
        goods = goods.order_by('-price')
    data={
        'foodtypes':foodtypes,
        'goods':goods,
        'categoryid': categoryid,
        'childid': childid,
        'childList':childList
    }
    return render(request,'market/market.html',context=data)
def cart(request):
    """Render the shopping-cart page (static template, no context yet)."""
    return render(request,'cart/cart.html')
def mine(request):
    """Render the "my account" page (static template, no context yet)."""
    return render(request,'mine/mine.html')
# Regular-expression practice script: exercises re.finditer with a range of
# anchors, quantifiers and character classes against a sample address block.
import re

mystr = """Tata Limited
Dr. David Landsman, executive director
18, Grosvenor Place
London SW1X 7HSc
Phone: +44 (20) 7235 8281
Fax: +44 (20) 7235 8727
Email: tata@tata.co.uk
Website: www.europe.tata.com
Directions: View map
Tata Sons, North America
1700 North Moore St, Suite 1520
Arlington, VA 22209-1911
USA
Phone: +1 (703) 243 9787
Fax: +1 (703) 243 9791
66-66
455-4545
Email: northamerica@tata.com
Website: www.northamerica.tata.com
Directions: View map"""

# The main re entry points: findall, search, split, sub, finditer.
#   findall  -> list of matched strings
#   search   -> first match object (or None)
#   finditer -> iterator of match objects (used throughout below)
print(r"\n")  # raw string: prints the two characters backslash-n, not a newline
patt= re.compile(r"map")
# literal match: every occurrence of "map"
matches = patt.finditer(mystr)
for match in matches:
    print(match)
print(mystr[448:552])
patt= re.compile(r".")  # '.' matches any single character except a newline
matches = patt.finditer(mystr)
for match in matches:
    print(match)
patt = re.compile(r'^Tata')  # ^ anchors the match to the start of the string
matches = patt.finditer(mystr)
for match in matches:
    print(match)
patt = re.compile(r'iin$')  # $ anchors the match to the end of the string
matches = patt.finditer(mystr)
for match in matches:
    print(match)
patt = re.compile(r'ai{2}')  # 'a' followed by exactly two 'i' characters
matches = patt.finditer(mystr)
for match in matches:
    print(match)
patt = re.compile(r'(ai){1}')  # the group 'ai' exactly once
matches = patt.finditer(mystr)
for match in matches:
    print(match)
patt = re.compile(r'ai{1}|t')  # | = alternation: either 'ai' or 't'
matches = patt.finditer(mystr)
for match in matches:
    print(match)
# special (anchor/boundary) sequences
patt= re.compile(r"\ATata")  # \A: match only at the very start of the string
matches = patt.finditer(mystr)
for match in matches:
    print(match)
patt= re.compile(r"\bmap")  # \b: word boundary before "map"
matches = patt.finditer(mystr)
for match in matches:
    print(match)
patt= re.compile(r"27\b")  # "27" followed by a word boundary
matches = patt.finditer(mystr)
for match in matches:
    print(match)
patt= re.compile(r"\d{5}-\d{4}")  # ZIP+4 shape: 5 digits, dash, 4 digits
matches = patt.finditer(mystr)
for match in matches:
    print(match)
|
# Generated by Django 3.0.4 on 2020-08-12 19:49
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Rename UserIncome to Income and re-point Expense.category's FK."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('expenses', '0009_auto_20200812_2047'),
    ]

    operations = [
        migrations.RenameModel(
            old_name='UserIncome',
            new_name='Income',
        ),
        migrations.AlterField(
            model_name='expense',
            name='category',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='expenses.Category'),
        ),
    ]
|
from selenium import webdriver
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
import time
# Part 1: scrape song lyrics from genius.com into a text file.
# NOTE(review): find_element_by_xpath was removed in Selenium 4 — this
# script requires Selenium 3.x (or a port to find_element(By.XPATH, ...)).
browser = webdriver.Chrome()
browser.maximize_window()
browser.implicitly_wait(5)
song = "highest in the room"
browser.get("https://genius.com")  # opens genius.com
searchbox = browser.find_element_by_xpath("/html/body/div[1]/div/div[1]/form/input")  # search box
searchbox.send_keys(song)  # enter song name in search box
searchbox.submit()  # press enter key
# Absolute XPaths below are brittle — they break whenever the site layout changes.
song_card = browser.find_element_by_xpath('/html/body/routable-page/ng-outlet/search-results-page/div/div[2]/div[1]/div[2]/search-result-section/div/div[2]/search-result-items/div[1]/search-result-item/div/mini-song-card/a/div[2]')
song_card.click()  # clicks on song card to open lyrics
lyrics = browser.find_element_by_xpath('/html/body/routable-page/ng-outlet/song-page/div/div/div[2]/div[1]/div/defer-compile[1]/lyrics/div/div/section/p')
content = lyrics.text  # captures lyrics of song in content variable
file = open(f'C:\\Users\Rishabh\\Desktop\\{song}.txt', 'w+' , errors='ignore')
file.write(content)
file.close()
browser.quit()

# Part 2: send the saved lyrics word-by-word to a WhatsApp Web contact.
driver = webdriver.Chrome()
driver.get('https://web.whatsapp.com')
name = input("enter the name of user:")
user = driver.find_element_by_xpath('//span[@title= "{}"]'.format(name))
user.click()
file = open(f'c:\\Users\\Rishabh\\Desktop\\{song}.txt', 'r')
content = file.read()
msg_box = driver.find_element_by_xpath('//*[@id="main"]/footer/div[1]/div[2]/div')
# one message per word: type the word, then click send
for word in content.split():
    msg_box.send_keys(word)
    msg_send = driver.find_element_by_xpath('//*[@id="main"]/footer/div[1]/div[3]/button')
    msg_send.click()
driver.quit()
|
import torch
from cfg.config_general import cfg
import os
import errno
def mkdir_p(path):
    """Create *path* like ``mkdir -p``: parents included, no error if it exists.

    Raises OSError if creation fails for any reason other than the
    directory already existing (e.g. the path exists as a regular file).
    """
    # exist_ok (Python 3.2+) replaces the original Python-2-era manual
    # EEXIST/isdir check with identical semantics.
    os.makedirs(path, exist_ok=True)
def get_idx2word(word2idx):
    """Invert a word->index mapping into an index->word mapping.

    If two words share an index, the first one wins and a warning is
    printed (indices are expected to be unique).
    """
    inverted = {}
    for word, idx in word2idx.items():
        if idx in inverted:
            print("WARNING FOUND INDEX IN IDX2WORD BUT SHOULDNT HAVE")
        else:
            inverted[idx] = word
    return inverted
def save_data_results(res, out_dir, filename='quantitative_eval.csv'):
    """Append one tab-separated line of *res* values to out_dir/filename.

    Each value is stringified and followed by a tab (including the last
    one, matching the historical file format), then a newline.
    """
    line = "".join(str(item) + "\t" for item in res)
    with open('%s/%s' % (out_dir, filename), 'a') as fp:
        fp.write(line + '\n')
def torch_integer_to_one_hot(integer_tensor, num_classes):
    """One-hot encode a tensor of class indices.

    Expected input shape is [batch_size, 1] or [batch_size, seq_len]
    (the sequence case is unsqueezed to [batch_size, seq_len, 1] below).
    Returns a float tensor of shape [batch_size, num_classes] or
    [batch_size, seq_len, num_classes] with a 1 at each index position.
    Moves tensors to GPU when cfg.CUDA is set.
    """
    one_tensor = torch.tensor(1)
    # sequence case: a second dimension larger than 1 means [bs, seq_len]
    if len(integer_tensor.shape)>1 and integer_tensor.shape[1]>1:
        rel_dim = 2
        ground_truth_one_hot = torch.FloatTensor(integer_tensor.shape[0], integer_tensor.shape[1], num_classes) # bs x (seq_len x) n_c
        # scatter_ needs the index tensor to have the same rank as the target
        integer_tensor = integer_tensor.unsqueeze(rel_dim)
    else:
        rel_dim = 1
        ground_truth_one_hot = torch.FloatTensor(integer_tensor.shape[0], num_classes) # bs x (seq_len x) n_c
    if cfg.CUDA:
        one_tensor = one_tensor.cuda()
        ground_truth_one_hot = ground_truth_one_hot.cuda()
    # FloatTensor(...) is uninitialised memory — zero it before scattering
    ground_truth_one_hot.zero_()
    ground_truth_one_hot.scatter_(rel_dim, integer_tensor, one_tensor)
    return ground_truth_one_hot
def tonp(pt_var):
    """Detach a torch tensor and return it as a NumPy array (debug helper)."""
    detached = pt_var.detach()
    return detached.cpu().numpy()
def weights_init(m):
    """Initialise a module's parameters in place, dispatching on class name.

    Conv layers (except BasicConv2d/MeanPool): weights ~ N(0, 0.02).
    BatchNorm/Layernorm: weights ~ N(1, 0.02), bias = 0.
    Linear: weights ~ N(0, 0.02), bias = 0 when present.
    """
    name = m.__class__.__name__
    # skip BasicConv2d (already initialised by inception) and MeanPool layers
    is_plain_conv = ('Conv' in name
                     and 'BasicConv2d' not in name
                     and 'MeanPool' not in name)
    if is_plain_conv:
        m.weight.data.normal_(0.0, 0.02)
    elif 'BatchNorm' in name or 'Layernorm' in name:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)
    elif 'Linear' in name:
        m.weight.data.normal_(0.0, 0.02)
        if m.bias is not None:
            m.bias.data.fill_(0.0)
|
# BUGFIX: `import torch.nn as nn` binds only the name `nn`, so the bare
# name `torch` used below was undefined. Import torch explicitly.
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

# Fixed seed for reproducible runs; CUDA records GPU availability.
torch.manual_seed(1)
CUDA = torch.cuda.is_available()
|
from datetime import datetime
from pathlib import WindowsPath, PosixPath
from colorama import Fore, Style
import pandas as pd
import time
import os
from .ModelClass import ModelClass
# import config.py variables
from .config import db, server, user, _table, column_index, _sample_date, _sample_time, _time_span, column_name
# import helper functions from helper.py
from .helper import test_sql_details, test_date_and_time, default_model, default_query, check_output_dirs, convert_time, time_calc, convert_date
# default SQL driver (Windows)
# default SQL driver (Windows)
driver = 'SQL SERVER'
linux = False
os_name = 'Windows'
default_path = ''
# os.name is 'nt' only on Windows; everything else is treated as Linux here
if os.name != 'nt':
    linux = True
    driver = 'ODBC Driver 17 for SQL Server'
    os_name = 'Linux'
    default_path = PosixPath.home()
else:
    default_path = WindowsPath.home()
# suppress pandas splice copy warning
pd.options.mode.chained_assignment = None
def create_model(sample_date=_sample_date,
                 sample_time=_sample_time,
                 table=_table,
                 time_span=_time_span,
                 tag_name=column_name,
                 model_path=None,
                 query_path=None):
    """Create CSV model from database.

    Connects to the configured SQL Server, validates the inputs, then
    drives a ModelClass instance through query -> base dataframe ->
    threaded subset processing -> final model CSV, printing colourised
    progress along the way.

    :param str sample_date: Date of sample YYYY-MM-DD
    :param str sample_time: Time of sample HH:MM:SS
    :param str table: Name of target table in database
    :param str time_span: Length of time needed for data in hours
    :param str tag_name: Column name for tags
    :param str model_path: Output directory for CSV model (default used if None)
    :param str query_path: Output directory for CSV query (default used if None)
    """
    # display SQL connection details
    print(
        f'{Fore.CYAN}\nCONNECTION DETAILS:{Style.RESET_ALL}{Fore.LIGHTWHITE_EX}'
        f'\n\tSERVER: {server}'
        f'\n\tDRIVER: {driver}'
        f'\n\tDB: {db}'
        f'\n\tUSER: {user}'
        f'\n{Style.RESET_ALL}')
    # display OS name
    print(f'{Fore.GREEN}Running on {Fore.LIGHTWHITE_EX}{os_name}{Style.RESET_ALL}\r\n')
    # test sql connection and table
    test_sql_details(server, table)
    # test sample date and time
    test_date_and_time(sample_date, sample_time)
    default_m = False
    default_q = False
    # use default output directories if none were specified
    if model_path is None:
        model_path = default_model(default_path)
        default_m = True
    if query_path is None:
        query_path = default_query(default_path)
        default_q = True
    # check path of model and query output directories
    check_output_dirs(model_path, query_path, default_m, default_q)
    # start timer for the elapsed-time report at the end
    start_time = time.time()
    # combine date and time strings into a single datetime
    _dt = datetime.combine(convert_date(sample_date), convert_time(sample_time))
    model = ModelClass(date_time=_dt, time_span=time_span, table=table, column_index=column_index, column_name=tag_name)
    model.set_model_output(model_path)
    model.set_query_output(query_path)
    model.create_query_df()
    model.init_model_df()
    # display output for file save
    print(
        f'{Fore.LIGHTGREEN_EX}'
        f'\nSQL Query Saved: {Fore.YELLOW}{model.get_query_output()}'
        f'{Style.RESET_ALL}')
    model.create_query_csv()
    # display output for size of dataframe
    print(
        f'{Fore.LIGHTGREEN_EX}'
        f'\tBase Dataframe Created with {Fore.YELLOW}{len(model.get_model_df().columns)} '
        f'{Fore.LIGHTGREEN_EX}columns.{Fore.LIGHTGREEN_EX}'
        f'{Style.RESET_ALL}')
    # subset processing is threaded inside ModelClass; wait for completion
    model.create_subset_list()
    model.wait_for_threads_of_subclass()
    model.set_model_df_at_time_step()
    # display output for file save
    print(
        f'{Fore.LIGHTGREEN_EX}'
        f'\nOutput Model Saved: {Fore.YELLOW}{model.get_model_output()}'
        f'{Style.RESET_ALL}')
    model.create_model_csv()
    # display time elapsed
    end_time = time.time()
    hours_t, min_t, sec_t = time_calc(end_time - start_time)
    print(
        f'{Fore.LIGHTBLUE_EX}'
        f'\nTime Elapsed: {Fore.LIGHTMAGENTA_EX}{hours_t} hours {min_t} minutes {sec_t} seconds.'
        f'{Style.RESET_ALL}')
# MAIN
if __name__ == '__main__':
    """Main Loop"""
    # entry point: build the model using the config-file defaults
    create_model()
|
# Generated by Django 3.0.2 on 2020-01-09 09:45
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration: create Student and Marks (FK to Student) tables."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Student',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=50)),
                ('last_name', models.CharField(max_length=50)),
                ('PRN', models.CharField(max_length=10, unique=True)),
            ],
        ),
        migrations.CreateModel(
            name='Marks',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('semester', models.CharField(choices=[(1, 'Sem_1'), (2, 'Sem_2'), (3, 'Sem_3'), (4, 'Sem_4'), (5, 'Sem_5'), (6, 'Sem_6'), (7, 'Sem_7'), (8, 'Sem_8')], default=1, max_length=10)),
                ('subject1', models.CharField(max_length=20)),
                ('subject2', models.CharField(max_length=20)),
                ('subject3', models.CharField(max_length=20)),
                ('subject4', models.CharField(max_length=20)),
                ('PRN', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='result.Student')),
            ],
        ),
    ]
|
from django.shortcuts import render, redirect
from .models import User
from django.contrib import messages
import bcrypt
# Create your views here.
def index(request):
    """Render the landing page with the login/registration forms."""
    return render(request, "index.html")
# def validate_login(request):
# user = User.objects.get(email=request.POST['email']) # hm...¿Es realmente una buena idea usar aquí el método get?
# if bcrypt.checkpw(request.POST['password'].encode(), user.pw_hash.encode()):
# print("password match")
# else:
# print("failed password")
def register(request):
    """Validate a registration POST and create the user with a bcrypt hash."""
    if request.method == 'POST':
        errors = User.objects.reg_validator(request.POST)
        if len(errors) > 0:
            # flash every validation error, then bounce back to the form
            for key, value in errors.items():
                messages.error(request, value)
            return redirect('/')
        else:
            password = request.POST['password']
            # gensalt() embeds the salt inside the resulting hash string
            pw_hash = bcrypt.hashpw(password.encode(), bcrypt.gensalt()).decode()
            print(pw_hash)
            new_user = User.objects.create(name=request.POST['name'], alias=request.POST['alias'], email=request.POST['email'], password=pw_hash)
            print(new_user)
            # stash identity in the session for subsequent requests
            request.session['user_id'] = new_user.id
            request.session['user_name'] = f"{new_user.name} {new_user.alias}"
            request.session['status'] = "registered"
            return redirect("/success")  # never render on a POST — always redirect!
    return redirect("/")
def login(request):
    """Validate a login POST and start a session on a bcrypt password match."""
    if request.method == 'POST':
        errors = User.objects.log_validator(request.POST)
        if len(errors) > 0:
            # flash every validation error, then bounce back to the form
            for key, value in errors.items():
                messages.error(request, value)
            return redirect('/')
        else:
            user = User.objects.filter(alias=request.POST['alias'])
            if user:
                # only one user can have this alias, so take [0]
                logged_user = user[0]
                if bcrypt.checkpw(request.POST['password'].encode(), logged_user.password.encode()):
                    request.session['user_id'] = logged_user.id
                    request.session['user_name'] = f"{logged_user.name} {logged_user.alias}"
                    request.session['status'] = "Logged in"
                    return redirect('/success')
                else:
                    messages.error(request, "password invalid")
    return redirect("/")
def success(request):
    """Render the post-login/registration success page."""
    return render (request, "success.html")
'''
Description
Archana is very fond of strings. She likes to solve many questions related to strings. She comes across a problem which she is unable to solve. Help her to solve. The problem is as follows:-Given is a string of length L. Her task is to find the longest string from the given string with characters arranged in descending order of their ASCII code and in arithmetic progression. She wants the common difference should be as low as possible(at least 1) and the characters of the string to be of higher ASCII value.
Input
The first line of input contains an integer T denoting the number of test cases. Each test contains a string s of length L.
1<= T <= 100
3<= L <=1000
A<=s[i]<=Z
The string contains minimum three different characters.
Output
For each test case print the longest string. Case 1: Two strings of maximum length are possible — “CBA” and “RPQ”. But she wants the string to be of higher ASCII value; therefore, the output is “RPQ”. Case 2: The string of maximum length is “JGDA”.
Sample Input 1
2
ABCPQR
ADGJPRT
Sample Output 1
RQP
JGDA
'''
|
"""
座右铭:吃饱不饿,努力学习
@project:预科
@author:Mr.Huang
@file:类变量和实例变量.PY
@ide:PyCharm
@time:2018-07-30 15:47:33
"""
#类变量:只有类名才能调用的变量,类变量一般在函数体之外
#实例变量:
class Employee(object):
    """Teaching example: class variables vs instance variables."""

    # Class variable, shared by every instance: running employee head-count.
    # Accessed through the class name (Employee.total_Emplyee_number).
    total_Emplyee_number = 0

    def __init__(self, name, salary):
        # Instance variables: unique to each Employee object.
        self.name = name
        self.salary = salary
        # The class is initialised only once, so this counter is shared
        # across all instances and incremented per construction.
        Employee.total_Emplyee_number += 1

    def get_total_number(self):
        # label translates to "total number of employees"
        print('员工总体人数:', Employee.total_Emplyee_number)
# Class variables are shared between all objects; the class is initialised once.
e1=Employee('张三',6000)
e1.get_total_number()
# Instance variables are accessed as: object_name.instance_variable
e2=Employee('李四',8000)
e2.get_total_number()
|
# https://www.codewars.com/kata/51ba717bb08c1cd60f00002f/train/python
"""
A format for expressing an ordered list of integers is to use a comma separated list of either individual integers
or a range of integers denoted by the starting integer separated from the end integer in the range by a dash, '-'.
The range includes all integers in the interval including both endpoints.
It is not considered a range unless it spans at least 3 numbers. For example ("12, 13, 15-17")
Complete the solution so that it takes a list of integers in increasing order and returns a correctly formatted string in the range format.
Example:
solution([-6, -3, -2, -1, 0, 1, 3, 4, 5, 7, 8, 9, 10, 11, 14, 15, 17, 18, 19, 20])
# returns "-6,-3-1,3-5,7-11,14,15,17-20"
"""
# %%
from typing import List
def solution(args: List[int]):
    """Format an increasing list of integers in range notation.

    Runs of three or more consecutive integers collapse to "a-b";
    shorter runs are emitted as individual numbers, all comma-joined.
    The previous version only printed the first/last consecutive pairs
    and returned None — it never produced the documented output.

    >>> solution([12, 13, 15, 16, 17])
    '12,13,15-17'
    """
    values = sorted(args)
    parts = []
    i = 0
    while i < len(values):
        # extend j to the end of the consecutive run starting at i
        j = i
        while j + 1 < len(values) and values[j + 1] == values[j] + 1:
            j += 1
        if j - i >= 2:  # a run of at least 3 numbers becomes "a-b"
            parts.append(f"{values[i]}-{values[j]}")
        else:
            parts.extend(str(v) for v in values[i:j + 1])
        i = j + 1
    return ",".join(parts)
# %%
# demo cell: exercise solution() on a small example
solution([1, 2, 3, 4, 6, 7, 8, 9])
# %%
|
"""
* Exercise 2.7, Sutton.
* k = 10
"""
import numpy as np
from random import random as rand, randint as randrange
import argparse
import matplotlib.pyplot as plt
class KArmTestBed:
    """K-armed bandit test bed (Sutton & Barto, exercise 2.7).

    NOTE(review): run_test and the update rule read the module-level
    argparse namespace `args` (env, alpha) — the class cannot be used
    without the script's argument parsing having run first.
    """

    def __init__(self, num_simulations, time_steps, k):
        self.num_simulations = num_simulations  # independent runs per test
        self.time_steps = time_steps            # pulls per run
        self.k = k                              # number of arms
        self.results = {}                       # epsilon -> metrics dict

    """
    * Add a test with a particular epsilon. Each test case is associated with one epsilon
    * value.
    """
    def add_test(self, epsilon):
        # the most recently added epsilon is the one run_test() uses
        self.epsilon = epsilon
        self.results[self.epsilon] = 0

    """
    * Run a test with a particular epsilon
    * Stores average reward and average cumulative reward at time t for the test
    * qa starts off with equal values, and take random walks
    """
    def run_test(self):
        total_reward_at_time_t = [0] * self.time_steps
        total_cumulative_reward_at_time_t = [0] * self.time_steps
        total_optimal_actions_at_time_t = [0] * self.time_steps
        for sim in range(self.num_simulations):
            # stationary env: fixed true values drawn once per simulation
            if args.env == 's':
                qa = np.random.normal(0, 1, self.k)
                optimal_action_arm = np.argmax(qa)
            else:
                qa = [0] * self.k
                optimal_action_arm = 0
            # No epsilon greedy, but with optimistic estimate vs epsilon greedy with optimistic estimate
            if self.epsilon == 0:
                action_reward_estimate = [5] * self.k
            else:
                action_reward_estimate = [0] * self.k
            cumulative_reward = 0
            o = 0  # unbiased-step-size trace (exercise 2.7's o_n)
            for time in range(self.time_steps):
                # If env is non stationary, modify true values at each time step
                if args.env != 's':
                    qa = list(map(lambda x: x + np.random.normal(0, 0.01), qa))
                    optimal_action_arm = qa.index(max(qa))
                # epsilon-greedy action selection
                if rand() > self.epsilon:
                    arm = action_reward_estimate.index(max(action_reward_estimate))
                else:
                    arm = randrange(0, self.k-1)
                if arm == optimal_action_arm:
                    total_optimal_actions_at_time_t[time] += 1
                # reward ~ N(q*(arm), 1)
                reward = np.random.normal(qa[arm], 1)
                total_reward_at_time_t[time] += reward
                cumulative_reward += reward
                total_cumulative_reward_at_time_t[time] += cumulative_reward
                # unbiased constant-step-size update: beta_n = alpha / o_n
                alpha = args.alpha
                o = o + alpha*(1-o)
                beta = alpha/o
                action_reward_estimate[arm] = action_reward_estimate[arm] + \
                    beta * (reward - action_reward_estimate[arm])
        self.results[self.epsilon] = {
            "Average Cumulative Reward at time t": list(map(lambda x: x / self.num_simulations, total_cumulative_reward_at_time_t)),
            "Average Reward at time t": list(map(lambda x: x / self.num_simulations, total_reward_at_time_t)),
            "Percentage Optimal Action at time t": list(map(lambda x: (x / self.num_simulations) * 100, total_optimal_actions_at_time_t))
        }

    """
    * Plot the average reward at each time step.
    * Plot the percentage of optimal action at each time step
    """
    def plot_results(self):
        legend = []
        plt.figure(1)
        for epsilon in self.results:
            plt.plot(np.arange(self.time_steps),
                     self.results[epsilon]["Average Reward at time t"])
            legend.append("Epsilon = " + str(epsilon))
        plt.legend(legend, loc='lower right')
        plt.xlabel("Time Steps")
        plt.ylabel("Average Reward")
        plt.figure(2)
        # NOTE(review): `legend` is not reset here, so figure 2's legend list
        # contains duplicated entries (one extra copy per epsilon) — likely a bug.
        for epsilon in self.results:
            plt.plot(np.arange(self.time_steps),
                     self.results[epsilon]["Percentage Optimal Action at time t"])
            legend.append("Epsilon = " + str(epsilon))
        plt.legend(legend, loc='lower right')
        plt.xlabel("Time Steps")
        plt.ylabel("% Optimal Action")
        plt.show()
"""
* The number of tests and the provided epsilon values should be of same length
"""
if __name__ == '__main__':
    """
    * Validate timesteps and number of simulations
    """
    def check_positive(value):
        # argparse type-callable: accept only strictly positive integers
        ivalue = int(value)
        if ivalue <= 0:
            raise argparse.ArgumentTypeError(
                "%s is an invalid positive int value" % value)
        return ivalue

    """
    * Validate the value of epsilon or alpha entered by the user
    """
    def check_epsilon_alpha(value):
        # argparse type-callable: accept only floats in [0, 1]
        epsilon_alpha = float(value)
        if epsilon_alpha < 0 or epsilon_alpha > 1:
            raise argparse.ArgumentTypeError(
                "%s is an invalid epsilon/alpha value" % str(value))
        return epsilon_alpha

    """
    * Parse Arguments
    """
    parser = argparse.ArgumentParser(
        description='This program runs a K-Arm Test Bed simulation')
    parser.add_argument('-s', '--num_simulations', action='store',
                        help="Number of Simulations to Run", type=check_positive, default=2000)
    parser.add_argument('-t', '--time_steps', action='store',
                        help="Number of Time Steps per simulation", type=check_positive, default=1000)
    parser.add_argument('-k', '--num_arms', action='store',
                        help="Number of Arms", type=check_positive, default=10)
    parser.add_argument('-n', '--num_tests', action='store',
                        help='Number of tests to run', default=2, type=check_positive)
    parser.add_argument('-e', '--epsilon', nargs='+', default=[0, 0.1],
                        help="Epsilon value for e-greedy algorithm", type=check_epsilon_alpha)
    parser.add_argument('-a', '--alpha', default=0.1,
                        help="Constant step size for Value update", type=check_epsilon_alpha)
    parser.add_argument('--env', action='store',
                        help='Stationary or Non Stationary env. -> s/n', default='n')
    args = parser.parse_args()

    """
    * Initialize and run simulations, plot results
    """
    bandit = KArmTestBed(args.num_simulations, args.time_steps, args.num_arms)
    print("Test Conditions:\nNumber of Simulations per Test: {}\nNumber of Time Steps per Simulation: {}\nNumber of Arms: {}\n"
          .format(args.num_simulations, args.time_steps, args.num_arms))
    # each test consumes one epsilon, so the counts must match
    if args.num_tests != len(args.epsilon):
        raise argparse.ArgumentTypeError(
            "Number of tests should be equal to number of epsilons")
    for test in range(args.num_tests):
        print("Running test {} with epsilon {}".format(
            test+1, args.epsilon[test]))
        bandit.add_test(args.epsilon[test])
        bandit.run_test()
    bandit.plot_results()
|
#!/usr/bin/env python3
"""
Sort out the six best and six worst months with a Google stock's historical prices file
Assignment 3,INF1340 Fall 2014
"""
__author__ = 'Xiwen Zhou, Juntian Wang,Susan Sim'
__email__ = "xw.zhou@mail.utoronto.ca,justinjtwang@gmail.com,ses@drsusansim.org"
__copyright__ = "2014 Susan Sim"
__license__ = "MIT License"
__status__ = "Prototype"
# imports one per line
import json
import os.path
import math
def read_json_from_file(file_name):
    """Load and return the JSON content of *file_name*.

    The path is resolved relative to this module's directory (an absolute
    *file_name* is used as-is, since os.path.join discards the prefix).

    :param file_name: json file
    :return: the parsed data, a list of records to be sorted
    """
    full_path = os.path.join(os.path.dirname(__file__), file_name)
    with open(full_path) as file_handle:
        return json.loads(file_handle.read())
class StockMiner:
    """Computes monthly statistics (averages, std deviation) for one stock."""
    def __init__(self, stock_file_name):
        """
        Initializes variables, constructor for class StockMiner
        :param stock_file_name: json file with daily stock records
        """
        self.stock_data = read_json_from_file(stock_file_name)
        # month "YYYY-MM" -> [sum(close * volume), sum(volume)]
        self.dic_monthly = {}
        # list of ("YYYY/MM", volume-weighted average close) tuples
        self.monthly_averages_list = []
        # Kept for backward compatibility with existing callers; the
        # statistics below now use locals instead of these accumulators.
        self.average = 0
        self.deviation_list = []
        self.sum = 0

    def month_averages(self):
        """
        Calculates monthly volume-weighted average close prices.

        Rebuilds self.monthly_averages_list from scratch each call; the
        previous version appended, so calling it twice duplicated entries.
        :return: None; result is stored in self.monthly_averages_list
        """
        self.dic_monthly = {}
        for daily_data in self.stock_data:
            month = daily_data["Date"][0:7]
            weighted = daily_data["Close"] * daily_data["Volume"]
            if month in self.dic_monthly:
                self.dic_monthly[month][0] += weighted
                self.dic_monthly[month][1] += daily_data["Volume"]
            else:
                self.dic_monthly[month] = [weighted, daily_data["Volume"]]
        # "-" replaced by "/" to match the expected output format;
        # averages rounded to 2 decimals.
        self.monthly_averages_list = [
            (month.replace("-", "/"), round(totals[0] / totals[1], 2))
            for month, totals in self.dic_monthly.items()]

    def six_best_months(self):
        """
        Sorts out the six months with the highest averages.
        :return: list of (month, average) tuples, best first
        """
        # sorted via list.sort is O(n log n); the previous hand-rolled
        # bubble sort was O(n^2).
        self.monthly_averages_list.sort(key=lambda entry: entry[1],
                                        reverse=True)
        return self.monthly_averages_list[0:6]

    def six_worst_months(self):
        """
        Sorts out the six months with the lowest averages.
        :return: list of (month, average) tuples, worst first
        """
        self.monthly_averages_list.sort(key=lambda entry: entry[1])
        return self.monthly_averages_list[0:6]

    def standard_deviation(self):
        """
        Population standard deviation of the monthly averages.
        :return: float rounded to 2 decimals
        """
        self.month_averages()
        averages = [entry[1] for entry in self.monthly_averages_list]
        # Locals instead of self.sum / self.deviation_list so repeated
        # calls do not accumulate stale values.
        mean = sum(averages) / len(averages)
        variance = sum((value - mean) ** 2 for value in averages) / len(averages)
        return round(math.sqrt(variance), 2)
def read_stock_data(stock_name, stock_file_name):
    """Build the module-level StockMiner for *stock_file_name*.

    Creates the global ``stock`` instance used by the module-level
    six_best_months()/six_worst_months() helpers and precomputes its
    monthly averages.
    :param stock_name: string naming the stock (not used here)
    :param stock_file_name: json file with the stock's daily data
    """
    global stock
    miner = StockMiner(stock_file_name)
    miner.month_averages()
    stock = miner
def six_best_months():
    """Return the six best months of the globally loaded stock.

    Thin wrapper for the test file; requires read_stock_data() to have
    been called first so the module-level ``stock`` exists.
    :return: list of (month, average) tuples, best first
    """
    return stock.six_best_months()
def six_worst_months():
    """Return the six worst months of the globally loaded stock.

    Thin wrapper for the test file; requires read_stock_data() to have
    been called first so the module-level ``stock`` exists.
    :return: list of (month, average) tuples, worst first
    """
    return stock.six_worst_months()
def compare_two_stocks(stock_name_1, stock_file_name_1, stock_name_2, stock_file_name_2):
    """
    Identify which of the two stock files has the higher standard deviation
    of monthly averages.
    :param stock_name_1: string, name reported for the first stock
    :param stock_file_name_1: json file containing the first stock's data
    :param stock_name_2: string, name reported for the second stock
    :param stock_file_name_2: json file containing the second stock's data
    :return: the name of the stock with the higher standard deviation,
             or "Equal" when both deviations match
    """
    # Compute each deviation exactly once; the original re-evaluated
    # standard_deviation() (a full recomputation) for each comparison.
    deviation_1 = StockMiner(stock_file_name_1).standard_deviation()
    deviation_2 = StockMiner(stock_file_name_2).standard_deviation()
    if deviation_1 < deviation_2:
        return stock_name_2
    elif deviation_1 > deviation_2:
        return stock_name_1
    else:
        return "Equal"
|
"""Module that provides a data structure representing a quantum system.
Data Structures:
QSystem: Quantum System, preferred over QRegistry (can save a lot of space)
Functions:
superposition: join two registries into one by calculating tensor product.
"""
import numpy as np
from qsimov.structures.qstructure import QStructure, _get_qubit_set, \
_get_op_data, _get_key_with_defaults
from qsimov.structures.qregistry import QRegistry, superposition
class QSystem(QStructure):
    """Quantum System, preferred over QRegistry (can save a lot of space).

    Internally keeps a list ``regs`` of independent QRegistry objects,
    each paired with the list of system-wide qubit ids it holds, plus a
    ``qubitMap`` from qubit id to the index of its registry.  Keeping
    unentangled qubits in separate registries avoids materializing the
    full 2**n state vector.
    """

    def __init__(self, num_qubits, data=None, doki=None, verbose=False):
        """Initialize QSystem to state 0.

        num_qubits -> number of QuBits in the system.
        data -> optional serialized state as produced by get_data()
        doki -> low-level simulator module (imported lazily when None)
        verbose -> verbosity flag forwarded to lower-level calls
        """
        if doki is None:
            import doki
        self.doki = doki
        if data is None:
            if num_qubits is None:
                # Empty shell; the caller fills the fields in afterwards
                # (used by clone/measure/apply_gate/join_systems).
                self.regs = None
                self.qubitMap = None
                self.num_qubits = 0
            else:
                # One single-qubit registry per qubit, all in state |0>.
                self.regs = [[QRegistry(1, doki=self.doki), [id]]
                             for id in range(num_qubits)]
                self.qubitMap = {id: id for id in range(num_qubits)}
                self.num_qubits = num_qubits
        else:
            # Rebuild from a serialized snapshot (see get_data()).
            self.regs = [[QRegistry(None, data=data["regs"][i][0], doki=self.doki),
                          data["regs"][i][1]]
                         for i in range(len(data["regs"]))]
            self.qubitMap = data["qubit_map"]
            self.num_qubits = data["num_qubits"]
        self.verbose = verbose

    def get_data(self):
        """Return a serializable snapshot of this system's state."""
        return {"regs": [[self.regs[i][0].get_data(), self.regs[i][1]]
                         for i in range(len(self.regs))],
                "qubit_map": self.qubitMap,
                "num_qubits": self.num_qubits}

    def free(self, deep=False):
        """Release memory held by the QSystem.

        deep -> also free each underlying QRegistry, not just the lists.
        """
        if self.regs is not None:
            if deep:
                for reg, _ in self.regs:
                    if isinstance(reg, QRegistry):
                        reg.free()
            del self.regs
            del self.qubitMap
            self.regs = None
            self.qubitMap = None

    def clone(self, deep=False):
        """Clone this QSystem.

        deep -> clone the underlying QRegistry objects too; otherwise the
        clone shares them with self (id lists are always copied).
        """
        new_sys = QSystem(None, doki=self.doki)
        new_sys.num_qubits = self.num_qubits
        new_sys.qubitMap = {}
        for id in self.qubitMap:
            new_sys.qubitMap[id] = self.qubitMap[id]
        new_sys.regs = [[self.regs[id][0],
                         self.regs[id][1][:]]
                        if not deep else
                        [self.regs[id][0].clone(),
                         self.regs[id][1][:]]
                        for id in range(len(self.regs))]
        return new_sys

    def __del__(self):
        """Clean after deletion."""
        self.free()

    def prob(self, id, num_threads=-1):
        """Get the odds of getting 1 when measuring specified qubit."""
        id = _get_qubit_set(self.get_num_qubits(), [id], True, "argument")[0]
        reg, ids = self.regs[self.qubitMap[id]]
        # Translate the system-wide qubit id to the registry-local index.
        new_id = None
        for i in range(len(ids)):
            if ids[i] == id:
                new_id = i
                break
        if new_id is None:
            raise RuntimeError("Couldn't find id in any reg, " +
                               "please report this bug.")
        return reg.prob(new_id, num_threads=num_threads)

    def get_sizes(self):
        """Return the number of elements of each registry in the system."""
        return ((reg[0].get_state_size(), reg[1])
                if type(reg[0]) == QRegistry
                else (1, reg[1])
                for reg in self.regs)

    def get_state_size(self):
        """Return the number of elements in the state vector of the system."""
        total = 0
        for reg in self.regs:
            if type(reg[0]) == QRegistry:
                total += reg[0].get_state_size()
            else:
                total += 1
        return total

    def get_split_num_qubits(self):
        """Return the number of qubits in each registry of the system."""
        return (reg[0].get_num_qubits()
                if type(reg[0]) == QRegistry
                else 1  # When we measure with remove=True
                for reg in self.regs)

    def get_num_qubits(self):
        """Return the number of qubits in this system."""
        return self.num_qubits

    def measure(self, ids, random_generator=np.random.rand,
                num_threads=-1, deep=False):
        """Measure specified qubits of this system and collapse.

        Positional arguments:
            ids -> List of QuBit ids that have to be measured
        Keyworded arguments:
            random_generator -> function without arguments that returns
                                a random real number in [0, 1)
            num_threads -> number of threads the backend may use
            deep -> clone untouched registries instead of sharing them
        Return:
            (new QSystem, list with the value obtained after each measure)
        """
        num_qubits = self.get_num_qubits()
        ids = _get_qubit_set(num_qubits, ids, False, "ids")
        if ids is None:
            raise ValueError("ids cannot be None")
        split_ids = {reg_id: set() for reg_id in range(len(self.regs))}
        for qubit_id in ids:
            reg_id = self.qubitMap[qubit_id]
            split_ids[reg_id].add(qubit_id)
        # In split ids we have reg_id -> set of ids to measure in that reg
        split_ids = {k: v for k, v in split_ids.items() if len(v) > 0}
        result = [None for i in range(num_qubits)]
        # Here we have the registries that have not been used
        untouched_regs = {i for i in range(len(self.regs))
                          if i not in split_ids}
        # We create a new QSystem with the regs that have not been used
        new_sys = QSystem(None, doki=self.doki)
        new_sys.regs = []
        new_sys.qubitMap = {}
        exception = None
        try:
            for reg_id in untouched_regs:
                reggie, reg_ids = self.regs[reg_id]
                if deep:
                    reggie = reggie.clone()
                    reg_ids = reg_ids[:]
                new_sys.regs.append((reggie, reg_ids))
            for reg_id in range(len(untouched_regs)):
                for qubit_id in new_sys.regs[reg_id][1]:
                    new_sys.qubitMap[qubit_id] = reg_id
            new_sys.num_qubits = self.num_qubits
            # We iterate through the registries that have a qubit in ids
            for reg_id in split_ids:
                partial_ids = split_ids[reg_id]  # ids of QSystem to measure
                new_reg = None
                partial_result = None
                reg, reg_ids = self.regs[reg_id]
                # Ids to measure in the QRegistry (not in the whole QSystem)
                # mapped to the id in the QSystem
                new_ids = {i: reg_ids[i] for i in range(len(reg_ids))
                           if reg_ids[i] in partial_ids}
                # Not measured ids in this registry
                new_reg_ids = [id for id in reg_ids if id not in partial_ids]
                # We measure registries
                aux = reg.measure(new_ids.keys(),
                                  random_generator=random_generator,
                                  num_threads=num_threads)
                new_reg, partial_result = aux
                # We add the results to the result list
                for local_id in new_ids:
                    result[new_ids[local_id]] = partial_result[local_id]
                # We add the new registry to the list of regs
                if len(new_reg_ids) > 0:
                    aux_id = len(new_sys.regs)
                    # BUG FIX: the post-measurement registry only holds the
                    # unmeasured qubits, so it must be stored with
                    # new_reg_ids.  The original stored the full reg_ids,
                    # leaving measured ids in this registry's id list too
                    # (they are re-added below as one-qubit registries),
                    # which broke the id -> local-index mapping.
                    new_sys.regs.append((new_reg, new_reg_ids))
                    for id in new_reg_ids:
                        new_sys.qubitMap[id] = aux_id
                else:
                    new_reg.free()
                # We add new registries with only the measured qubits
                for id in partial_ids:
                    one_reg = QRegistry(1, doki=self.doki,
                                        verbose=self.verbose)
                    if result[id]:
                        # Flip |0> to |1> to reflect the measured value.
                        one_aux = one_reg.apply_gate("X")
                        one_reg.free()
                        one_reg = one_aux
                    new_sys.regs.append((one_reg, [id]))
                    new_sys.qubitMap[id] = len(new_sys.regs) - 1
        except Exception as ex:
            exception = ex
        if exception is not None:
            del new_sys
            raise exception
        return (new_sys, result)

    def as_qregistry(self, num_threads=-1, canonical=False):
        """Return this system as a single QRegistry.

        Merges every internal registry via tensor product, drops qubits
        that have collapsed to classic values and sorts the remaining
        qubits by their system-wide id using SWAP gates.
        """
        aux_reg = None
        new_reg = None
        new_ids = []
        first = True
        for reg_id in range(len(self.regs)):
            reg, ids = self.regs[reg_id]
            if new_reg is None:
                new_reg = reg
            else:
                aux_reg = superposition(new_reg, reg,
                                        num_threads=num_threads,
                                        verbose=self.verbose)
            new_ids = ids + new_ids
            if aux_reg is not None:
                if not first:
                    del new_reg
                first = False
                new_reg = aux_reg
                aux_reg = None
        # Here we remove the unused ids
        q_ids = [id for id in new_ids if new_reg.get_classic(id) is None]
        swap_ids = np.argsort(np.argsort(q_ids))
        # And we sort the remaining qubits by qubit_id
        for i in range(len(swap_ids)):
            while swap_ids[i] != i:
                swap_targets = [swap_ids[i], swap_ids[swap_ids[i]]]
                swap_ids[swap_targets[0]], swap_ids[i] = swap_targets
                aux_reg = new_reg.apply_gate("SWAP",
                                             targets=[i, swap_targets[0]],
                                             num_threads=num_threads)
                if not first:
                    del new_reg
                new_reg = aux_reg
        return new_reg

    def get_state(self, key=None, canonical=False):
        """Return the state vector (or a slice of it) of the whole system."""
        return self.as_qregistry().get_state(key=key, canonical=canonical)

    def get_classic(self, id):
        """Return classic bit value.

        Always None at the QSystem level: measured qubits are stored as
        separate one-qubit registries (see measure()).
        """
        return None

    def apply_gate(self, gate, targets=None, controls=None, anticontrols=None,
                   num_threads=-1, deep=False, target=None, control=None, anticontrol=None):
        """Apply specified gate to specified qubit with specified controls.

        Positional arguments:
            gate: string with the name of the gate to apply, or a QGate
        Keyworded arguments:
            targets: id(s) of the qubit(s) the gate will target
            controls: id or list of ids of the qubit that will act as
                      controls
            anticontrols: id or list of ids of the qubit that will act as
                          anticontrols
            num_threads: number of threads to use
            deep: whether untouched registries are cloned or shared
            target/control/anticontrol: deprecated aliases of the plural
                                        arguments above
        Return:
            a new QSystem with the gate applied (self is returned
            unchanged when a classic control/anticontrol already fails)
        """
        if target is not None:
            print("[WARNING] target keyworded argument is deprecated. Please use targets instead")
            if targets is not None:
                raise ValueError("target argument can't be set alongside targets")
            targets = target
        if control is not None:
            print("[WARNING] control keyworded argument is deprecated. Please use controls instead")
            if controls is not None:
                raise ValueError("control argument can't be set alongside controls")
            controls = control
        if anticontrol is not None:
            print("[WARNING] anticontrol keyworded argument is deprecated. Please use anticontrols instead")
            if anticontrols is not None:
                raise ValueError("anticontrol argument can't be set alongside anticontrols")
            anticontrols = anticontrol
        if not np.allclose(num_threads % 1, 0):
            raise ValueError("num_threads must be an integer")
        num_threads = int(num_threads)
        num_qubits = self.get_num_qubits()
        op_data = _get_op_data(num_qubits, 0, gate, targets, None, None,
                               controls, anticontrols, None, None)
        gate = op_data["gate"]
        targets = op_data["targets"]
        controls = op_data["controls"]
        anticontrols = op_data["anticontrols"]
        # We create a new system without the data of the parties
        new_sys = QSystem(None, doki=self.doki)
        new_sys.regs = []
        new_reg = None
        aux_reg = None
        exception = None
        try:
            # If any of the affected qubits is marked as not usable
            if any([self.get_classic(qubit_id) is not None
                    for qubit_id in targets]):
                # we raise an exception
                raise ValueError("Trying to apply gate to classic bit")
            cfail = any([self.get_classic(qubit_id) is False
                         for qubit_id in controls])
            acfail = any([self.get_classic(qubit_id) is True
                          for qubit_id in anticontrols])
            if cfail or acfail:
                # A classic (anti)control already failed: the gate is a
                # no-op, so return self (or a deep clone of it).
                if deep:
                    return self.clone(deep=True)
                else:
                    return self
            controls = {qubit_id for qubit_id in controls
                        if self.get_classic(qubit_id) is None}
            anticontrols = {qubit_id for qubit_id in anticontrols
                            if self.get_classic(qubit_id) is None}
            # All affected qubits
            parties = controls.union(anticontrols).union(targets)
            touched_regs = {self.qubitMap[qubit_id]
                            for qubit_id in parties}
            for reg_id in range(len(self.regs)):
                if reg_id not in touched_regs:
                    reggie, reg_ideses = self.regs[reg_id]
                    if deep and isinstance(reggie, QRegistry):
                        reggie = reggie.clone()
                    new_sys.regs.append([reggie, reg_ideses[:]])
            # Create new qubit map
            new_sys.qubitMap = {}
            for reg_id in range(len(new_sys.regs)):
                for qubit_id in new_sys.regs[reg_id][1]:
                    new_sys.qubitMap[qubit_id] = reg_id
            new_sys.num_qubits = self.num_qubits
            new_ids = []
            merged = False
            # Merge every touched registry into one via tensor product
            for reg_id in touched_regs:
                curr_reg, curr_ids = self.regs[reg_id]
                if new_reg is not None:
                    aux_reg = superposition(curr_reg, new_reg,
                                            num_threads=num_threads,
                                            verbose=self.verbose)
                    if merged:
                        del new_reg
                    else:
                        merged = True
                    new_reg = aux_reg
                else:
                    new_reg = curr_reg
                new_ids += curr_ids
            # Map system-wide qubit ids to local indexes in the merged reg
            inverse_map = {new_ids[qubit_id]: qubit_id
                           for qubit_id in range(len(new_ids))}
            mapped_targets = [inverse_map[qubit_id]
                              for qubit_id in targets]
            mapped_controls = {inverse_map[qubit_id]
                               for qubit_id in controls}
            mapped_anticontrols = {inverse_map[qubit_id]
                                   for qubit_id in anticontrols}
            aux_reg = new_reg.apply_gate(gate, targets=mapped_targets,
                                         controls=mapped_controls,
                                         anticontrols=mapped_anticontrols,
                                         num_threads=num_threads)
            if merged:
                del new_reg
            new_reg = None
            new_sys.regs.append([aux_reg, new_ids])
            for id in new_ids:
                new_sys.qubitMap[id] = len(new_sys.regs) - 1
        except Exception as ex:
            if new_sys is not None:
                del new_sys
            if new_reg is not None and merged:
                del new_reg
            if aux_reg is not None:
                del aux_reg
            new_sys = None
            exception = ex
        if exception is not None:
            raise exception
        return new_sys

    def get_bloch_coords(self, key=None):
        """Get the polar coordinates of all ONE qubit registries.

        Entries for qubits that are part of a multi-qubit registry are
        left as None (the per-registry call fails and is swallowed).
        """
        start, stop, step = _get_key_with_defaults(key, self.num_qubits,
                                                   0, self.num_qubits, 1)
        ids = [id for id in range(start, stop, step)]
        coords = [None for id in ids]
        for i in range(len(ids)):
            id = ids[i]
            try:
                # BUG FIX: the registry's id list used to be unpacked into
                # ``ids``, clobbering the outer ids list after the first
                # iteration; it now has its own name.
                reg, reg_ids = self.regs[self.qubitMap[id]]
                new_id = reg_ids.index(id)
                coords[i] = reg.get_bloch_coords(new_id)
            except Exception:
                pass
        if key is not None and type(key) != slice:
            coords = coords[0]
        return coords
def join_systems(most, least, deep=False):
    """Return a system that contains both most and least systems.

    Qubits of *least* keep their ids; qubits of *most* are shifted up by
    least.get_num_qubits().
    Keyworded arguments:
        deep -> clone the underlying registries instead of sharing them
    """
    res = QSystem(None, doki=most.doki)
    res.regs = []
    res.qubitMap = {}
    exception = None
    try:
        for reg, ids in least.regs:
            new_reg = reg
            # BUG FIX: use isinstance, not "reg == QRegistry" — the
            # original compared an instance against the class (always
            # False), so deep copies were never actually made.
            if isinstance(reg, QRegistry):
                if deep:
                    new_reg = reg.clone()
            res.regs.append([new_reg, ids[:]])
        offset = least.get_num_qubits()
        for reg, ids in most.regs:
            new_reg = reg
            if isinstance(reg, QRegistry):
                if deep:
                    new_reg = reg.clone()
            res.regs.append([new_reg, [id + offset for id in ids]])
        for i in range(len(res.regs)):
            _, ids = res.regs[i]
            for qubit_id in ids:
                res.qubitMap[qubit_id] = i
    except Exception as ex:
        exception = ex
    if exception is not None:
        del res
        raise exception
    return res
|
# Demo of Selenium WebDriver locator strategies: opens Baidu, waits,
# then quits.  The locator calls below are kept commented as a cheat
# sheet.
from selenium import webdriver
import time
driver = webdriver.Chrome()
driver.get("https://www.baidu.com")
time.sleep(3)
# Single-element locators
# driver.find_element_by_id("")
# driver.find_element_by_name("")
# driver.find_element_by_class_name("")
# driver.find_element_by_tag_name("")  # by tag name
# driver.find_element_by_link_text()  # by full link text
# driver.find_element_by_partial_link_text("")  # by partial link text
# driver.find_element_by_xpath("//name[@id='']")  # by relative XPath
# driver.find_element_by_css_selector("")
# Multi-element locators
# driver.find_elements
driver.quit()
# coding=utf-8
def decorator_maker_with_arguments(decorator_arg1, decorator_arg2):
    # Decorator factory (Python 2): builds a decorator that closes over
    # decorator_arg1 / decorator_arg2.  The printed messages are in
    # Russian and are part of the demo's runtime output.
    print "Я создаю декораторы! И я получил следующие аргументы:", decorator_arg1, decorator_arg2
    def my_decorator(func):
        # The actual decorator; it still sees the factory's arguments.
        print "(Ака декоратор)Я - декоратор. И ты всё же смог передать мне(декоратору) эти аргументы:", decorator_arg1, decorator_arg2
        # Do not confuse decorator arguments with function arguments!
        def wrapped(function_arg1, function_arg2):
            # Wrapper around the decorated function: this is where extra
            # behaviour would be added.  It has access to both the
            # decorator's and the call's arguments.
            print ("Я - обёртка вокруг декорируемой функции.\n сюда добавляется добавочный функционал\n"
                   "И я имею доступ ко всем аргументам: \n"
                   "\t- и декоратора: {0} {1}\n"
                   "\t- и функции: {2} {3}\n"
                   "Теперь я могу передать нужные аргументы дальше"
                   .format(decorator_arg1, decorator_arg2,  # str.format substitutes the
                           function_arg1, function_arg2))   # numbered {0},{1},... slots in order
            return func(function_arg1, function_arg2)
        return wrapped
    return my_decorator
@decorator_maker_with_arguments("Леонард", "Шелдон")
def decorated_function_with_arguments(function_arg1, function_arg2):
    # The decorated function only ever sees its own call arguments.
    print ("Я - декорируемая функция и я знаю только о своих аргументах: {0}"
           " {1}".format(function_arg1, function_arg2))
decorated_function_with_arguments("Раджеш", "Говард")
# Expected output (printed in Russian; translated here):
# I make decorators! And I received the following arguments: Леонард Шелдон
# I am the decorator. And you still managed to pass me these arguments: Леонард Шелдон
# I am the wrapper around the decorated function.
# And I have access to all the arguments:
#     - the decorator's: Леонард Шелдон
#     - the function's: Раджеш Говард
# Now I can pass the needed arguments along
# I am the decorated function and I only know my own arguments: Раджеш Говард
''' Version 1.000
Code provided by Daniel Jiwoong Im and Chris Dongjoo Kim
Permission is granted for anyone to copy, use, modify, or distribute this
program and accompanying programs and documents for any purpose, provided
this copyright notice is retained and prominently displayed, along with
a note saying that the original programs are available from our
web page.
The programs and documents are distributed without any warranty, express or
implied. As the programs were written for research purposes only, they have
not been tested to the degree that would be advisable in any important
application. All use of these programs is entirely at the user's own risk.'''
'''Demo of Generating images with recurrent adversarial networks.
For more information, see: http://arxiv.org/abs/1602.05110
'''
import time, timeit
import hickle as hkl
import theano
import numpy as np
import os, sys, glob
import gzip
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from tempfile import TemporaryFile
from optimize_gan import *
from recGanI import *
from gran import *
from deconv import *
from utils import *
from util_cifar10 import *
# Site-specific dataset locations (absolute paths on the authors' machines).
cifar10_datapath='/eecs/research/asr/chris/DG_project/dataset/cifar-10-batches-py/'
lsun_datapath='/local/scratch/chris/church/preprocessed_toy_10/'
mnist_datapath = '/data/mnist/'
# Make sure the output directories for figures and saved parameters exist
# next to this script.
if not os.path.exists(os.path.dirname(os.path.realpath(__file__)) + "/figs/"):
    os.makedirs(os.path.dirname(os.path.realpath(__file__)) + "/figs/")
if not os.path.exists(os.path.dirname(os.path.realpath(__file__)) + "/figs/cifar10"):
    os.makedirs(os.path.dirname(os.path.realpath(__file__)) + "/figs/cifar10")
if not os.path.exists(os.path.dirname(os.path.realpath(__file__)) + "/params/"):
    os.makedirs(os.path.dirname(os.path.realpath(__file__)) + "/params/")
def visualize_knn(train_set, samples, kfilename, k=7):
    """Plot each generated sample next to its k nearest training images.

    :param train_set: (data, labels) pair; train_set[0] holds flattened images
    :param samples: (Ns, D) array of generated samples
    :param kfilename: dataset name, one of 'MNIST'/'CIFAR10'/'LSUN'
    :param k: number of nearest neighbours to display per sample
    """
    Ns,D = samples.shape
    distmtx = (dist2hy(samples, train_set[0]))
    min_knn_ind = T.argsort(distmtx,axis=1)[:,:k]
    closest_datas = train_set[0][min_knn_ind].eval()
    # Row layout: the sample, a separator row of ones, then the k neighbours.
    tmp = np.concatenate([samples.reshape(Ns,1,D), np.ones((Ns,1,D))], axis=1)
    output = np.concatenate([tmp, closest_datas], axis=1).reshape(Ns*(k+2), D)
    # BUG FIX: the branches below tested the module-level global
    # ``filename`` instead of the ``kfilename`` parameter they were given.
    if (kfilename == 'CIFAR10'):
        display_images(output * 255., (Ns,k+2), fname='./figs/cifar10/inq/nn_ts5')
    elif(kfilename == 'LSUN'):
        display_images(np.asarray(output * 255, dtype='int32'), tile_shape = (Ns, k+2), img_shape=(64,64),fname='./figs/lsun/inq/nn_ts5_lsun');
    elif(kfilename == 'MNIST'):
        display_dataset(output, (28,28), (Ns,k+2), i=1, fname='./figs/MNIST/inq/nn_ts5')
    return
def load_model(filename, model_name):
    """Load a pickled GRAN model from ./params/.

    :param filename: dataset name ('MNIST'/'CIFAR10'/'LSUN'); selects a
        hard-coded default checkpoint when model_name is empty
    :param model_name: checkpoint file name under ./params/; the empty
        string means "use the dataset default"
    :return: the unpickled model object
    """
    if (model_name == ''):
        if (filename == 'CIFAR10'):
            model = unpickle(os.path.dirname(os.path.realpath(__file__)) + '/params/'+'recgan_num_hid100.batch100.eps_dis0.0001.eps_gen0.0002.num_z100.num_epoch15.lam1e-06.ts3.ckern128.data.10_lsun_get_eps(70).hbias_rem.z=zs[0]10.save')
        elif(filename == 'LSUN'):
            model = unpickle(os.path.dirname(os.path.realpath(__file__)) + '/params/'+'recgan_num_hid100.batch100.eps_dis0.0001.eps_gen0.0002.num_z100.num_epoch15.lam1e-06.ts3.ckern128.data.10_lsun_get_eps(70).hbias_rem.z=zs[0]10.save')
            # NOTE(review): the LSUN branch re-saves the weights and then
            # terminates the whole process via exit() — confirm intended.
            save_the_weight(model.params, './params/lsun_ts3')
            exit()
        elif(filename == 'MNIST'):
            # NOTE(review): the MNIST default loads a cifar10-named
            # checkpoint file — verify this is the intended file.
            model = unpickle(os.path.dirname(os.path.realpath(__file__)) + '/params/'+'gran_param_cifar10_ts5_2.save')
    else:
        model = unpickle(os.path.dirname(os.path.realpath(__file__)) + '/params/'+model_name)
    return model
def set_up_train(model, opt_params):
    """Build the optimizer and compile the sampling function.

    :param model: the loaded generative model
    :param opt_params: optimization hyper-parameters passed to Optimize
    :return: (opt, get_samples) — the optimizer and the compiled sampler
    """
    compile_start = timeit.default_timer()
    opt = Optimize(opt_params)
    get_samples = opt.get_samples(model)
    compile_finish = timeit.default_timer()
    # Python 2 print statement (this file targets Python 2 / Theano).
    print 'Compile Time %f ' % ( compile_finish - compile_start)
    #return opt, get_samples, get_seq_drawing
    return opt, get_samples
def main(train_set, valid_set, test_set, opt_params, filename):
    """Load a trained model and produce nearest-neighbour and sequential
    drawing visualizations for the chosen dataset.

    :param train_set: shared training data (data, labels)
    :param valid_set: shared validation data
    :param test_set: shared test data
    :param opt_params: hyper-parameter list; NOTE(review): the caller
        builds [batch_sz, epsilon, momentum, num_epoch, N, Nv, Nt, lam],
        but the unpack below names them [batch_sz, epsilon_gen,
        epsilon_dis, momentum, ...] — the names look shifted; confirm.
    :param filename: dataset name ('MNIST'/'CIFAR10'/'LSUN')
    """
    batch_sz, epsilon_gen, epsilon_dis, momentum, num_epoch, N, Nv, Nt = opt_params # TODO coonsider making epsilon into epsilon dis and gen separately.
    # compute number of minibatches for training, validation and testing
    # (Python 2 integer division)
    num_batch_train = N / batch_sz
    num_batch_valid = Nv / batch_sz
    num_batch_test = Nt / batch_sz
    # NOTE(review): model_name is read from module scope (set under
    # __main__), not passed as a parameter — confirm this coupling.
    model = load_model(filename, model_name)
    #opt, get_samples, get_seq_drawing = set_up_train(ganI, train_set, valid_set, test_set, opt_params)
    opt, get_samples = set_up_train(model, opt_params)
    # Flags selecting which visualizations to produce
    vis_knnF=1
    vis_seqF=1
    if vis_knnF:
        num_sam=7
        samples = get_samples(num_sam)
        # Flatten samples to (num_sam, features)
        samples = samples.reshape((num_sam, samples.shape[2]*samples.shape[3]*samples.shape[1]))
        knn_samples = visualize_knn(train_set, samples, filename);
    if vis_seqF:
        get_seq_drawing = opt.get_seq_drawing(model)
        seq_samples = get_seq_drawing(10)
        # Collapse (steps, batch, ...) into a 2-D matrix of drawings
        seq_samples = seq_samples.reshape((seq_samples.shape[0]*seq_samples.shape[1]\
                ,seq_samples.shape[2]*seq_samples.shape[3]*seq_samples.shape[4]))
        if (filename == 'CIFAR10'):
            display_images(seq_samples * 255., (model.num_steps,10), fname='./figs/cifar10/inq/seq_drawing_ts5_cifar10')
        elif(filename == 'LSUN'):
            display_images(seq_samples * 255., (model.num_steps,10), img_shape=(64,64), fname='./figs/lsun/inq/seq_drawing_ts5_lsun')
        elif(filename == 'MNIST'):
            display_dataset(seq_samples, (28,28), (model.num_steps,10), i=1, fname='./figs/MNIST/inq/seq_drawing_ts5_mnist')
### MODEL PARAMS
# CONV (DISC): discriminator hyper-parameters
conv_num_hid= 100
num_channel = 3  # input channels
num_class = 1
# ganI (GEN): generator hyper-parameters
filter_sz = 4 #FIXED
nkerns = [8,4,2,1]
ckern = 128
num_hid1 = nkerns[0]*ckern*filter_sz*filter_sz  # flattened first-layer size
num_z = 100
lam = 0.00003
### OPT PARAMS
batch_sz = 100#128
epsilon = 0.0002  # step size (see opt_params below)
momentum = 0.0 #Not Used
### TRAIN PARAMS
num_epoch = 50
epoch_start = 0
if __name__ == '__main__':
    # Python 2 script: raw_input / print statements / xrange below.
    filename = raw_input('Enter dataset name MNIST/CIFAR10/LSUN: ')
    model_name = raw_input('Enter your model name (if none, leave blank): ')
    #######MNIST#########
    if (filename == 'MNIST'):
        dataset = mnist_datapath+'/mnist.pkl.gz'
        f = gzip.open(dataset, 'rb')
        # NOTE(review): cPickle is not imported explicitly in this file;
        # it is presumably provided by one of the star imports — confirm.
        train_set, valid_set, test_set = cPickle.load(f)
        f.close()
        N ,D = train_set[0].shape; Nv,D = valid_set[0].shape; Nt,D = test_set[0].shape
        train_set = shared_dataset(train_set)
        valid_set = shared_dataset(valid_set)
        test_set = shared_dataset(test_set )
    #######CIFAR10#######
    elif (filename == 'CIFAR10'):
        train_set, valid_set, test_set = load_cifar10(path=cifar10_datapath)
        # Scale pixel values to [0, 1]
        train_set[0] = train_set[0] / 255.
        valid_set[0] = valid_set[0] / 255.
        test_set[0] = test_set[0] / 255.
        # print("before shared train_set[0]: ", train_set[0].shape);
        N ,D = train_set[0].shape; Nv,D = valid_set[0].shape; Nt,D = test_set[0].shape
        train_set = shared_dataset(train_set)
        valid_set = shared_dataset(valid_set)
        test_set = shared_dataset(test_set )
        # print 'num_z %d' % (num_z)
    #######LSUN#######
    elif (filename == 'LSUN'):
        # store the filenames into a list.
        train_filenames = sorted(glob.glob(lsun_datapath + 'train_hkl_b100_b_100/*' + '.hkl'))
        valid_filenames = sorted(glob.glob(lsun_datapath + 'val_hkl_b100_b_100/*' + '.hkl'))
        test_filenames = sorted(glob.glob(lsun_datapath + 'test_hkl_b100_b_100/*' + '.hkl'))
        train_data = hkl.load(train_filenames[0]) / 255.
        train_data = train_data.astype('float32').transpose([3,0,1,2]);
        a,b,c,d = train_data.shape
        train_data = train_data.reshape(a,b*c*d)
        train_set = [train_data, np.zeros((a,))]
        # print (train_filenames)
        train_data_cllct = train_data;
        # for purposes of setting up model.
        for i in xrange(1,len(train_filenames)):
            # for i in xrange(1,2):#TODO: find if its above forloop.
            train_data = hkl.load(train_filenames[i]) / 255.
            train_data = train_data.astype('float32').transpose([3,0,1,2]);
            a,b,c,d = train_data.shape
            train_data = train_data.reshape(a,b*c*d)
            # Stack every chunk into one big training matrix
            train_data_cllct = np.vstack((train_data_cllct, train_data))
            # print(train_data_cllct.shape);
        # NOTE(review): the label vector size uses ``a`` (rows of the
        # *last* chunk), not the total row count of train_data_cllct —
        # confirm intended.
        train_set = [train_data_cllct, np.zeros((a,))]
        val_data = hkl.load(valid_filenames[0]) / 255.
        val_data = val_data.astype('float32').transpose([3,0,1,2]);
        a,b,c,d = val_data.shape
        val_data = val_data.reshape(a, b*c*d)
        valid_set = [val_data, np.zeros((a,))]
        test_set = valid_set
        N ,D = train_set[0].shape; Nv,D = valid_set[0].shape; Nt,D = test_set[0].shape
        train_set = shared_dataset(train_set)
        valid_set = shared_dataset(valid_set)
        test_set = shared_dataset(test_set)
    opt_params = [batch_sz, epsilon, momentum, num_epoch, N, Nv, Nt,lam]
    # NOTE(review): main() unpacks opt_params under different names
    # (epsilon_gen/epsilon_dis) — see the note on main(); confirm ordering.
    book_keeping = main(train_set, valid_set, test_set, opt_params,filename)
|
#!/usr/bin/env python
# RVA Makerfest RetroPi controller
# Laser target button
# Adam
import RPi.GPIO as GPIO, time, os
import random
from subprocess import Popen, PIPE
# BCM pin assignments
F_PIN = 14             # output pulsed when the light sensor triggers
G_PIN = 4              # second output, held high
light_sensor_pin = 18  # RC light-sensor pin
servo_pin = 12         # servo PWM pin
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(servo_pin, GPIO.OUT)
GPIO.setup(F_PIN, GPIO.OUT )
# BUG FIX: G_PIN was written below without ever being configured as an
# output; RPi.GPIO raises "channel has not been set up" in that case.
GPIO.setup(G_PIN, GPIO.OUT)
GPIO.output(F_PIN, 1)
GPIO.output(G_PIN, 1)
pwm = GPIO.PWM(servo_pin, 50)  # 50 Hz software PWM for the servo
def read_light_sensor (RCpin):
    """Take an RC-timing light reading on RCpin and pulse F_PIN when low.

    The pin is first driven low to discharge, then switched to input;
    the loop counts how long it stays low while charging.  A small count
    means a fast charge — presumably a bright hit on the sensor (this is
    the laser-target trigger per the file header; confirm threshold 300).

    :param RCpin: BCM pin wired to the light sensor
    """
    reading = 0
    # Discharge phase: drive the pin low briefly.
    GPIO.setup(RCpin, GPIO.OUT)
    GPIO.output(RCpin, GPIO.LOW)
    time.sleep(0.1)
    GPIO.setup(RCpin, GPIO.IN)
    # This takes about 1 millisecond per loop cycle
    while (GPIO.input(RCpin) == GPIO.LOW):
        reading += 1
    if reading < 300:
        # Active-low pulse on F_PIN for 0.1 s.
        GPIO.output(F_PIN, 0)
        time.sleep(0.1)
        GPIO.output(F_PIN, 1)
def move_servo():
    """Jump the servo to a random position for 0.1 s.

    NOTE(review): pwm.start() is re-invoked on every call with a duty
    cycle in 0-99; RPi.GPIO accepts 0-100 and servos normally use a much
    narrower duty range, and repeated start() (vs ChangeDutyCycle) is
    unusual — confirm this drives the hardware as intended.
    """
    x = random.randrange(100)
    pwm.start(x)
    time.sleep(0.1)
def main():
    """Main loop: poll the light sensor, then move the servo, forever."""
    while True:
        read_light_sensor(light_sensor_pin)
        move_servo()
if __name__ == '__main__':
main()
|
import cv2 as cv
class Filtragem:
    """Static helpers wrapping OpenCV smoothing/blur filters."""
    def __init__(self):
        # Stateless: all filters are exposed as static methods.
        pass
    @staticmethod
    def linear(imagem, matrix=(1, 1)):
        """Box (averaging) blur; ``matrix`` is the kernel size.

        NOTE(review): the default (1, 1) kernel is effectively a no-op —
        confirm that default is intended.
        """
        return cv.blur(imagem, matrix)
    @staticmethod
    def linearMediano(imagem, intensidade):
        """Median blur; ``intensidade`` is the aperture size (odd int)."""
        return cv.medianBlur(imagem, intensidade)
    @staticmethod
    def porMetodoGaussian(imagem, suavizacao, matrix=(1, 1)):
        """Gaussian blur with kernel size ``matrix`` and sigma ``suavizacao``."""
        return cv.GaussianBlur(imagem, matrix, suavizacao)
    @staticmethod
    def bilateral(imagem, tamanho, sigmarCor, sigmaEspaco):
        """Edge-preserving bilateral filter.

        :param tamanho: pixel neighbourhood diameter
        :param sigmarCor: filter sigma in the colour space
        :param sigmaEspaco: filter sigma in the coordinate space
        """
        return cv.bilateralFilter(imagem, tamanho, sigmarCor,
                                  sigmaEspaco)
|
from watson_developer_cloud import SpeechToTextV1
class Speech_To_Text_Component:
    """Wraps the IBM Watson SpeechToTextV1 client.

    Credentials are read from key.txt: the API key on line 15 and the
    service URL on line 16 (0-based indexes 14 and 15).
    """
    def __init__(self, debug_mode=False):
        """
        :param debug_mode: flag stored for callers that want diagnostics
        """
        self.debug_mode = debug_mode
        # "with" guarantees the key file is closed even on error.
        with open("key.txt", "r") as key_file:
            lines = key_file.read().splitlines()
        # BUG FIX: the original indexed the closed file object (f[14] /
        # f[15]) instead of the list of lines it had read (f1), which
        # raises a TypeError at runtime.
        self.speech_to_text = SpeechToTextV1(
            iam_apikey=lines[14],
            url=lines[15]
        )
# Module-level client built from literal placeholder strings; replace
# '{iam_api_key}' / '{url}' with real credentials before using it.
speech_to_text = SpeechToTextV1(
    iam_apikey='{iam_api_key}',
    url='{url}'
)
from django.contrib.auth.models import AbstractUser
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
from django.utils.translation import gettext_lazy as _
from .validators import custom_year_validator
class CustomUser(AbstractUser):
    """User model that logs in with email and carries a permission role."""
    class PermissionsRoleChoice(models.TextChoices):
        # Closed set of permission roles used by the API layer.
        USER = 'user', _('user')
        MODERATOR = 'moderator', _('moderator')
        ADMIN = 'admin', _('admin')
    # NOTE(review): Django docs advise avoiding null=True on text-based
    # fields (two distinct "empty" states); blank=True alone is usually
    # enough — confirm before changing (requires a migration).
    bio = models.TextField(blank=True, null=True)
    email = models.EmailField(_('email address'), unique=True)
    role = models.CharField(
        max_length=50,
        choices=PermissionsRoleChoice.choices,
        default=PermissionsRoleChoice.USER
    )
    # Authenticate by email; username is still collected (createsuperuser).
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['username']
    def __str__(self):
        return self.email
class Category(models.Model):
    """Content category; referenced by Title via a nullable FK."""
    name = models.CharField(
        max_length=200,
        unique=True,
        db_index=True,
        verbose_name='Название категории'
    )
    # URL-friendly unique identifier for the category.
    slug = models.SlugField(
        max_length=300,
        unique=True,
        verbose_name='Метка категории'
    )
    def __str__(self) -> str:
        return self.name
    class Meta:
        verbose_name = 'Категория'
        verbose_name_plural = 'Категории'
        ordering = ('name',)
class Genre(models.Model):
    """Genre; attached to Title through a many-to-many relation."""
    name = models.CharField(
        max_length=200,
        unique=True,
        db_index=True,
        verbose_name='Название жанра'
    )
    # URL-friendly unique identifier for the genre.
    slug = models.SlugField(
        max_length=300,
        unique=True,
        verbose_name='Метка жанра'
    )
    def __str__(self) -> str:
        return self.name
    class Meta:
        verbose_name = 'Жанр'
        verbose_name_plural = 'Жанры'
        ordering = ('name',)
class Title(models.Model):
    """A work (film, book, ...) that users write reviews for."""
    name = models.CharField(
        max_length=200,
        unique=True,
        db_index=True,
        verbose_name='Название'
    )
    # Release year; custom_year_validator constrains the allowed range.
    year = models.IntegerField(
        null=True,
        verbose_name='Год',
        validators=[
            custom_year_validator,
        ]
    )
    # NOTE(review): null=True on a TextField is discouraged by Django
    # docs (prefer blank=True with empty string) — confirm before
    # changing, as it requires a migration.
    description = models.TextField(
        null=True,
        verbose_name='Описание произведения'
    )
    # Category survives deletion as NULL rather than cascading.
    category = models.ForeignKey(
        Category,
        on_delete=models.SET_NULL,
        blank=True,
        null=True,
        related_name='titles',
        verbose_name='Категория'
    )
    genre = models.ManyToManyField(
        Genre,
        blank=True,
        related_name='titles',
        verbose_name='Жанр'
    )
    def __str__(self) -> str:
        return self.name
    class Meta:
        ordering = ['name']
        verbose_name = 'Произведение'
        verbose_name_plural = 'Произведения'
class Review(models.Model):
    """A user's review of a Title with a 1-10 score."""
    text = models.TextField(verbose_name='Текст')
    # Score is constrained to [1, 10] at validation time.
    score = models.IntegerField(
        verbose_name='Оценка',
        validators=[
            MinValueValidator(1, message='Введите число не меньше 1'),
            MaxValueValidator(10, message='Введите число не больше 10')
        ]
    )
    pub_date = models.DateTimeField(
        auto_now_add=True,
        verbose_name='Дата публикации'
    )
    title = models.ForeignKey(
        Title,
        on_delete=models.CASCADE,
        db_index=True,
        related_name='reviews',
        verbose_name='Произведение'
    )
    author = models.ForeignKey(
        CustomUser,
        on_delete=models.CASCADE,
        related_name='reviews',
        verbose_name='Автор'
    )
    # NOTE(review): there is no uniqueness constraint on (author, title),
    # so a user can review the same title multiple times — confirm
    # whether a UniqueConstraint is required by the spec.
    def __str__(self) -> str:
        return (f'{self.author} написал {self.text} на {self.title}.'
                f'{self.author} оценил {self.title} на {self.score}.'
                f'{self.pub_date}.')
    class Meta:
        verbose_name = 'Рецензия'
        verbose_name_plural = 'Рецензии'
        ordering = ('-pub_date', 'author',)
class Comment(models.Model):
    """A user's comment attached to a Review."""
    text = models.TextField(verbose_name='Текст')
    pub_date = models.DateTimeField(
        auto_now_add=True,
        verbose_name='Дата публикации'
    )
    # NOTE(review): review is nullable/blank, so orphan comments (no parent
    # review) are representable — confirm this is intended rather than a
    # leftover from an earlier schema.
    review = models.ForeignKey(
        Review,
        on_delete=models.CASCADE,
        db_index=True,
        related_name='comments',
        blank=True,
        null=True,
        verbose_name='Рецензия'
    )
    author = models.ForeignKey(
        CustomUser,
        on_delete=models.CASCADE,
        related_name='comments',
        verbose_name='Автор'
    )
    def __str__(self) -> str:
        return (f'{self.author} написал {self.text} на {self.review}.'
                f'{self.pub_date}.')
    class Meta:
        verbose_name = 'Комментарий'
        verbose_name_plural = 'Комментарии'
        ordering = ('-pub_date', 'author',)
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" GLUE processors and helpers """
import numpy as np
import jsonlines
import os
import warnings
from dataclasses import asdict
from enum import Enum
from typing import List, Optional, Union
from ...file_utils import is_tf_available
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from .utils import DataProcessor, InputExample, InputFeatures
from torch.utils.data.dataset import Dataset
import random
import torch
if is_tf_available():
import tensorflow as tf
logger = logging.get_logger(__name__)
DEPRECATION_WARNING = (
"This {0} will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
"library. You can have a look at this example script for pointers: "
"https://github.com/huggingface/transformers/blob/master/examples/text-classification/run_glue.py"
)
def glue_convert_examples_to_features(
    examples: Union[List[InputExample], "tf.data.Dataset"],
    tokenizer: PreTrainedTokenizer,
    max_length: Optional[int] = None,
    task=None,
    label_list=None,
    output_mode=None,
):
    """
    Convert ``InputExample``s into ``InputFeatures`` (deprecated entry point).

    Args:
        examples: List of ``InputExamples`` or ``tf.data.Dataset`` containing the examples.
        tokenizer: Instance of a tokenizer that will tokenize the examples.
        max_length: Maximum example length; defaults to the tokenizer's max_len.
        task: GLUE task name. Required when ``examples`` is a ``tf.data.Dataset``.
        label_list: List of labels; can be obtained via ``processor.get_labels()``.
        output_mode: Either ``regression`` or ``classification``.

    Returns:
        A ``tf.data.Dataset`` of task-specific features when given a
        ``tf.data.Dataset``; otherwise a list of ``InputFeatures`` ready to be
        fed to the model.
    """
    warnings.warn(DEPRECATION_WARNING.format("function"), FutureWarning)
    tf_input = is_tf_available() and isinstance(examples, tf.data.Dataset)
    if not tf_input:
        # Plain-list path: delegate straight to the framework-agnostic helper.
        return _glue_convert_examples_to_features(
            examples, tokenizer, max_length=max_length, task=task, label_list=label_list, output_mode=output_mode
        )
    if task is None:
        raise ValueError("When calling glue_convert_examples_to_features from TF, the task parameter is required.")
    return _tf_glue_convert_examples_to_features(examples, tokenizer, max_length=max_length, task=task)
def factcheck_convert_examples_to_features(
    examples: List[InputExample],
    tokenizer: PreTrainedTokenizer,
    max_length: Optional[int] = 512,
    task=None,
    label_list=None,
    output_mode=None,
):
    """Tokenize claim/evidence pairs and attach labels, returning ``InputFeatures``.

    When ``task`` is given, the label list and output mode are filled in from the
    registered GLUE processor if the caller did not supply them. Evidence
    (``text_b``) is kept intact; over-long claims are truncated
    (``truncation='only_first'``). The first five examples are logged.
    """
    if max_length is None:
        max_length = tokenizer.max_len
    if task is not None:
        processor = glue_processors[task]()
        if label_list is None:
            label_list = processor.get_labels()
            logger.info("Using label list %s for task %s" % (label_list, task))
        if output_mode is None:
            output_mode = glue_output_modes[task]
            logger.info("Using output mode %s for task %s" % (output_mode, task))
    label_map = {label: i for i, label in enumerate(label_list)}
    def _label_of(ex: InputExample) -> Union[int, float, None]:
        # Map a raw label to its model target; None labels pass through (test sets).
        if ex.label is None:
            return None
        if output_mode == "classification":
            return label_map[ex.label]
        if output_mode == "regression":
            return float(ex.label)
        raise KeyError(output_mode)
    labels = [_label_of(ex) for ex in examples]
    batch_encoding = tokenizer(
        [(ex.text_a, ex.text_b) for ex in examples],
        max_length=max_length,
        add_special_tokens=True,
        truncation='only_first',
        return_token_type_ids=True
    )
    features = [
        InputFeatures(**{key: batch_encoding[key][idx] for key in batch_encoding}, label=labels[idx])
        for idx in range(len(examples))
    ]
    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info("guid: %s" % (example.guid))
        logger.info("features: %s" % features[i])
    return features
class OutputMode(Enum):
    """Task head type: discrete label prediction vs. a single regression target."""
    classification = "classification"
    regression = "regression"
class PolifactProcessor(DataProcessor):
    """Data processor for PolitiFact/LIAR/FEVER-style fact-checking datasets.

    Flags taken from ``args`` select the dataset (fever/liar/covidpoli/myth),
    whether evidence sentences accompany each claim (``has_evidence``), binary
    vs. six-way labels (``is_binary``), and optional few-shot subsampling
    (``few_shot``). Many file paths below are hard-coded to specific home
    directories — this code only runs on the machines they reference.
    """
    def __init__(self, args, **kwargs):
        # super().__init__(*args, **kwargs)
        # print(args)
        warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning)
        self.is_binary = args.is_binary # binary or multi
        self.has_evidence = args.has_evidence #False
        self.subtask = args.politifact_subtask #'liar' # liar, covid
        self.output_mode = args.output_mode
        self.filter_middle_classes = args.filter_middle_classes
        self.few_shot = args.few_shot
        self.myth = args.myth
        self.fever = args.fever
        self.liar = args.liar
        self.seed_ = args.seed
        self.covidpoli = args.covidpoli
        # Collapses six-way PolitiFact labels (and FEVER labels) to binary.
        # Note FEVER "REFUTES" maps to "_", a bucket that is never used downstream.
        self.multi2binary = {
            "true" : "true",
            "mostly-true": "true",
            "half-true": "true",
            "barely-true": "false",
            "false": "false",
            "pants-fire": "false",
            "NOT ENOUGH INFO": "false",
            "REFUTES": "_",
            "SUPPORTS": "true"
        }
        # Precedence: a later assignment below overwrites the regression
        # placeholder when is_binary is also set.
        if self.output_mode == 'regression':
            self.labels = [None]
        if self.is_binary:
            # classification binary
            self.labels = ["true", "false"]
        else:
            if self.fever:
                self.labels = ["REFUTES", "SUPPORTS", "NOT ENOUGH INFO"]
            else:
                self.labels = ["true", "mostly-true", "half-true", "barely-true", "false", "pants-fire"]
    # def get_example_from_tensor_dict(self, tensor_dict):
    #     """See base class."""
    #     return InputExample(
    #         tensor_dict["idx"].numpy(),
    #         tensor_dict["sentence1"].numpy().decode("utf-8"),
    #         tensor_dict["sentence2"].numpy().decode("utf-8"),
    #         str(tensor_dict["label"].numpy()),
    #     )
    def get_train_examples(self, data_dir):
        """Load training examples for the configured dataset/flags.

        Few-shot + evidence: loads the dataset with perplexity scores, shuffles
        deterministically with ``self.seed_`` and keeps ``few_shot`` items.
        """
        if self.has_evidence:
            # if self.fever:
            #     path_ = '/home/yejin/fever/data/fever_train_for_bert.jsonl'
            #     # path_ = "{}/naacl/fever_train_for_bert_w_ppl.jsonl".format(data_dir)
            # elif self.myth:
            #     path_ = '/home/yejin/covid19_factcheck/data/covid_myth_test_v3.jsonl'
            # elif self.liar:
            #     path_ = "{}/politifact/{}/liar-plus_train_v3.jsonl".format(data_dir, self.subtask)
            #     # path_ ='/home/nayeon/covid19_factcheck/data/liar-plus_train_v3_justification_top1_naacl.jsonl'
            # elif self.covidpoli:
            #     path_='/home/yejin/covid19_factcheck/data/factcheck_data/politifact/liar/test_covid19_justification_naacl.jsonl'
            # else:
            #     # using FEVER-based evidences
            #     if any([self.use_credit, self.use_metainfo, self.use_creditscore]):
            #         path_ = "{}/politifact/{}/train_evidence_meta_fever_v4a.jsonl".format(data_dir, self.subtask)
            #     else:
            #         print("reading data")
            #         path_ = "{}/politifact/{}/train_evidence_meta_fever_v4a.jsonl".format(data_dir, self.subtask)
            # # ============ PATH DONE ============
            # print("loading from {}".format(path_))
            # with jsonlines.open(path_) as reader:
            #     obj_list = [obj for obj in reader]
            # if self.filter_middle_classes:
            #     obj_list = [obj for obj in obj_list if obj['label'] not in ['half-true','barely-true']]
            if self.few_shot:
                # NOTE(review): if none of fever/liar/covidpoli/myth is set,
                # path_/eval_file are unbound below — NameError. Confirm the
                # flags are mutually exhaustive at the call sites.
                if self.fever:
                    path_ = '/home/yejin/fever/data/fever_train_for_bert_s.jsonl'
                    eval_file ='/home/nayeon/covid19_factcheck/ppl_results/naacl.gpt2.uni.fever_train_small.npy'
                elif self.liar:
                    path_ = "/home/nayeon/covid19_factcheck/data/liar-plus_train_v3_justification_top1_naacl.jsonl"
                    eval_file ='/home/nayeon/covid19_factcheck/ppl_results/naacl.gpt2.uni.liar_train_justification_top1.npy'
                elif self.covidpoli:
                    path_ = '/home/yejin/covid19_factcheck/data/factcheck_data/politifact/liar/test_covid19_justification_naacl.jsonl'
                    eval_file ='/home/nayeon/covid19_factcheck/ppl_results/naacl.gpt2.uni.naacl_covid_politifact_justification.npy'
                elif self.myth:
                    path_ = '/home/yejin/covid19_factcheck/data/covid_myth_test_v3.jsonl'
                    eval_file ='/home/nayeon/covid19_factcheck/ppl_results/naacl.gpt2.uni.naacl_covid_myth_v3.npy'
                all_objs = self.load_full_liar_with_ppl(path_, eval_file)
                # Only the binary buckets are used; the '_' (REFUTES) bucket is dropped.
                combined_all_objs = all_objs['true'] + all_objs['false']
                random.seed(self.seed_)
                random.shuffle(combined_all_objs)
                obj_list = combined_all_objs[:self.few_shot]
                print("Looking from here {}".format(path_))
                print("Using few shot!!!! LEN: ", len(obj_list))
                return self._create_examples_with_evidences(obj_list, "train")
            else:
                # NOTE(review): only fever/liar branches set path_ here; other
                # flag combinations would hit a NameError at jsonlines.open.
                if self.fever:
                    path_ = '/home/yejin/fever/data/fever_train_for_bert_s.jsonl'
                elif self.liar:
                    path_ = "/home/nayeon/covid19_factcheck/data/liar-plus_train_v3_justification_top1_naacl.jsonl"
                with jsonlines.open(path_) as reader:
                    obj_list = [obj for obj in reader if obj['label'] != 'REFUTES']
                print("Train from {}".format(path_))
                print("Train {} Samples".format(len(obj_list)))
                return self._create_examples_with_evidences(obj_list, "train")
        else:
            if self.fever:
                path_ = "{}/naacl/fever_train_for_bert_w_ppl.jsonl".format(data_dir)
                with jsonlines.open(path_) as reader:
                    # Keep only entries with non-empty, non-zero first evidence.
                    obj_list = [obj for obj in reader if obj['evidences'] != [] and obj['evidences'][0][0] != 0]
                if self.few_shot:
                    new_obj_list = obj_list[:self.few_shot]
                    obj_list = new_obj_list
                    print("Using few shot!!!! LEN: ", len(obj_list))
                return self._create_fever_examples(obj_list, "train")
            else:
                # NOTE(review): `data_source` is not defined anywhere in this
                # method/class — this branch raises NameError if reached. The
                # commented line below suggests `self.data_source` was meant;
                # TODO confirm and fix.
                path_ = "{}/politifact/{}/train{}.tsv".format(data_dir, self.subtask, data_source)
                print("loading from {}".format(path_))
                return self._create_examples(self._read_tsv(path_), "train")
    # return self._create_examples(self._read_tsv(os.path.join(data_dir, "train{}.tsv".format(self.data_source))), "train")
    def get_dev_examples(self, data_dir):
        """Load dev examples; mirrors get_train_examples but takes the few-shot
        *complement* (items after the first ``few_shot + 1``) for myth/covidpoli.

        NOTE(review): when ``has_evidence`` is False this method falls through
        the commented-out block and implicitly returns None — confirm callers
        never hit that path.
        """
        if self.has_evidence:
            if self.few_shot:
                if self.fever:
                    path_ = "{}/naacl/fever_test_for_bert_w_ppl.jsonl".format(data_dir)
                    with jsonlines.open(path_) as reader:
                        obj_list = [obj for obj in reader if obj['label'] != 'REFUTES']
                elif self.liar:
                    path_ ='/home/nayeon/covid19_factcheck/data/liar-plus_test_v3_justification_top1_naacl.jsonl'
                    with jsonlines.open(path_) as reader:
                        obj_list = [obj for obj in reader if obj['label'] != 'REFUTES']
                elif self.myth:
                    path_ = '/home/yejin/covid19_factcheck/data/covid_myth_test_v3.jsonl'
                    eval_file = '/home/nayeon/covid19_factcheck/ppl_results/naacl.gpt2.uni.naacl_covid_myth_v3.npy'
                    all_objs = self.load_full_liar_with_ppl(path_, eval_file)
                    combined_all_objs = all_objs['true'] + all_objs['false']
                    # Same seed as training so the shuffle order matches and the
                    # dev split is the complement of the few-shot training split.
                    random.seed(self.seed_)
                    random.shuffle(combined_all_objs)
                    obj_list = combined_all_objs[self.few_shot + 1:]
                elif self.covidpoli:
                    path_ = '/home/yejin/covid19_factcheck/data/factcheck_data/politifact/liar/test_covid19_justification_naacl.jsonl'
                    eval_file = '/home/nayeon/covid19_factcheck/ppl_results/naacl.gpt2.uni.naacl_covid_politifact_justification.npy'
                    all_objs = self.load_full_liar_with_ppl(path_, eval_file)
                    combined_all_objs = all_objs['true'] + all_objs['false']
                    random.seed(self.seed_)
                    random.shuffle(combined_all_objs)
                    print(len(combined_all_objs))
                    obj_list = combined_all_objs[self.few_shot+1:]
                # random.seed(self.seed_)
                # obj_list = obj_list[:self.few_shot]
                print("Using few dev shot!!!! LEN: ", len(obj_list))
                print("loading from dev !! {}".format(path_))
                return self._create_examples_with_evidences(obj_list, "dev")
            else:
                if self.fever:
                    path_ = "{}/naacl/fever_test_for_bert_w_ppl.jsonl".format(data_dir)
                    with jsonlines.open(path_) as reader:
                        obj_list = [obj for obj in reader if obj['label'] != 'REFUTES']
                elif self.liar:
                    path_ ='/home/nayeon/covid19_factcheck/data/liar-plus_test_v3_justification_top1_naacl.jsonl'
                    with jsonlines.open(path_) as reader:
                        obj_list = [obj for obj in reader if obj['label'] != 'REFUTES']
                print("Evalutate from {}".format(path_))
                print("Evaluate {} samples".format(len(obj_list)))
                return self._create_examples_with_evidences(obj_list, "dev")
        # else:
        #     if self.fever:
        #         path_ = "{}/naacl/fever_valid_for_bert_w_ppl_s.jsonl".format(data_dir)
        #         # path_ = "{}/naacl/fever_test_for_bert_w_ppl_{}_test.jsonl".format(data_dir, self.cross_validation)
        #         with jsonlines.open(path_) as reader:
        #             obj_list = [obj for obj in reader if obj['evidences'] != [] and obj['evidences'][0][0] != 0]
        #         return self._create_fever_examples(obj_list, "dev")
        #     else:
        #         path_ = "{}/politifact/{}/valid{}.tsv".format(data_dir, self.subtask, data_source)
        #         print("loading from {}".format(path_))
        #         return self._create_examples(self._read_tsv(os.path.join(data_dir, path_)), "dev")
        #         # return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev{}.tsv".format(self.data_source))), "dev")
    def get_test_examples(self, data_dir):
        """Load test examples for the configured dataset/flags."""
        if self.has_evidence:
            if self.fever:
                path_ = "{}/naacl/fever_test_for_bert.jsonl".format(data_dir)
            elif self.liar:
                path_ = "{}/politifact/{}/liar-plus_test_v3.jsonl".format(data_dir, self.subtask)
                # path_ = '/home/nayeon/covid19_factcheck/data/liar-plus_test_v3_justification_top1_naacl.jsonl'
            else:
                # NOTE(review): self.use_credit / use_metainfo / use_creditscore /
                # use_ppl are never assigned in __init__ — AttributeError if this
                # branch runs. Both arms also pick the same path. TODO confirm.
                if any([self.use_credit, self.use_metainfo, self.use_creditscore, self.use_ppl]):
                    path_ = "{}/politifact/{}/test_evidence_meta_fever_v4a.jsonl".format(data_dir, self.subtask)
                else:
                    path_ = "{}/politifact/{}/test_evidence_meta_fever_v4a.jsonl".format(data_dir, self.subtask)
            print("loading from {}".format(path_))
            if self.few_shot and self.fever:
                with jsonlines.open(path_) as reader:
                    obj_list = [obj for obj in reader if obj['label'] != 'REFUTES']
            else:
                with jsonlines.open(path_) as reader:
                    obj_list = [obj for obj in reader]
            return self._create_examples_with_evidences(obj_list, "test")
        else:
            if self.fever:
                path_ = "{}/naacl/fever_test_for_bert_w_ppl.jsonl".format(data_dir)
                # path_ = "{}/naacl/fever_test_for_bert_w_ppl_{}_test.jsonl".format(data_dir, self.cross_validation)
                with jsonlines.open(path_) as reader:
                    obj_list = [obj for obj in reader if obj['evidences'] != [] and obj['evidences'][0][0] != 0]
                return self._create_fever_examples(obj_list, "test")
            else:
                # NOTE(review): `data_source` is undefined here too (NameError
                # if reached) — same latent bug as in get_train_examples.
                path_ = "{}/politifact/{}/test{}.tsv".format(data_dir, self.subtask, data_source)
                print("loading from {}".format(path_))
                return self._create_examples(self._read_tsv(os.path.join(data_dir, path_)), "test")
    def get_labels(self):
        """See base class."""
        return self.labels
    def _create_examples(self, lines, set_type):
        """Creates examples for the training, dev and test sets.

        Skips the TSV header row; label lives in column 0, claim in column 3,
        evidence in column 4. Test-set labels are set to None.
        """
        examples = []
        for (i, line) in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, i)
            text_a = line[3]
            text_b = line[4]
            label = None if set_type == "test" else line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
        return examples
    def _create_examples_with_evidences(self, obj_list, set_type, evidence_option='concat'):
        """Build InputExamples from claim+evidence objects.

        ``evidence_option='concat'`` uses only the first evidence sentence
        (``is_t3`` is hard-disabled below); other options are unimplemented.
        """
        examples = []
        for (i, obj) in enumerate(obj_list):
            # Objects from FEVER use 'id'; LIAR-style objects use 'claim_id'.
            try:
                guid = "%s-%s" % (set_type, obj['claim_id'])
            except:
                guid = "%s-%s" % (set_type, obj['id'])
            text_a = obj['claim']
            if evidence_option == 'concat':
                self.is_t3 = False
                if self.is_t3:
                    text_b = " ".join([e_tuple[0] for e_tuple in obj['evidences'][:3]])
                else:
                    text_b = obj['evidences'][0][0]
            elif evidence_option == 'use_body':
                raise NotImplementedError
            elif evidence_option == 'separate_evidences':
                raise NotImplementedError
            label = obj['label']
            if self.is_binary:
                # map to 6 label to binary label
                label = self.multi2binary[label]
            # print(text_a)
            # print(text_b)
            # print(label)
            # exit(0)
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
        return examples
    def load_full_liar_with_ppl(self, data_path, ppl_result_path):
        """Join a jsonlines dataset with per-example GPT-2 perplexity scores.

        Returns a dict with 'true'/'false'/'_' buckets of normalized objects
        keyed by the binary label. Assumes the .npy rows align 1:1 with the
        jsonlines rows — TODO confirm the two files were produced together.
        """
        with jsonlines.open(data_path) as reader:
            og_objs = [obj for obj in reader]
        ppl_results = np.load(ppl_result_path, allow_pickle=True)
        all_objs = {
            'true': [],
            'false': [],
            '_': []
        }
        for obj, ppl in zip(og_objs, ppl_results):
            label = self.multi2binary[obj['label']]
            if 'fever' in data_path:
                claim_id = obj['id']
            else:
                claim_id = obj['claim_id']
            claim = obj['claim']
            evs = obj['evidences'][:3]
            ppl = ppl['perplexity']
            new_objs = {'ppl': ppl, 'label': label, 'claim': claim, 'evidences': evs, 'claim_id': claim_id}
            all_objs[label].append(new_objs)
        return all_objs
# Registry mapping task names to their DataProcessor implementations.
factcheck_processors = {
    'gpt2baseline': PolifactProcessor,
    'politifact': PolifactProcessor,
    # 'fusion': FusionProcessor
}
class DatasetForClassification(Dataset):
    """Torch ``Dataset`` wrapping PolifactProcessor examples for binary
    true/false claim classification.

    Each item is a ``(input_ids, label)`` pair of long tensors produced by
    tokenizing the (claim, evidence) pair of one example.
    """
    def __init__(self, args, tokenizer: PreTrainedTokenizer, phase: str, local_rank=-1):
        self.tokenizer = tokenizer
        self.labels = ["true", "false"]
        self.label_map = {label: i for i, label in enumerate(self.labels)}
        processor = PolifactProcessor(args)
        # BUG FIX: the original condition was `phase == 'dev' or 'test'`, which
        # is always truthy ('test' is a non-empty string), silently accepting
        # any phase value. Use an explicit membership test instead.
        if phase == 'train':
            self.examples = processor.get_train_examples(args.data_dir)
        elif phase in ('dev', 'test'):
            # NOTE(review): this loads *train* examples for dev/test; the
            # commented-out line suggests get_dev_examples was intended —
            # preserved as-is, confirm before changing.
            self.examples = processor.get_train_examples(args.data_dir)
            # self.examples = processor.get_dev_examples(args.data_dir)
        else:
            raise ValueError("Unknown phase: %s" % phase)
    # def fever_data_cleaning(self, sent):
    #     sent = sent.replace('-LRB-', '(')
    #     sent = sent.replace('-RRB-', ')')
    #     sent = sent.replace('-LSB-', '[')
    #     sent = sent.replace('-RSB-', ']')
    #     return sent
    def convert_claim_ev(self, example):
        """Tokenize one example's (claim, evidence) pair.

        Returns ``(input_ids, label)`` as long tensors.
        """
        example = example[0]
        # NOTE(review): `pad_token=` is not a documented encode_plus kwarg
        # (padding is controlled via `padding=`); preserved as-is — confirm
        # against the tokenizer version in use.
        single_encoding = self.tokenizer.encode_plus(
            (example.text_a, example.text_b),
            add_special_tokens=True,
            pad_token=self.tokenizer.pad_token_id,
            return_token_type_ids=False
        )
        input_ids = torch.tensor(single_encoding['input_ids'], dtype=torch.long)
        labels = torch.tensor(self.label_map[example.label], dtype=torch.long)
        return input_ids, labels
    def __len__(self):
        return len(self.examples)
    def __getitem__(self, i) -> torch.Tensor:
        # convert_claim_ev already returns long tensors; re-wrapping them in
        # torch.tensor (as the original did) copies the data and triggers a
        # UserWarning, so return them directly.
        input_ids, label = self.convert_claim_ev([self.examples[i]])
        return (input_ids, label)
|
#2裁切框為小圖片
import cv2
import os
from re import split
def pos():
    """Parse contour bounding-box records from ./contours.txt.

    Each line has a fixed 5-character prefix and a 2-character suffix stripped
    (``line[5:-2]``); the remainder is split on commas/whitespace.

    Returns:
        list[list[str]]: one list of coordinate strings per input line.
    """
    # Use a context manager so the file is closed even if parsing raises
    # (the original opened/closed manually and leaked the handle on error).
    with open(r'./contours.txt', 'r') as txt_file:
        lines = txt_file.readlines()
    # The original pre-allocated a [[0]*8] matrix and overwrote every row;
    # a comprehension builds the result directly.
    return [split(r'[,\s]\s*', line[5:-2]) for line in lines]
def crop(pos):
    """Crop each detected box out of ./rectangle.jpg into ./Numdata/.

    Args:
        pos: list of per-box coordinate strings as returned by ``pos()``;
            index 0/1 is the top-left corner, 6/7 the bottom-right. A 15px
            margin is shaved off every side.

    Side effects: empties (or creates) ./Numdata/ and writes one
    ``<x>,<y>.jpg`` per box.
    """
    img = cv2.imread(r"./rectangle.jpg")
    # Renamed from `dir`, which shadowed the builtin of the same name.
    out_dir = r'./Numdata/'
    if os.path.isdir(out_dir):
        # Clear stale crops from a previous run.
        for file_name in os.listdir(out_dir):
            os.remove(out_dir + file_name)
    else:
        os.mkdir(out_dir)
    for box in pos:
        x = int(box[0]) + 15
        y = int(box[1]) + 15
        w = int(box[6]) - 15
        h = int(box[7]) - 15
        crop_img = img[y:h, x:w]
        write_name = out_dir + str(x) + ',' + str(y) + '.jpg'
        cv2.imwrite(write_name, crop_img)
if __name__ == "__main__":
crop(pos())
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""Module contains code to download data from TheLatin Libary.com
Example:
$ python3 latin_downloader.py
"""
import collections
import io
import os
from urllib.parse import urlparse
import requests
from bs4 import BeautifulSoup
from pybloomfilter import BloomFilter
from phyllo.phyllo_logger import logger
THELATINLIBRARY = "http://www.thelatinlibrary.com"
def latin_downloader():
    """Downloads all collections and saves it locally.

    Breadth-first crawl starting at the site index. Visited pages are tracked
    in a persistent Bloom filter keyed on both page *text* and page *URL*, so
    duplicate content is skipped even under different URLs (with the filter's
    configured 1% false-positive rate). Each page's text is written to a local
    path mirroring its URL, encoded as UTF-16.
    """
    home_page = "http://www.thelatinlibrary.com/index.html"
    # A bloomfilter http://pybloomfiltermmap3.readthedocs.io/en/latest/ref.html
    visited = BloomFilter(10000000, 0.01, 'thelatinlibrary.bloom')
    to_visit = collections.deque()
    to_visit.append(home_page)
    # Main Loop
    while len(to_visit) > 0:
        # Get the next list of pages
        pageurl = to_visit.popleft()
        page = requests.get(pageurl)
        if not page.ok:
            # Failures are logged but the (error) body is still processed below.
            logger.error("Couldn't find url. {}".format(pageurl))
            # page.raise_for_status()
        if page.text in visited or page.url in visited:
            continue
        soup = BeautifulSoup(page.text, "lxml")
        # Save the page to a file
        url = urlparse(page.url)
        # Removing the first path before joining
        if url.path.startswith("/"):
            fileloc = os.path.join(url.netloc, url.path[1:])
        else:
            fileloc = os.path.join(url.netloc, url.path)
        os.makedirs(os.path.dirname(fileloc), mode=0o755, exist_ok=True)
        with io.open(fileloc, mode='w', encoding='utf-16') as newfile:
            logger.info("Created: {}".format(fileloc))
            newfile.write(soup.text)
        # Get the next pages
        for link in soup.find_all('a'):
            href = link.get('href')
            # No empty or mail links
            if href is None or len(href) == 0 or href.startswith('mailto:'):
                continue
            # Prevent non-local links e.g. http://www.apple.com
            if "http" in href and "thelatinlibrary" not in href:
                continue
            # No pdf or doc or docx fimes
            if href.endswith("pdf") or href.endswith("doc") or\
               href.endswith("docx") or href.endswith("zip") or\
               href.endswith("jpg"):
                continue
            # No local links, we already have the page
            if href.startswith("#"):
                continue
            # Annomalies: known-broken links on the site, skipped outright.
            if href in ("78b", "ovid/ovid/ovid.ponto.shtml", "bib.html",
                        "brevechronicon.html"):
                continue
            # Remove absolute paths
            if href.startswith('/'):
                href = href[1:]
            if "thelatinlibrary" in href:
                newpageurl = href
            else:
                newpageurl = os.path.join(THELATINLIBRARY, href or "")
            # Redirect to a specific index.html
            # NOTE(review): newpageurl was already computed above, so these two
            # href fixups (trailing '/' and "medieval") never affect the URL
            # that is actually enqueued — looks like a bug; confirm intent.
            if href.endswith('/'):
                href = "{}index.html".format(href)
                logger.info("expanded href to: {}".format(href))
            # More anomolies
            if href in ["medieval"]:
                href = "{}/index.html".format(href)
            if newpageurl not in visited:
                to_visit.append(newpageurl)
                logger.info("page->newpage: {} {}".format(pageurl, newpageurl))
        # Add to the bloom table last
        visited.add(page.text)
        # Add the link too
        visited.add(page.url)
if __name__ == '__main__':
    # Script entry point: crawl and mirror the whole site.
    latin_downloader()
|
#!/usr/bin/python
import argparse
import ast
import atexit
import getpass
import json
import os
import re
import requests
import shlex
import subprocess
import sys
import time
import uuid
from docker import Client
OVN_REMOTE = ""
OVN_BRIDGE = "br-int"
def call_popen(cmd):
    """Run *cmd* (an argv list) and return its stripped stdout.

    Returns "" when the command produced no output; raises RuntimeError on a
    non-zero exit status.
    """
    child = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    output = child.communicate()
    if child.returncode:
        raise RuntimeError("Fatal error executing %s" % (cmd))
    # communicate() returns a (stdout, stderr) tuple; stdout can be None when
    # the stream was not captured. Identity comparison per PEP 8 (was `== None`).
    if len(output) == 0 or output[0] is None:
        output = ""
    else:
        output = output[0].strip()
    return output
def call_prog(prog, args_list):
    """Invoke *prog* with a 5-second timeout and console logging disabled."""
    full_cmd = [prog, "--timeout=5", "-vconsole:off"] + args_list
    return call_popen(full_cmd)
def ovs_vsctl(args):
    # Run ovs-vsctl, with *args* given as one shell-style string.
    return call_prog("ovs-vsctl", shlex.split(args))
def ovn_nbctl(args):
    """Run ovn-nbctl against the configured OVN_REMOTE northbound database."""
    db_option = "--db=%s" % OVN_REMOTE
    return call_prog("ovn-nbctl", [db_option] + shlex.split(args))
def plugin_init(args):
    # kubectl 'init' hook: no initialization is needed for this plugin.
    pass
def get_annotations(pod_name, namespace):
    """Return the annotations dict of *pod_name* in *namespace*, or None.

    The API server address is read from the local Open vSwitch database
    (external-ids:api_server). Returns None when that key is unset, the pod
    list cannot be fetched, the pod is not found, or it has no annotations.
    """
    api_server = ovs_vsctl("--if-exists get open_vswitch . "
                           "external-ids:api_server").strip('"')
    if not api_server:
        return None
    url = "http://%s/api/v1/pods" % (api_server)
    # BUG FIX: the request previously ignored `url` and queried a hard-coded
    # http://0.0.0.0:8080/api/v1/pods; use the configured API server instead.
    response = requests.get(url)
    if response:
        pods = response.json()['items']
    else:
        return None
    for pod in pods:
        if (pod['metadata']['namespace'] == namespace and
           pod['metadata']['name'] == pod_name):
            annotations = pod['metadata'].get('annotations', "")
            if annotations:
                return annotations
            else:
                return None
def associate_security_group(lport_id, security_group_id):
    # Stub: security-group association is not implemented yet.
    pass
def get_ovn_remote():
    """Populate the global OVN_REMOTE from the local OVS database.

    Best-effort: on failure OVN_REMOTE is left unchanged and a warning is
    written to stderr.
    """
    global OVN_REMOTE
    try:
        OVN_REMOTE = ovs_vsctl("get Open_vSwitch . "
                               "external_ids:ovn-remote").strip('"')
    except Exception as e:
        # The original built this message and silently discarded it; at least
        # surface the failure (callers treat this lookup as best-effort).
        error = "failed to fetch ovn-remote (%s)" % (str(e))
        sys.stderr.write(error + "\n")
def _run_or_die(command, error_fmt, fatal=True):
    """Run one shell command; on failure exit with (or warn about) error_fmt.

    error_fmt may contain a single %s that receives the exception text; plain
    messages are used verbatim. fatal=False writes to stderr and continues.
    """
    try:
        call_popen(shlex.split(command))
    except Exception as e:
        if "%s" in error_fmt:
            error = error_fmt % (str(e))
        else:
            error = error_fmt
        if fatal:
            sys.exit(error)
        sys.stderr.write(error)
def plugin_setup(args):
    """kubectl 'setup' hook: wire a pod's container into the OVN logical network.

    k8_args are (namespace, pod_name, container_id). Inspects the container via
    the Docker socket, replaces its default veth with a new pair, configures
    ip/mac/mtu/gateway inside the container's netns, creates the OVN logical
    port and attaches the host end to the OVS integration bridge. Exits the
    process with an error message on any fatal step.
    """
    ns = args.k8_args[0]
    pod_name = args.k8_args[1]
    container_id = args.k8_args[2]
    get_ovn_remote()
    client = Client(base_url='unix://var/run/docker.sock')
    try:
        inspect = client.inspect_container(container_id)
        pid = inspect["State"]["Pid"]
        ip_address = inspect["NetworkSettings"]["IPAddress"]
        netmask = inspect["NetworkSettings"]["IPPrefixLen"]
        mac = inspect["NetworkSettings"]["MacAddress"]
        gateway_ip = inspect["NetworkSettings"]["Gateway"]
    except Exception as e:
        error = "failed to get container pid and ip address (%s)" % (str(e))
        sys.exit(error)
    if not pid:
        sys.exit("failed to fetch the pid")
    # Expose the container's network namespace under /var/run/netns so that
    # `ip netns exec` can target it by pid.
    netns_dst = "/var/run/netns/%s" % (pid)
    if not os.path.isfile(netns_dst):
        netns_src = "/proc/%s/ns/net" % (pid)
        _run_or_die("ln -s %s %s" % (netns_src, netns_dst),
                    "failed to create the netns link")
    # Delete the existing veth pair (non-fatal: it may not exist).
    _run_or_die("ip netns exec %s ip link del eth0" % (pid),
                "failed to delete the default veth pair", fatal=False)
    # Veth names are derived from the container id (kernel limit: 15 chars).
    veth_outside = container_id[0:15]
    veth_inside = container_id[0:13] + "_c"
    _run_or_die("ip link add %s type veth peer name %s"
                % (veth_outside, veth_inside),
                "Failed to create veth pair (%s)")
    # Up the outer interface
    _run_or_die("ip link set %s up" % veth_outside,
                "Failed to admin up veth_outside (%s)")
    # Move the inner veth inside the container
    _run_or_die("ip link set %s netns %s" % (veth_inside, pid),
                "Failed to move veth inside (%s)")
    # Change the name of veth_inside to eth0
    _run_or_die("ip netns exec %s ip link set dev %s name eth0"
                % (pid, veth_inside),
                "Failed to change name to eth0 (%s)")
    # Up the inner interface
    _run_or_die("ip netns exec %s ip link set eth0 up" % (pid),
                "Failed to admin up veth_inside (%s)")
    # Set the mtu to handle tunnels (1450 leaves room for Geneve/VXLAN headers).
    _run_or_die("ip netns exec %s ip link set dev eth0 mtu %s" % (pid, 1450),
                "Failed to set mtu (%s)")
    # Set the ip address
    _run_or_die("ip netns exec %s ip addr add %s/%s dev eth0"
                % (pid, ip_address, netmask),
                "Failed to set ip address (%s)")
    # Set the mac address
    _run_or_die("ip netns exec %s ip link set dev eth0 address %s" % (pid, mac),
                "Failed to set mac address (%s)")
    # Set the gateway
    _run_or_die("ip netns exec %s ip route add default via %s"
                % (pid, gateway_ip),
                "Failed to set gateway (%s)")
    # Get the logical switch this host attaches pods to.
    try:
        lswitch = ovs_vsctl("--if-exists get open_vswitch . "
                            "external_ids:lswitch").strip('"')
        if not lswitch:
            error = "No lswitch created for this host"
            sys.exit(error)
    except Exception as e:
        error = "Failed to get external_ids:lswitch (%s)" % (str(e))
        sys.exit(error)
    # Create a logical port named after the container id.
    try:
        ovn_nbctl("lport-add %s %s" % (lswitch, container_id))
    except Exception as e:
        error = "lport-add %s" % (str(e))
        sys.exit(error)
    # Set the ip address and mac address
    try:
        ovn_nbctl("lport-set-addresses %s \"%s %s\""
                  % (container_id, mac, ip_address))
    except Exception as e:
        error = "lport-set-addresses %s" % (str(e))
        sys.exit(error)
    # Add the port to the OVS integration bridge; roll back the logical port
    # if that fails so no orphan lport is left behind.
    try:
        ovs_vsctl("add-port %s %s -- set interface %s "
                  "external_ids:attached_mac=%s external_ids:iface-id=%s "
                  "external_ids:ip_address=%s"
                  % (OVN_BRIDGE, veth_outside, veth_outside, mac,
                     container_id, ip_address))
    except Exception as e:
        ovn_nbctl("lport-del %s" % container_id)
        error = "failed to create a OVS port. (%s)" % (str(e))
        sys.exit(error)
    annotations = get_annotations(ns, pod_name)
    if annotations:
        security_group = annotations.get("security-group", "")
        if security_group:
            # BUG FIX: the original passed the undefined name `lport`
            # (NameError); the logical port is named after container_id.
            associate_security_group(container_id, security_group)
def plugin_status(args):
    """kubectl 'status' hook: print the pod's IP as JSON if its port exists.

    k8_args are (namespace, pod_name, container_id). Looks up the OVS
    interface derived from the container id; prints nothing when absent.
    """
    ns = args.k8_args[0]
    pod_name = args.k8_args[1]
    container_id = args.k8_args[2]
    veth_outside = container_id[0:15]
    ip_address = ovs_vsctl("--if-exists get interface %s "
                           "external_ids:ip_address"
                           % (veth_outside)).strip('"')
    if ip_address:
        style = {"ip": ip_address}
        # BUG FIX: was a Python-2-only `print` statement; the parenthesized
        # form behaves identically on Python 2 and works on Python 3.
        print(json.dumps(style))
def disassociate_security_group(lport_id):
    # Stub: security-group disassociation is not implemented yet.
    pass
def plugin_teardown(args):
    """kubectl 'teardown' hook: undo plugin_setup for one pod.

    k8_args are (namespace, pod_name, container_id). Every step is
    best-effort: failures are written to stderr and teardown continues, so a
    partially-wired pod is cleaned up as far as possible.
    """
    ns = args.k8_args[0]
    pod_name = args.k8_args[1]
    container_id = args.k8_args[2]
    get_ovn_remote()
    veth_outside = container_id[0:15]
    # Remove the host-side veth (its peer inside the netns dies with it).
    command = "ip link delete %s" % (veth_outside)
    try:
        call_popen(shlex.split(command))
    except Exception as e:
        error = "Failed to delete veth_outside (%s)" % (str(e))
        sys.stderr.write(error)
    annotations = get_annotations(ns, pod_name)
    if annotations:
        security_group = annotations.get("security-group", "")
        if security_group:
            disassociate_security_group(container_id)
    # Remove the OVN logical port.
    try:
        ovn_nbctl("lport-del %s" % container_id)
    except Exception as e:
        error = "failed to delete logical port (%s)" % (str(e))
        sys.stderr.write(error)
    # Remove the OVS port from the integration bridge.
    try:
        ovs_vsctl("del-port %s" % (veth_outside))
    except Exception as e:
        # NOTE(review): unlike the other handlers this message formats the
        # port name, not the exception — possibly intentional, confirm.
        error = "failed to delete OVS port (%s)" % (veth_outside)
        sys.stderr.write(error)
def main():
    """Build the kubectl network-plugin CLI and dispatch to the sub-command.

    Sub-commands: init (no arguments) plus setup/status/teardown, which each
    take the three positional arguments kubectl passes (namespace, pod name,
    container id).
    """
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers(title='Subcommands',
                                       dest='command_name')
    # 'init' is the odd one out: no k8_args.
    init_parser = subparsers.add_parser('init', help="kubectl init")
    init_parser.set_defaults(func=plugin_init)
    # The remaining sub-commands share the same shape; register them in a loop.
    for name, help_text, handler in (
            ('setup', "setup pod networking", plugin_setup),
            ('status', "pod status", plugin_status),
            ('teardown', "pod teardown", plugin_teardown)):
        sub = subparsers.add_parser(name, help=help_text)
        sub.add_argument('k8_args', nargs=3,
                         help='arguments passed by kubectl')
        sub.set_defaults(func=handler)
    args = parser.parse_args()
    args.func(args)
if __name__ == '__main__':
    # Script entry point: dispatch the kubectl plugin sub-command.
    main()
|
from functools import partial
from mslice.util.qt import QtWidgets
from mslice.util.qt.QtCore import Qt
import os.path as path
import matplotlib.colors as colors
from matplotlib.lines import Line2D
from mslice.models.colors import to_hex
from mslice.presenters.plot_options_presenter import SlicePlotOptionsPresenter
from mslice.presenters.quick_options_presenter import quick_options
from mslice.models.workspacemanager.workspace_provider import get_workspace_handle
from mslice.plotting.plot_window.iplot import IPlot
from mslice.plotting.plot_window.interactive_cut import InteractiveCut
from mslice.plotting.plot_window.plot_options import SlicePlotOptions
class SlicePlot(IPlot):
    def __init__(self, figure_manager, slice_plotter_presenter, workspace_name):
        """Bind this slice plot to its figure window and presenter.

        figure_manager provides the Qt window and canvas; workspace_name names
        the workspace being plotted.
        """
        self.manager = figure_manager
        self.plot_window = figure_manager.window
        self._canvas = self.plot_window.canvas
        self._slice_plotter_presenter = slice_plotter_presenter
        self.ws_name = workspace_name
        # Lazily-populated recoil/CIF overlay state.
        self._arb_nuclei_rmm = None
        self._cif_file = None
        self._cif_path = None
        self._legend_dict = {}
        # Interactive cuts: current InteractiveCut and its two canvas event ids.
        self.icut = None
        self.icut_event = [None, None]
        self.setup_connections(self.plot_window)
def setup_connections(self, plot_figure):
plot_figure.action_interactive_cuts.setVisible(True)
plot_figure.action_interactive_cuts.triggered.connect(self.toggle_interactive_cuts)
plot_figure.action_save_cut.setVisible(False)
plot_figure.action_save_cut.triggered.connect(self.save_icut)
plot_figure.action_flip_axis.setVisible(False)
plot_figure.action_flip_axis.triggered.connect(self.flip_icut)
plot_figure.action_sqe.triggered.connect(
partial(self.show_intensity_plot, plot_figure.action_sqe,
self._slice_plotter_presenter.show_scattering_function, False))
plot_figure.action_chi_qe.triggered.connect(
partial(self.show_intensity_plot, plot_figure.action_chi_qe,
self._slice_plotter_presenter.show_dynamical_susceptibility, True))
plot_figure.action_chi_qe_magnetic.triggered.connect(
partial(self.show_intensity_plot, plot_figure.action_chi_qe_magnetic,
self._slice_plotter_presenter.show_dynamical_susceptibility_magnetic, True))
plot_figure.action_d2sig_dw_de.triggered.connect(
partial(self.show_intensity_plot, plot_figure.action_d2sig_dw_de,
self._slice_plotter_presenter.show_d2sigma, False))
plot_figure.action_symmetrised_sqe.triggered.connect(
partial(self.show_intensity_plot, plot_figure.action_symmetrised_sqe,
self._slice_plotter_presenter.show_symmetrised, True))
plot_figure.action_gdos.triggered.connect(
partial(self.show_intensity_plot, plot_figure.action_gdos, self._slice_plotter_presenter.show_gdos, True))
plot_figure.action_hydrogen.triggered.connect(
partial(self.toggle_overplot_line, 1, True))
plot_figure.action_deuterium.triggered.connect(
partial(self.toggle_overplot_line, 2, True))
plot_figure.action_helium.triggered.connect(
partial(self.toggle_overplot_line, 4, True))
plot_figure.action_arbitrary_nuclei.triggered.connect(self.arbitrary_recoil_line)
plot_figure.action_aluminium.triggered.connect(
partial(self.toggle_overplot_line, 'Aluminium', False))
plot_figure.action_copper.triggered.connect(
partial(self.toggle_overplot_line, 'Copper', False))
plot_figure.action_niobium.triggered.connect(
partial(self.toggle_overplot_line, 'Niobium', False))
plot_figure.action_tantalum.triggered.connect(
partial(self.toggle_overplot_line, 'Tantalum', False))
plot_figure.action_cif_file.triggered.connect(partial(self.cif_file_powder_line))
def disconnect(self, plot_window):
plot_window.action_interactive_cuts.triggered.disconnect()
plot_window.action_save_cut.triggered.disconnect()
plot_window.action_flip_axis.triggered.disconnect()
plot_window.action_sqe.triggered.disconnect()
plot_window.action_chi_qe.triggered.disconnect()
plot_window.action_chi_qe_magnetic.triggered.disconnect()
plot_window.action_d2sig_dw_de.triggered.disconnect()
plot_window.action_symmetrised_sqe.triggered.disconnect()
plot_window.action_gdos.triggered.disconnect()
plot_window.action_hydrogen.triggered.disconnect()
plot_window.action_deuterium.triggered.disconnect()
plot_window.action_helium.triggered.disconnect()
plot_window.action_arbitrary_nuclei.triggered.disconnect()
plot_window.action_aluminium.triggered.disconnect()
plot_window.action_copper.triggered.disconnect()
plot_window.action_niobium.triggered.disconnect()
plot_window.action_tantalum.triggered.disconnect()
plot_window.action_cif_file.triggered.disconnect()
def window_closing(self):
# nothing to do
pass
def plot_options(self):
new_config = SlicePlotOptionsPresenter(SlicePlotOptions(), self).get_new_config()
if new_config:
self._canvas.draw()
def plot_clicked(self, x, y):
bounds = self.calc_figure_boundaries()
if bounds['x_label'] < y < bounds['title']:
if bounds['y_label'] < x < bounds['colorbar_label']:
if y < bounds['x_range']:
quick_options('x_range', self)
elif x < bounds['y_range']:
quick_options('y_range', self)
elif x > bounds['colorbar_range']:
quick_options('colorbar_range', self, self.colorbar_log)
self._canvas.draw()
def object_clicked(self, target):
if target in self._legend_dict:
quick_options(self._legend_dict[target], self)
else:
quick_options(target, self)
self.update_legend()
self._canvas.draw()
def update_legend(self):
lines = []
labels = []
axes = self._canvas.figure.gca()
line_artists = [artist for artist in axes.get_children() if isinstance(artist, Line2D)]
for line in line_artists:
if str(line.get_linestyle()) != 'None' and line.get_label() != '':
lines.append(line)
labels.append(line.get_label())
if len(lines) > 0:
legend = axes.legend(lines, labels, fontsize='small')
for legline, line in zip(legend.get_lines(), lines):
legline.set_picker(5)
self._legend_dict[legline] = line
for label, line in zip(legend.get_texts(), lines):
label.set_picker(5)
self._legend_dict[label] = line
else:
axes.legend_ = None # remove legend
if self._canvas.manager._plot_handler.icut is not None:
self._canvas.manager._plot_handler.icut.rect.ax = axes
def change_axis_scale(self, colorbar_range, logarithmic):
current_axis = self._canvas.figure.gca()
colormesh = current_axis.collections[0]
vmin, vmax = colorbar_range
if logarithmic:
label = self.colorbar_label
colormesh.colorbar.remove()
if vmin <= float(0):
vmin = 0.001
colormesh.set_clim((vmin, vmax))
norm = colors.LogNorm(vmin, vmax)
colormesh.set_norm(norm)
self._canvas.figure.colorbar(colormesh)
self.colorbar_label = label
else:
label = self.colorbar_label
colormesh.colorbar.remove()
colormesh.set_clim((vmin, vmax))
norm = colors.Normalize(vmin, vmax)
colormesh.set_norm(norm)
self._canvas.figure.colorbar(colormesh)
self.colorbar_label = label
def get_line_options(self, target):
line_options = {
'label': target.get_label(),
'legend': None,
'shown': None,
'color': to_hex(target.get_color()),
'style': target.get_linestyle(),
'width': str(int(target.get_linewidth())),
'marker': target.get_marker(),
'error_bar': None
}
return line_options
def set_line_options(self, line, line_options):
line.set_label(line_options['label'])
line.set_linestyle(line_options['style'])
line.set_marker(line_options['marker'])
line.set_color(line_options['color'])
line.set_linewidth(line_options['width'])
def calc_figure_boundaries(self):
fig_x, fig_y = self._canvas.figure.get_size_inches() * self._canvas.figure.dpi
bounds = {}
bounds['y_label'] = fig_x * 0.07
bounds['y_range'] = fig_x * 0.12
bounds['colorbar_range'] = fig_x * 0.75
bounds['colorbar_label'] = fig_x * 0.86
bounds['title'] = fig_y * 0.9
bounds['x_range'] = fig_y * 0.09
bounds['x_label'] = fig_y * 0.05
return bounds
def toggle_overplot_line(self, key, recoil, checked, cif_file=None):
last_active_figure_number = None
if self.manager._current_figs._active_figure is not None:
last_active_figure_number = self.manager._current_figs.get_active_figure().number
self.manager.report_as_current()
if checked:
self._slice_plotter_presenter.add_overplot_line(self.ws_name, key, recoil, cif_file)
else:
self._slice_plotter_presenter.hide_overplot_line(self.ws_name, key)
self.update_legend()
self._canvas.draw()
# Reset current active figure
if last_active_figure_number is not None:
self.manager._current_figs.set_figure_as_current(last_active_figure_number)
def arbitrary_recoil_line(self):
recoil = True
checked = self.plot_window.action_arbitrary_nuclei.isChecked()
if checked:
self._arb_nuclei_rmm, confirm = QtWidgets.QInputDialog.getInt(
self.plot_window, 'Arbitrary Nuclei', 'Enter relative mass:', min=1)
if confirm:
self.toggle_overplot_line(self._arb_nuclei_rmm, recoil, checked)
else:
self.plot_window.action_arbitrary_nuclei.setChecked(not checked)
else:
self.toggle_overplot_line(self._arb_nuclei_rmm, recoil, checked)
def cif_file_powder_line(self, checked):
if checked:
cif_path = QtWidgets.QFileDialog().getOpenFileName(self.plot_window, 'Open CIF file', '/home',
'Files (*.cif)')
cif_path = str(cif_path[0]) if isinstance(cif_path, tuple) else str(cif_path)
key = path.basename(cif_path).rsplit('.')[0]
self._cif_file = key
self._cif_path = cif_path
else:
key = self._cif_file
cif_path = None
if key:
recoil = False
self.toggle_overplot_line(key, recoil, checked, cif_file=cif_path)
def _reset_intensity(self):
options = self.plot_window.menu_intensity.actions()
for op in options:
op.setChecked(False)
def selected_intensity(self):
options = self.plot_window.menu_intensity.actions()
for op in options:
if op.isChecked():
return op
def set_intensity(self, intensity):
self._reset_intensity()
intensity.setChecked(True)
def show_intensity_plot(self, action, slice_plotter_method, temp_dependent):
last_active_figure_number = None
if self.manager._current_figs._active_figure is not None:
last_active_figure_number = self.manager._current_figs.get_active_figure().number
self.manager.report_as_current()
if action.isChecked():
previous = self.selected_intensity()
self.set_intensity(action)
cbar_log = self.colorbar_log
cbar_range = self.colorbar_range
x_range = self.x_range
y_range = self.y_range
title = self.title
if temp_dependent:
if not self._run_temp_dependent(slice_plotter_method, previous):
return
else:
slice_plotter_method(self.ws_name)
self.change_axis_scale(cbar_range, cbar_log)
self.x_range = x_range
self.y_range = y_range
self.title = title
self.manager.update_grid()
self._update_lines()
self._canvas.draw()
else:
action.setChecked(True)
# Reset current active figure
if last_active_figure_number is not None:
self.manager._current_figs.set_figure_as_current(last_active_figure_number)
def _run_temp_dependent(self, slice_plotter_method, previous):
try:
slice_plotter_method(self.ws_name)
except ValueError: # sample temperature not yet set
try:
temp_value, field = self.ask_sample_temperature_field(str(self.ws_name))
except RuntimeError: # if cancel is clicked, go back to previous selection
self.set_intensity(previous)
return False
if field:
self._slice_plotter_presenter.add_sample_temperature_field(temp_value)
self._slice_plotter_presenter.update_sample_temperature(self.ws_name)
else:
try:
temp_value = float(temp_value)
if temp_value < 0:
raise ValueError
except ValueError:
self.plot_window.error_box("Invalid value entered for sample temperature. Enter a value in Kelvin \
or a sample log field.")
self.set_intensity(previous)
return False
else:
self._slice_plotter_presenter.set_sample_temperature(self.ws_name, temp_value)
slice_plotter_method(self.ws_name)
return True
def ask_sample_temperature_field(self, ws_name):
text = 'Sample Temperature not found. Select the sample temperature field or enter a value in Kelvin:'
ws = get_workspace_handle(ws_name)
try:
keys = ws.raw_ws.run().keys()
except AttributeError:
keys = ws.raw_ws.getExperimentInfo(0).run().keys()
temp_field, confirm = QtWidgets.QInputDialog.getItem(self.plot_window, 'Sample Temperature', text, keys)
if not confirm:
raise RuntimeError("sample_temperature_dialog cancelled")
else:
return str(temp_field), temp_field in keys
def _update_lines(self):
""" Updates the powder/recoil overplots lines when intensity type changes """
lines = {self.plot_window.action_hydrogen: [1, True, ''],
self.plot_window.action_deuterium: [2, True, ''],
self.plot_window.action_helium: [4, True, ''],
self.plot_window.action_arbitrary_nuclei: [self._arb_nuclei_rmm, True, ''],
self.plot_window.action_aluminium: ['Aluminium', False, ''],
self.plot_window.action_copper: ['Copper', False, ''],
self.plot_window.action_niobium: ['Niobium', False, ''],
self.plot_window.action_tantalum: ['Tantalum', False, ''],
self.plot_window.action_cif_file: [self._cif_file, False, self._cif_path]}
for line in lines:
if line.isChecked():
self._slice_plotter_presenter.add_overplot_line(self.ws_name, *lines[line])
self.update_legend()
self._canvas.draw()
def toggle_interactive_cuts(self):
self.toggle_icut_button()
self.toggle_icut()
def toggle_icut_button(self):
if not self.icut:
self.manager.picking_connected(False)
if self.plot_window.action_zoom_in.isChecked():
self.plot_window.action_zoom_in.setChecked(False)
self.plot_window.action_zoom_in.triggered.emit(False) # turn off zoom
self.plot_window.action_zoom_in.setEnabled(False)
self.plot_window.action_keep.trigger()
self.plot_window.action_keep.setEnabled(False)
self.plot_window.action_make_current.setEnabled(False)
self.plot_window.action_save_cut.setVisible(True)
self.plot_window.action_flip_axis.setVisible(True)
self._canvas.setCursor(Qt.CrossCursor)
else:
self.manager.picking_connected(True)
self.plot_window.action_zoom_in.setEnabled(True)
self.plot_window.action_keep.setEnabled(True)
self.plot_window.action_make_current.setEnabled(True)
self.plot_window.action_save_cut.setVisible(False)
self.plot_window.action_flip_axis.setVisible(False)
self._canvas.setCursor(Qt.ArrowCursor)
def toggle_icut(self):
if self.icut is not None:
self.icut.clear()
self.icut = None
else:
self.icut = InteractiveCut(self, self._canvas, self.ws_name)
def save_icut(self):
self.icut.save_cut()
def flip_icut(self):
self.icut.flip_axis()
def update_workspaces(self):
self._slice_plotter_presenter.update_displayed_workspaces()
@property
def colorbar_label(self):
return self._canvas.figure.get_axes()[1].get_ylabel()
@colorbar_label.setter
def colorbar_label(self, value):
self._canvas.figure.get_axes()[1].set_ylabel(value, labelpad=20, rotation=270, picker=5)
@property
def colorbar_range(self):
return self._canvas.figure.gca().collections[0].get_clim()
@colorbar_range.setter
def colorbar_range(self, value):
self.change_axis_scale(value, self.colorbar_log)
@property
def colorbar_log(self):
return isinstance(self._canvas.figure.gca().collections[0].norm, colors.LogNorm)
@colorbar_log.setter
def colorbar_log(self, value):
self.change_axis_scale(self.colorbar_range, value)
@property
def title(self):
return self.manager.title
@title.setter
def title(self, value):
self.manager.title = value
@property
def x_label(self):
return self.manager.x_label
@x_label.setter
def x_label(self, value):
self.manager.x_label = value
@property
def y_label(self):
return self.manager.y_label
@y_label.setter
def y_label(self, value):
self.manager.y_label = value
@property
def x_range(self):
return self.manager.x_range
@x_range.setter
def x_range(self, value):
self.manager.x_range = value
@property
def y_range(self):
return self.manager.y_range
@y_range.setter
def y_range(self, value):
self.manager.y_range = value
@property
def x_grid(self):
return self.manager.x_grid
@x_grid.setter
def x_grid(self, value):
self.manager.x_grid = value
@property
def y_grid(self):
return self.manager.y_grid
@y_grid.setter
def y_grid(self, value):
self.manager.y_grid = value
|
# Find the numbers in sequence A that are smaller than the integer X.
N, X = map(int, input().split()) # N is the number of integers in A; X is the threshold
A = list(map(int, input().split()))  # the N integers of sequence A
def less_than(A, X):
    """Return the elements of A that are strictly smaller than X, as strings.

    Order of the original sequence is preserved.
    """
    return [str(value) for value in A if value < X]
print(" ".join(less_than(A, X)))
|
from twisted.internet import reactor
class Client(object):
    """A comet client session whose liveness is tracked via a reactor timeout.

    Each ``ping`` refreshes the timeout; when it fires, ``timeout_cb`` is
    invoked with the client and its ``teardown`` bound method.
    """

    @property
    def id(self):
        """Identifier assigned to this client."""
        return self._id

    @property
    def meta(self):
        """Opaque metadata supplied at construction (may be None)."""
        return self._meta

    @property
    def comet_server(self):
        """The owning comet server."""
        return self._comet_server

    def __init__(self, comet_server, id, timeout_cb, meta=None):
        self._comet_server = comet_server
        self._id = id
        self._meta = meta
        self._timeout_delayed_call = None
        self.timeout_cb = timeout_cb
        self.channels = dict()
        # Arm the session timeout immediately.
        self.ping()

    def ping(self):
        """Refresh the session timeout, (re)scheduling it when necessary."""
        timeout_secs = self.comet_server.config.client_session_timeout
        pending = self._timeout_delayed_call
        if pending is not None and pending.active():
            pending.reset(timeout_secs)
        else:
            self._timeout_delayed_call = reactor.callLater(
                timeout_secs, self.timeout
            )

    def cancel_timeout_delayed_call(self):
        """Cancel any pending timeout and drop the handle."""
        pending = self._timeout_delayed_call
        if pending is not None and pending.active():
            pending.cancel()
        self._timeout_delayed_call = None

    def timeout(self):
        """Called by the reactor when the session expires."""
        self.cancel_timeout_delayed_call()
        self.timeout_cb(self, self.teardown)

    def teardown(self):
        """Dispose of the session, cancelling any pending timeout."""
        self.cancel_timeout_delayed_call()
|
# -*- coding: utf-8 -*-
"""
test.t_controlbeast.test_CB
~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: Copyright 2013 by the ControlBeast team, see AUTHORS.
:license: ISC, see LICENSE for details.
"""
from unittest import TestCase
from controlbeast import get_version
class TestCbBase(TestCase):
    """
    Unit tests for the top-level ControlBeast module.

    **Covered test cases:**

    ============== ========================================================================================
    Test Case      Description
    ============== ========================================================================================
    01             Get the ControlBeast version information.
    ============== ========================================================================================
    """

    def test_01(self):
        """
        Test Case 01:
        Retrieve the ControlBeast version information.

        The test passes if the returned version information is a string.
        """
        version = get_version()
        self.assertIsInstance(version, str)
|
species(
label = '[CH2]C(CC)C([O])=O(873)',
structure = SMILES('[CH2]C(CC)C([O])=O'),
E0 = (-86.1147,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2850,1437.5,1250,1305,750,350,1380,1390,370,380,2900,435,3000,3100,440,815,1455,1000,2750,2800,2850,1350,1500,750,1050,1375,1000,200,800,960,1120,1280,1440,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (100.116,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.18859,0.0680442,-8.15552e-05,7.11743e-08,-2.73015e-11,-10261.9,25.6609], Tmin=(100,'K'), Tmax=(751.672,'K')), NASAPolynomial(coeffs=[3.71807,0.0464834,-2.2365e-05,4.34135e-09,-3.05204e-13,-10413.4,15.7019], Tmin=(751.672,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-86.1147,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(340.893,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-(Cds-O2d)H) + group(Cs-(Cds-O2d)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-OdCsOs) + radical(CCOJ) + radical(CJC(C)C=O)"""),
)
species(
label = 'butene1(127)',
structure = SMILES('C=CCC'),
E0 = (-16.4325,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2850,1437.5,1250,1305,750,350,2950,3100,1380,975,1025,1650,2750,2800,2850,1350,1500,750,1050,1375,1000,3010,987.5,1337.5,450,1655,252.555],'cm^-1')),
HinderedRotor(inertia=(0.178654,'amu*angstrom^2'), symmetry=1, barrier=(7.72883,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.185589,'amu*angstrom^2'), symmetry=1, barrier=(7.72103,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (56.1063,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(2968.28,'J/mol'), sigma=(5.176,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=1.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.58773,0.0232778,1.93412e-05,-3.55496e-08,1.36906e-11,-1918.73,14.5751], Tmin=(100,'K'), Tmax=(1007.28,'K')), NASAPolynomial(coeffs=[7.20517,0.0236362,-9.0315e-06,1.65393e-09,-1.16019e-13,-3797.34,-12.4426], Tmin=(1007.28,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-16.4325,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(274.378,'J/(mol*K)'), label="""butene1""", comment="""Thermo library: DFT_QCI_thermo"""),
)
species(
label = 'CO2(13)',
structure = SMILES('O=C=O'),
E0 = (-403.131,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([459.166,1086.67,1086.68,2300.05],'cm^-1')),
],
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (44.0095,'amu'),
collisionModel = TransportData(shapeIndex=1, epsilon=(1622.99,'J/mol'), sigma=(3.941,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""PrimaryTransportLibrary"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.2779,0.00275783,7.12787e-06,-1.07855e-08,4.14228e-12,-48475.6,5.97856], Tmin=(100,'K'), Tmax=(988.185,'K')), NASAPolynomial(coeffs=[4.55071,0.00290728,-1.14643e-06,2.25798e-10,-1.69526e-14,-48986,-1.45662], Tmin=(988.185,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-403.131,'kJ/mol'), Cp0=(29.1007,'J/(mol*K)'), CpInf=(62.3585,'J/(mol*K)'), label="""CO2""", comment="""Thermo library: BurkeH2O2"""),
)
species(
label = 'CCC1CC1([O])[O](2669)',
structure = SMILES('CCC1CC1([O])[O]'),
E0 = (64.8136,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (100.116,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.46579,0.047136,-1.07989e-05,-1.13476e-08,5.35354e-12,7893.54,24.2627], Tmin=(100,'K'), Tmax=(1179.3,'K')), NASAPolynomial(coeffs=[10.6293,0.0329101,-1.41434e-05,2.66264e-09,-1.85749e-13,4560.17,-26.4266], Tmin=(1179.3,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(64.8136,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(349.208,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsH) + group(O2s-CsH) + group(Cs-CsCsCsH) + group(Cs-CsCsOsOs) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + ring(Cyclopropane) + radical(CC(C)(O)OJ) + radical(CC(C)(O)OJ)"""),
)
species(
label = 'H(3)',
structure = SMILES('[H]'),
E0 = (211.792,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (1.00794,'amu'),
collisionModel = TransportData(shapeIndex=0, epsilon=(1205.6,'J/mol'), sigma=(2.05,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,9.24385e-15,-1.3678e-17,6.66185e-21,-1.00107e-24,25472.7,-0.459566], Tmin=(100,'K'), Tmax=(3459.6,'K')), NASAPolynomial(coeffs=[2.5,9.20456e-12,-3.58608e-15,6.15199e-19,-3.92042e-23,25472.7,-0.459566], Tmin=(3459.6,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(211.792,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""H""", comment="""Thermo library: BurkeH2O2"""),
)
species(
label = 'C=C(CC)C([O])=O(2670)',
structure = SMILES('C=C(CC)C([O])=O'),
E0 = (-94.1283,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2850,1437.5,1250,1305,750,350,2950,3100,1380,975,1025,1650,2750,2800,2850,1350,1500,750,1050,1375,1000,350,440,435,1725,180,180,309.061,494.05,1600,2880,3200],'cm^-1')),
HinderedRotor(inertia=(0.0983094,'amu*angstrom^2'), symmetry=1, barrier=(2.26033,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.0983094,'amu*angstrom^2'), symmetry=1, barrier=(2.26033,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.0983094,'amu*angstrom^2'), symmetry=1, barrier=(2.26033,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (99.1079,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.36313,0.040999,-1.8471e-05,2.88325e-09,-1.05335e-13,-11325.7,18.5557], Tmin=(100,'K'), Tmax=(2658.82,'K')), NASAPolynomial(coeffs=[36.9584,0.001565,-2.49043e-06,4.4755e-10,-2.40516e-14,-33116.6,-183.757], Tmin=(2658.82,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-94.1283,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(320.107,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-(Cds-O2d)H) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cd-CdCs(CO)) + group(Cds-O2d(Cds-Cds)O2s) + group(Cds-CdsHH) + radical(CCOJ)"""),
)
species(
label = 'C2H5(29)',
structure = SMILES('C[CH2]'),
E0 = (107.874,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2800,2850,1350,1500,750,1050,1375,1000,1190.6,1642.82,1642.96,3622.23,3622.39],'cm^-1')),
HinderedRotor(inertia=(0.866817,'amu*angstrom^2'), symmetry=1, barrier=(19.9298,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (29.0611,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(2097.75,'J/mol'), sigma=(4.302,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=1.5, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[4.24186,-0.00356905,4.82667e-05,-5.85401e-08,2.25805e-11,12969,4.44704], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[4.32196,0.0123931,-4.39681e-06,7.0352e-10,-4.18435e-14,12175.9,0.171104], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), E0=(107.874,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(153.818,'J/(mol*K)'), label="""C2H5""", comment="""Thermo library: Klippenstein_Glarborg2016"""),
)
species(
label = 'tSPC_1553(806)',
structure = SMILES('C=CC([O])=O'),
E0 = (-95.7795,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2950,3100,1380,975,1025,1650,3010,987.5,1337.5,450,1655,519.883,519.884,519.886,519.889,519.893,519.897],'cm^-1')),
HinderedRotor(inertia=(0.000623705,'amu*angstrom^2'), symmetry=1, barrier=(0.119627,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (71.0547,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.66944,0.0212367,8.85353e-06,-2.77913e-08,1.22625e-11,-11464.5,15.0665], Tmin=(100,'K'), Tmax=(983.401,'K')), NASAPolynomial(coeffs=[10.0894,0.0103164,-3.86785e-06,7.48921e-10,-5.61021e-14,-13855.2,-25.3414], Tmin=(983.401,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-95.7795,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(178.761,'J/(mol*K)'), label="""tSPC_1553""", comment="""Thermo library: CBS_QB3_1dHR"""),
)
species(
label = '[O][C]=O(669)',
structure = SMILES('[O][C]=O'),
E0 = (31.9507,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([1855,455,950],'cm^-1')),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (44.0095,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.90478,-0.000175995,8.15126e-06,-1.13656e-08,4.4768e-12,3848.25,8.04855], Tmin=(100,'K'), Tmax=(975.388,'K')), NASAPolynomial(coeffs=[5.59398,-0.00122084,7.11747e-07,-9.7712e-11,3.97995e-15,3238.91,-1.49318], Tmin=(975.388,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(31.9507,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(103.931,'J/(mol*K)'), comment="""Thermo library: Klippenstein_Glarborg2016 + radical(OJC=O) + radical((O)CJOH)"""),
)
species(
label = '[CH2][CH]CC(130)',
structure = SMILES('[CH2][CH]CC'),
E0 = (255.669,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2850,1437.5,1250,1305,750,350,2750,2800,2850,1350,1500,750,1050,1375,1000,3000,3100,440,815,1455,1000,3025,407.5,1350,352.5,2031.24],'cm^-1')),
HinderedRotor(inertia=(0.244974,'amu*angstrom^2'), symmetry=1, barrier=(5.63244,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.00192352,'amu*angstrom^2'), symmetry=1, barrier=(5.63177,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.244928,'amu*angstrom^2'), symmetry=1, barrier=(5.63137,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (56.1063,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.98997,0.0287412,-9.51469e-06,4.19232e-10,1.90526e-13,30780.1,16.8971], Tmin=(100,'K'), Tmax=(2154.56,'K')), NASAPolynomial(coeffs=[12.4231,0.0182241,-7.06316e-06,1.16769e-09,-7.11818e-14,25091.4,-39.6212], Tmin=(2154.56,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(255.669,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(320.107,'J/(mol*K)'), comment="""Thermo library: DFT_QCI_thermo + radical(RCCJC) + radical(RCCJ)"""),
)
species(
label = 'CC[C](C)C([O])=O(2671)',
structure = SMILES('CC[C](C)C([O])=O'),
E0 = (-144.052,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2850,1437.5,1250,1305,750,350,2750,2770,2790,2810,2830,2850,1350,1400,1450,1500,700,800,1000,1100,1350,1400,900,1100,360,370,350,200,800,960,1120,1280,1440,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (100.116,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.99547,0.0481278,-2.12317e-05,2.9978e-09,2.76379e-14,-17257,23.3201], Tmin=(100,'K'), Tmax=(2026.72,'K')), NASAPolynomial(coeffs=[18.1952,0.0254358,-1.13055e-05,1.99202e-09,-1.26989e-13,-25729.5,-70.9805], Tmin=(2026.72,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-144.052,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(340.893,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-(Cds-O2d)H) + group(Cs-(Cds-O2d)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-OdCsOs) + radical(CCJ(C)CO) + radical(CCOJ)"""),
)
species(
label = 'C[CH]C(C)C([O])=O(2672)',
structure = SMILES('C[CH]C(C)C([O])=O'),
E0 = (-96.7234,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (100.116,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.92711,0.0496417,-2.49398e-05,5.3302e-09,-4.18591e-13,-11562.6,25.1593], Tmin=(100,'K'), Tmax=(2381.25,'K')), NASAPolynomial(coeffs=[23.3754,0.0180734,-7.86387e-06,1.33616e-09,-8.18532e-14,-23041.9,-99.5809], Tmin=(2381.25,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-96.7234,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(340.893,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-(Cds-O2d)H) + group(Cs-(Cds-O2d)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-OdCsOs) + radical(CCJCC=O) + radical(CCOJ)"""),
)
species(
label = '[CH2][C](CC)C(=O)O(2673)',
structure = SMILES('[CH2][C](CC)C(=O)O'),
E0 = (-159.246,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (100.116,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.38598,0.0607249,-4.54771e-05,1.86185e-08,-3.32491e-12,-19061.7,25.6718], Tmin=(100,'K'), Tmax=(1244.6,'K')), NASAPolynomial(coeffs=[8.8328,0.0367917,-1.66327e-05,3.16809e-09,-2.21427e-13,-20915.3,-11.8843], Tmin=(1244.6,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-159.246,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(336.736,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-(Cds-O2d)H) + group(Cs-(Cds-O2d)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-OdCsOs) + radical(CJC(C)C=O) + radical(CCJ(C)CO)"""),
)
species(
label = '[CH2]C([CH]C)C(=O)O(2674)',
structure = SMILES('[CH2]C([CH]C)C(=O)O'),
E0 = (-111.918,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (100.116,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.21622,0.063216,-5.17662e-05,2.33901e-08,-4.51827e-12,-13362.1,27.8921], Tmin=(100,'K'), Tmax=(1187.09,'K')), NASAPolynomial(coeffs=[9.78692,0.0343363,-1.52741e-05,2.89633e-09,-2.02306e-13,-15397,-14.9265], Tmin=(1187.09,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-111.918,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(336.736,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-(Cds-O2d)H) + group(Cs-(Cds-O2d)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-OdCsOs) + radical(CJC(C)C=O) + radical(CCJCC=O)"""),
)
species(
label = '[CH2]CC(C)C([O])=O(2675)',
structure = SMILES('[CH2]CC(C)C([O])=O'),
E0 = (-91.3791,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (100.116,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.3999,0.0490637,-2.4485e-05,5.13122e-09,-4.01521e-13,-10947.5,22.6787], Tmin=(100,'K'), Tmax=(3265.76,'K')), NASAPolynomial(coeffs=[30.5728,0.0110687,-5.43139e-06,9.14608e-10,-5.36956e-14,-27488.6,-143.734], Tmin=(3265.76,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-91.3791,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(340.893,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-(Cds-O2d)H) + group(Cs-(Cds-O2d)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-OdCsOs) + radical(CCOJ) + radical(RCCJ)"""),
)
species(
label = '[CH2]CC([CH2])C(=O)O(2676)',
structure = SMILES('[CH2]CC([CH2])C(=O)O'),
E0 = (-106.574,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (100.116,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.10273,0.0684402,-6.74645e-05,3.97863e-08,-1.01979e-11,-12717.5,27.6045], Tmin=(100,'K'), Tmax=(913.726,'K')), NASAPolynomial(coeffs=[8.04476,0.0380502,-1.75754e-05,3.3865e-09,-2.38734e-13,-13986.1,-5.26042], Tmin=(913.726,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-106.574,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(336.736,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-(Cds-O2d)H) + group(Cs-(Cds-O2d)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-OdCsOs) + radical(CJC(C)C=O) + radical(RCCJ)"""),
)
species(
label = '[CH2]C(CC)[C]1OO1(2677)',
structure = SMILES('[CH2]C(CC)[C]1OO1'),
E0 = (270.703,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (100.116,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.04739,0.0518239,-9.0976e-07,-4.405e-08,2.37836e-11,32676.5,28.049], Tmin=(100,'K'), Tmax=(910.287,'K')), NASAPolynomial(coeffs=[16.4296,0.0190996,-4.44242e-06,6.16886e-10,-4.03844e-14,28431.5,-52.6503], Tmin=(910.287,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(270.703,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(340.893,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-OsCs) + group(O2s-OsCs) + group(Cs-CsCsCsH) + group(Cs-CsCsHH) + longDistanceInteraction_noncyclic(CsCs-ST) + group(Cs-CsOsOsH) + group(Cs-CsHHH) + group(Cs-CsHHH) + ring(dioxirane) + radical(Cs_P) + radical(Isobutyl)"""),
)
species(
label = 'CCC1CO[C]1[O](2678)',
structure = SMILES('CCC1CO[C]1[O]'),
E0 = (77.799,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (100.116,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.66179,0.0442424,-1.04269e-06,-2.67933e-08,1.35044e-11,9447.84,24.7825], Tmin=(100,'K'), Tmax=(921.772,'K')), NASAPolynomial(coeffs=[8.81263,0.0316353,-1.05083e-05,1.73635e-09,-1.14308e-13,7346.85,-13.3793], Tmin=(921.772,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(77.799,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(349.208,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-CsCs) + group(O2s-CsH) + group(Cs-CsCsCsH) + group(Cs-CsCsHH) + group(Cs-CsOsOsH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + ring(Oxetane) + radical(Cs_P) + radical(CCOJ)"""),
)
species(
label = 'C=C(CC)C(=O)O(883)',
structure = SMILES('C=C(CC)C(=O)O'),
E0 = (-319.833,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (100.116,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.57897,0.05856,-5.08894e-05,2.86954e-08,-7.63048e-12,-38384.5,24.9497], Tmin=(100,'K'), Tmax=(846.972,'K')), NASAPolynomial(coeffs=[5.36873,0.0406621,-1.91922e-05,3.7461e-09,-2.66232e-13,-39026.5,7.29574], Tmin=(846.972,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-319.833,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(340.893,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-(Cds-O2d)H) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cd-CdCs(CO)) + group(Cds-O2d(Cds-Cds)O2s) + group(Cds-CdsHH)"""),
)
species(
label = 'CH2(S)(23)',
structure = SMILES('[CH2]'),
E0 = (419.862,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([1369.36,2789.41,2993.36],'cm^-1')),
],
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (14.0266,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(1197.29,'J/mol'), sigma=(3.8,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[4.19195,-0.00230793,8.0509e-06,-6.60123e-09,1.95638e-12,50484.3,-0.754589], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[2.28556,0.00460255,-1.97412e-06,4.09548e-10,-3.34695e-14,50922.4,8.67684], Tmin=(1000,'K'), Tmax=(3000,'K'))], Tmin=(200,'K'), Tmax=(3000,'K'), E0=(419.862,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(58.2013,'J/(mol*K)'), label="""CH2(S)""", comment="""Thermo library: Klippenstein_Glarborg2016"""),
)
species(
label = '[CH2]C(C)C([O])=O(929)',
structure = SMILES('[CH2]C(C)C([O])=O'),
E0 = (-62.3345,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([1380,1390,370,380,2900,435,3000,3100,440,815,1455,1000,2750,2800,2850,1350,1500,750,1050,1375,1000,180,180,180,739.862,739.874,739.876],'cm^-1')),
HinderedRotor(inertia=(0.00594471,'amu*angstrom^2'), symmetry=1, barrier=(2.30921,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.00594351,'amu*angstrom^2'), symmetry=1, barrier=(2.30876,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.100426,'amu*angstrom^2'), symmetry=1, barrier=(2.30899,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (86.0892,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(4058.35,'J/mol'), sigma=(6.30806,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with Tc=633.90 K, Pc=36.69 bar (from Joback method)"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.83726,0.0532436,-6.76164e-05,6.27682e-08,-2.48209e-11,-7424.74,21.086], Tmin=(100,'K'), Tmax=(773.357,'K')), NASAPolynomial(coeffs=[2.84218,0.0379816,-1.84936e-05,3.59455e-09,-2.52304e-13,-7279.21,18.442], Tmin=(773.357,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-62.3345,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(270.22,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-(Cds-O2d)H) + group(Cs-(Cds-O2d)CsCsH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-OdCsOs) + radical(CJC(C)C=O) + radical(CCOJ)"""),
)
species(
label = 'CCC[CH]C([O])=O(2567)',
structure = SMILES('CCCC=C([O])[O]'),
E0 = (-123.306,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2783.33,2816.67,2850,1425,1450,1225,1275,1270,1340,700,800,300,400,350,440,435,1725,2750,2800,2850,1350,1500,750,1050,1375,1000,3010,987.5,1337.5,450,1655,374.346,374.346,374.35,374.35],'cm^-1')),
HinderedRotor(inertia=(0.00120296,'amu*angstrom^2'), symmetry=1, barrier=(0.119627,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.12703,'amu*angstrom^2'), symmetry=1, barrier=(12.6323,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.127029,'amu*angstrom^2'), symmetry=1, barrier=(12.6323,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (100.116,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.720821,0.0645773,-5.02095e-05,1.9969e-08,-3.20328e-12,-14706.1,26.5685], Tmin=(100,'K'), Tmax=(1473.1,'K')), NASAPolynomial(coeffs=[15.2262,0.0251895,-1.01021e-05,1.81791e-09,-1.22821e-13,-18979.6,-49.0305], Tmin=(1473.1,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-123.306,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(345.051,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-(Cds-Cd)H) + group(O2s-(Cds-Cd)H) + group(Cs-CsCsHH) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsCsCs) + radical(C=COJ) + radical(C=COJ)"""),
)
species(
label = 'CC[CH]CC([O])=O(872)',
structure = SMILES('CC[CH]CC([O])=O'),
E0 = (-90.9629,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2783.33,2816.67,2850,1425,1450,1225,1275,1270,1340,700,800,300,400,2750,2800,2850,1350,1500,750,1050,1375,1000,3025,407.5,1350,352.5,200,800,933.333,1066.67,1200,1333.33,1466.67,1600],'cm^-1')),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.156089,'amu*angstrom^2'), symmetry=1, barrier=(3.5888,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (100.116,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.2775,0.0477396,-2.22726e-05,3.94155e-09,-1.96743e-13,-10887.8,24.0178], Tmin=(100,'K'), Tmax=(2323.93,'K')), NASAPolynomial(coeffs=[24.2284,0.0180429,-8.32369e-06,1.43721e-09,-8.83974e-14,-23273.8,-105.091], Tmin=(2323.93,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-90.9629,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(340.893,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-(Cds-O2d)H) + group(Cs-CsCsHH) + group(Cs-CsCsHH) + group(Cs-(Cds-O2d)CsHH) + group(Cs-CsHHH) + group(Cds-OdCsOs) + radical(CCJCC=O) + radical(CCOJ)"""),
)
species(
label = 'CCC1COC1=O(2668)',
structure = SMILES('CCC1COC1=O'),
E0 = (-349.786,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (100.116,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.98936,0.0279234,5.64523e-05,-9.29981e-08,3.83436e-11,-41982.1,22.3799], Tmin=(100,'K'), Tmax=(927.493,'K')), NASAPolynomial(coeffs=[12.1574,0.0256931,-7.25367e-06,1.17602e-09,-8.18666e-14,-45658.5,-35.5606], Tmin=(927.493,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-349.786,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(349.208,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-Cs(Cds-O2d)) + group(Cs-(Cds-O2d)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cds-OdCsOs) + ring(Beta-Propiolactone)"""),
)
species(
label = '[CH2]C=C([O])OCC(2679)',
structure = SMILES('[CH2]C=C([O])OCC'),
E0 = (-85.7075,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2850,1437.5,1250,1305,750,350,350,440,435,1725,2750,2800,2850,1350,1500,750,1050,1375,1000,3000,3100,440,815,1455,1000,3010,987.5,1337.5,450,1655,180,586.069,587.814,588.934],'cm^-1')),
HinderedRotor(inertia=(0.323961,'amu*angstrom^2'), symmetry=1, barrier=(18.3575,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.79834,'amu*angstrom^2'), symmetry=1, barrier=(18.3554,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.0754145,'amu*angstrom^2'), symmetry=1, barrier=(18.345,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.798296,'amu*angstrom^2'), symmetry=1, barrier=(18.3544,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (100.116,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[0.343572,0.0704095,-5.23135e-05,1.26291e-08,1.44751e-12,-10167.9,25.0923], Tmin=(100,'K'), Tmax=(1025.79,'K')), NASAPolynomial(coeffs=[17.4993,0.0219137,-8.30766e-06,1.51781e-09,-1.06669e-13,-14655.7,-62.8301], Tmin=(1025.79,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-85.7075,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(340.893,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-Cs(Cds-Cd)) + group(O2s-(Cds-Cd)H) + group(Cs-CsOsHH) + group(Cs-CsHHH) + group(Cs-(Cds-Cds)HHH) + group(Cds-CdsCsH) + group(Cds-CdsCsCs) + radical(C=COJ) + radical(Allyl_P)"""),
)
species(
label = 'CH2(19)',
structure = SMILES('[CH2]'),
E0 = (381.563,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([1032.72,2936.3,3459],'cm^-1')),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (14.0266,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(1197.29,'J/mol'), sigma=(3.8,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.8328,0.000224446,4.68033e-06,-6.04743e-09,2.59009e-12,45920.8,1.40666], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[3.16229,0.00281798,-7.56235e-07,5.05446e-11,5.65236e-15,46099.1,4.77656], Tmin=(1000,'K'), Tmax=(3000,'K'))], Tmin=(200,'K'), Tmax=(3000,'K'), E0=(381.563,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(58.2013,'J/(mol*K)'), label="""CH2""", comment="""Thermo library: Klippenstein_Glarborg2016"""),
)
species(
label = 'CC[CH]C([O])=O(2500)',
structure = SMILES('CCC=C([O])[O]'),
E0 = (-99.5259,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2850,1437.5,1250,1305,750,350,350,440,435,1725,2750,2800,2850,1350,1500,750,1050,1375,1000,3010,987.5,1337.5,450,1655,434.783,437.593,441.181],'cm^-1')),
HinderedRotor(inertia=(0.09254,'amu*angstrom^2'), symmetry=1, barrier=(12.399,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.0913649,'amu*angstrom^2'), symmetry=1, barrier=(12.4151,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (86.0892,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.45968,0.0487831,-3.32284e-05,8.45923e-09,1.32071e-13,-11872.8,21.6665], Tmin=(100,'K'), Tmax=(1120.9,'K')), NASAPolynomial(coeffs=[12.4208,0.0197755,-7.9368e-06,1.46183e-09,-1.0159e-13,-14965,-35.2982], Tmin=(1120.9,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-99.5259,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(274.378,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-(Cds-Cd)H) + group(O2s-(Cds-Cd)H) + group(Cs-(Cds-Cds)CsHH) + group(Cs-CsHHH) + group(Cds-CdsCsH) + group(Cds-CdsCsCs) + radical(C=COJ) + radical(C=COJ)"""),
)
species(
label = 'O(4)',
structure = SMILES('[O]'),
E0 = (243.005,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (15.9994,'amu'),
collisionModel = TransportData(shapeIndex=0, epsilon=(665.16,'J/mol'), sigma=(2.75,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,9.24385e-15,-1.3678e-17,6.66185e-21,-1.00107e-24,29226.7,5.11107], Tmin=(100,'K'), Tmax=(3459.6,'K')), NASAPolynomial(coeffs=[2.5,9.20456e-12,-3.58608e-15,6.15199e-19,-3.92042e-23,29226.7,5.11107], Tmin=(3459.6,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(243.005,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""O""", comment="""Thermo library: BurkeH2O2"""),
)
species(
label = '[CH2]C([C]=O)CC(2680)',
structure = SMILES('[CH2]C([C]=O)CC'),
E0 = (112.555,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2850,1437.5,1250,1305,750,350,1855,455,950,1380,1390,370,380,2900,435,3000,3100,440,815,1455,1000,2750,2800,2850,1350,1500,750,1050,1375,1000,301.166],'cm^-1')),
HinderedRotor(inertia=(0.00184205,'amu*angstrom^2'), symmetry=1, barrier=(0.119627,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.132548,'amu*angstrom^2'), symmetry=1, barrier=(8.73205,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.135339,'amu*angstrom^2'), symmetry=1, barrier=(8.73252,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.133137,'amu*angstrom^2'), symmetry=1, barrier=(8.73985,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (84.1164,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.50737,0.057543,-5.11715e-05,2.66539e-08,-6.01188e-12,13624.7,24.2058], Tmin=(100,'K'), Tmax=(1029.09,'K')), NASAPolynomial(coeffs=[8.04044,0.0321494,-1.41578e-05,2.67556e-09,-1.86745e-13,12280.1,-7.49981], Tmin=(1029.09,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(112.555,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(315.95,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(Cs-(Cds-O2d)CsCsH) + group(Cs-CsCsHH) + group(Cs-CsHHH) + group(Cs-CsHHH) + group(Cds-OdCsH) + radical(CJC(C)C=O) + radical(CC(C)CJ=O)"""),
)
species(
label = 'N2',
structure = SMILES('N#N'),
E0 = (-8.69489,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (28.0135,'amu'),
collisionModel = TransportData(shapeIndex=1, epsilon=(810.913,'J/mol'), sigma=(3.621,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(1.76,'angstroms^3'), rotrelaxcollnum=4.0, comment="""PrimaryTransportLibrary"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.61263,-0.00100893,2.49898e-06,-1.43376e-09,2.58636e-13,-1051.1,2.6527], Tmin=(100,'K'), Tmax=(1817.04,'K')), NASAPolynomial(coeffs=[2.9759,0.00164141,-7.19722e-07,1.25378e-10,-7.91526e-15,-1025.84,5.53757], Tmin=(1817.04,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-8.69489,'kJ/mol'), Cp0=(29.1007,'J/(mol*K)'), CpInf=(37.4151,'J/(mol*K)'), label="""N2""", comment="""Thermo library: BurkeH2O2"""),
)
species(
label = 'Ne',
structure = SMILES('[Ne]'),
E0 = (-6.19738,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (20.1797,'amu'),
collisionModel = TransportData(shapeIndex=0, epsilon=(1235.53,'J/mol'), sigma=(3.758e-10,'m'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with fixed Lennard Jones Parameters. This is the fallback method! Try improving transport databases!"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,3.35532], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,3.35532], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), E0=(-6.19738,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""Ne""", comment="""Thermo library: primaryThermoLibrary"""),
)
transitionState(
label = 'TS1',
E0 = (-86.1147,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS2',
E0 = (64.8136,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS3',
E0 = (128.484,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS4',
E0 = (34.5101,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS5',
E0 = (35.179,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS6',
E0 = (-86.1147,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS7',
E0 = (59.2904,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS8',
E0 = (60.7433,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS9',
E0 = (24.9298,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS10',
E0 = (-32.2248,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS11',
E0 = (-3.27153,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS12',
E0 = (-57.7723,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS13',
E0 = (287.619,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS14',
E0 = (270.703,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS15',
E0 = (77.799,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS16',
E0 = (-22.7146,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS17',
E0 = (357.527,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS18',
E0 = (73.8204,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS19',
E0 = (36.6355,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS20',
E0 = (-77.8304,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS21',
E0 = (228.093,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS22',
E0 = (282.037,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS23',
E0 = (355.56,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
reaction(
label = 'reaction1',
reactants = ['[CH2]C(CC)C([O])=O(873)'],
products = ['butene1(127)', 'CO2(13)'],
transitionState = 'TS1',
kinetics = Arrhenius(A=(5e+12,'s^-1'), n=0, Ea=(0,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""Exact match found for rate rule [RJJ]
Euclidian distance = 0
family: 1,4_Linear_birad_scission"""),
)
reaction(
label = 'reaction2',
reactants = ['[CH2]C(CC)C([O])=O(873)'],
products = ['CCC1CC1([O])[O](2669)'],
transitionState = 'TS2',
kinetics = Arrhenius(A=(1.34238e+09,'s^-1'), n=0.889391, Ea=(150.928,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4_S;multiplebond_intra;radadd_intra_cs2H] for rate rule [R4_S_CO;carbonylbond_intra;radadd_intra_cs2H]
Euclidian distance = 1.41421356237
family: Intra_R_Add_Exocyclic
Ea raised from 147.9 to 150.9 kJ/mol to match endothermicity of reaction."""),
)
reaction(
label = 'reaction3',
reactants = ['H(3)', 'C=C(CC)C([O])=O(2670)'],
products = ['[CH2]C(CC)C([O])=O(873)'],
transitionState = 'TS3',
kinetics = Arrhenius(A=(72.3521,'m^3/(mol*s)'), n=1.66655, Ea=(10.8198,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [Cds-OneDeCs_Cds;HJ] for rate rule [Cds-COCs_Cds;HJ]
Euclidian distance = 1.0
family: R_Addition_MultipleBond"""),
)
reaction(
label = 'reaction4',
reactants = ['C2H5(29)', 'tSPC_1553(806)'],
products = ['[CH2]C(CC)C([O])=O(873)'],
transitionState = 'TS4',
kinetics = Arrhenius(A=(0.00119108,'m^3/(mol*s)'), n=2.41, Ea=(22.4155,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [Cds-OneDeH_Cds;CsJ-CsHH] for rate rule [Cds-COH_Cds;CsJ-CsHH]
Euclidian distance = 1.0
family: R_Addition_MultipleBond"""),
)
reaction(
label = 'reaction5',
reactants = ['butene1(127)', '[O][C]=O(669)'],
products = ['[CH2]C(CC)C([O])=O(873)'],
transitionState = 'TS5',
kinetics = Arrhenius(A=(0.00168615,'m^3/(mol*s)'), n=2.52599, Ea=(19.6608,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [Cds-CsH_Cds-HH;CJ]
Euclidian distance = 0
family: R_Addition_MultipleBond"""),
)
reaction(
label = 'reaction6',
reactants = ['[CH2][CH]CC(130)', 'CO2(13)'],
products = ['[CH2]C(CC)C([O])=O(873)'],
transitionState = 'TS6',
kinetics = Arrhenius(A=(8.04,'m^3/(mol*s)'), n=1.68, Ea=(61.3479,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [Cdd_Od;CJ] for rate rule [CO2;CJ]
Euclidian distance = 1.0
family: R_Addition_MultipleBond
Ea raised from 58.1 to 61.3 kJ/mol to match endothermicity of reaction."""),
)
reaction(
label = 'reaction7',
reactants = ['CC[C](C)C([O])=O(2671)'],
products = ['[CH2]C(CC)C([O])=O(873)'],
transitionState = 'TS7',
kinetics = Arrhenius(A=(2.307e+09,'s^-1'), n=1.31, Ea=(203.342,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""From training reaction 163 used for R2H_S;C_rad_out_OneDe/Cs;Cs_H_out_2H
Exact match found for rate rule [R2H_S;C_rad_out_OneDe/Cs;Cs_H_out_2H]
Euclidian distance = 0
Multiplied by reaction path degeneracy 3.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction8',
reactants = ['[CH2]C(CC)C([O])=O(873)'],
products = ['C[CH]C(C)C([O])=O(2672)'],
transitionState = 'TS8',
kinetics = Arrhenius(A=(1.18e+10,'s^-1'), n=0.82, Ea=(146.858,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""From training reaction 186 used for R3H_SS_Cs;C_rad_out_2H;Cs_H_out_H/NonDeC
Exact match found for rate rule [R3H_SS_Cs;C_rad_out_2H;Cs_H_out_H/NonDeC]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction9',
reactants = ['[CH2]C(CC)C([O])=O(873)'],
products = ['[CH2][C](CC)C(=O)O(2673)'],
transitionState = 'TS9',
kinetics = Arrhenius(A=(5.99823e+07,'s^-1'), n=1.57622, Ea=(111.045,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R3H_SS;O_rad_out;XH_out]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction10',
reactants = ['[CH2]C(CC)C([O])=O(873)'],
products = ['[CH2]C([CH]C)C(=O)O(2674)'],
transitionState = 'TS10',
kinetics = Arrhenius(A=(420000,'s^-1'), n=1.76, Ea=(53.8899,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(2500,'K'), comment="""From training reaction 326 used for R4H_SSS;O_rad_out;Cs_H_out_H/NonDeC
Exact match found for rate rule [R4H_SSS;O_rad_out;Cs_H_out_H/NonDeC]
Euclidian distance = 0
Multiplied by reaction path degeneracy 4.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction11',
reactants = ['[CH2]C(CC)C([O])=O(873)'],
products = ['[CH2]CC(C)C([O])=O(2675)'],
transitionState = 'TS11',
kinetics = Arrhenius(A=(114000,'s^-1'), n=1.74, Ea=(82.8432,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 109 used for R4H_SSS;C_rad_out_2H;Cs_H_out_2H
Exact match found for rate rule [R4H_SSS;C_rad_out_2H;Cs_H_out_2H]
Euclidian distance = 0
Multiplied by reaction path degeneracy 3.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction12',
reactants = ['[CH2]C(CC)C([O])=O(873)'],
products = ['[CH2]CC([CH2])C(=O)O(2676)'],
transitionState = 'TS12',
kinetics = Arrhenius(A=(3.55e+09,'s^-1','*|/',3), n=0.686, Ea=(28.3424,'kJ/mol'), T0=(1,'K'), comment="""From training reaction 1 used for R5H_CCCC(O2d);O_rad_out;Cs_H_out_2H
Exact match found for rate rule [R5H_CCCC(O2d);O_rad_out;Cs_H_out_2H]
Euclidian distance = 0
Multiplied by reaction path degeneracy 6.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction13',
reactants = ['[CH2][CH]CC(130)', '[O][C]=O(669)'],
products = ['[CH2]C(CC)C([O])=O(873)'],
transitionState = 'TS13',
kinetics = Arrhenius(A=(7.46075e+06,'m^3/(mol*s)'), n=0.027223, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [Y_rad;Y_rad]
Euclidian distance = 0
family: R_Recombination
Ea raised from -14.4 to 0 kJ/mol."""),
)
reaction(
label = 'reaction14',
reactants = ['[CH2]C(CC)C([O])=O(873)'],
products = ['[CH2]C(CC)[C]1OO1(2677)'],
transitionState = 'TS14',
kinetics = Arrhenius(A=(1.55936e+11,'s^-1'), n=0.551275, Ea=(356.817,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R3_linear;multiplebond_intra;radadd_intra] for rate rule [R3_CO;carbonyl_intra_Nd;radadd_intra_O]
Euclidian distance = 2.44948974278
family: Intra_R_Add_Endocyclic
Ea raised from 354.9 to 356.8 kJ/mol to match endothermicity of reaction."""),
)
reaction(
label = 'reaction15',
reactants = ['[CH2]C(CC)C([O])=O(873)'],
products = ['CCC1CO[C]1[O](2678)'],
transitionState = 'TS15',
kinetics = Arrhenius(A=(6.54148e+08,'s^-1'), n=0.924088, Ea=(163.914,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R4_S;multiplebond_intra;radadd_intra_cs2H] for rate rule [R4_S_CO;carbonyl_intra;radadd_intra_cs2H]
Euclidian distance = 1.41421356237
Multiplied by reaction path degeneracy 2.0
family: Intra_R_Add_Endocyclic
Ea raised from 160.7 to 163.9 kJ/mol to match endothermicity of reaction."""),
)
reaction(
label = 'reaction16',
reactants = ['[CH2]C(CC)C([O])=O(873)'],
products = ['C=C(CC)C(=O)O(883)'],
transitionState = 'TS16',
kinetics = Arrhenius(A=(1.4874e+09,'s^-1'), n=1.045, Ea=(63.4002,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R3radExo;Y_rad;XH_Rrad]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction17',
reactants = ['CH2(S)(23)', '[CH2]C(C)C([O])=O(929)'],
products = ['[CH2]C(CC)C([O])=O(873)'],
transitionState = 'TS17',
kinetics = Arrhenius(A=(1.31021e+06,'m^3/(mol*s)'), n=0.189, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [carbene;C_pri] for rate rule [carbene;C_pri/NonDeC]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 3.0
family: 1,2_Insertion_carbene
Ea raised from -1.5 to 0 kJ/mol."""),
)
reaction(
label = 'reaction18',
reactants = ['[CH2]C(CC)C([O])=O(873)'],
products = ['CCC[CH]C([O])=O(2567)'],
transitionState = 'TS18',
kinetics = Arrhenius(A=(6.55606e+10,'s^-1'), n=0.64, Ea=(159.935,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [cCs(-HC)CJ;CsJ;C] for rate rule [cCs(-HC)CJ;CsJ-HH;C]
Euclidian distance = 1.0
family: 1,2_shiftC"""),
)
reaction(
label = 'reaction19',
reactants = ['[CH2]C(CC)C([O])=O(873)'],
products = ['CC[CH]CC([O])=O(872)'],
transitionState = 'TS19',
kinetics = Arrhenius(A=(8.889e+11,'s^-1'), n=0.232, Ea=(122.75,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [cCs(-HC)CJ;CsJ;CO] for rate rule [cCs(-HC)CJ;CsJ-HH;CO]
Euclidian distance = 1.0
family: 1,2_shiftC"""),
)
reaction(
label = 'reaction20',
reactants = ['[CH2]C(CC)C([O])=O(873)'],
products = ['CCC1COC1=O(2668)'],
transitionState = 'TS20',
kinetics = Arrhenius(A=(3.24e+12,'s^-1'), n=-0.305, Ea=(8.28432,'kJ/mol'), T0=(1,'K'), Tmin=(600,'K'), Tmax=(2000,'K'), comment="""Estimated using template [R4_SSS;C_rad_out_2H;Ypri_rad_out] for rate rule [R4_SSS;C_rad_out_2H;Opri_rad]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: Birad_recombination"""),
)
reaction(
label = 'reaction21',
reactants = ['[CH2]C=C([O])OCC(2679)'],
products = ['[CH2]C(CC)C([O])=O(873)'],
transitionState = 'TS21',
kinetics = Arrhenius(A=(7040,'s^-1'), n=2.66, Ea=(313.8,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R_ROR;R1_doublebond;R2_doublebond;R_O_C]
Euclidian distance = 0
family: ketoenol"""),
)
reaction(
label = 'reaction22',
reactants = ['CH2(19)', 'CC[CH]C([O])=O(2500)'],
products = ['[CH2]C(CC)C([O])=O(873)'],
transitionState = 'TS22',
kinetics = Arrhenius(A=(1.06732e+06,'m^3/(mol*s)'), n=0.472793, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [Y_rad;Birad] for rate rule [C_rad/H/OneDeC;Birad]
Euclidian distance = 4.0
family: Birad_R_Recombination
Ea raised from -3.5 to 0 kJ/mol."""),
)
reaction(
label = 'reaction23',
reactants = ['O(4)', '[CH2]C([C]=O)CC(2680)'],
products = ['[CH2]C(CC)C([O])=O(873)'],
transitionState = 'TS23',
kinetics = Arrhenius(A=(2085.55,'m^3/(mol*s)'), n=1.09077, Ea=(0,'kJ/mol'), T0=(1,'K'), Tmin=(303.03,'K'), Tmax=(2000,'K'), comment="""Estimated using template [Y_rad;O_birad] for rate rule [CO_rad/NonDe;O_birad]
Euclidian distance = 3.0
family: Birad_R_Recombination
Ea raised from -8.3 to 0 kJ/mol."""),
)
# Pressure-dependent network 417: single isomer [CH2]C(CC)C([O])=O(873),
# entered/exited through the bimolecular channel butene-1 + CO2, with a
# 50/50 N2/Ne bath-gas mixture for collisional energy transfer.
network(
    label = '417',
    isomers = [
        '[CH2]C(CC)C([O])=O(873)',
    ],
    reactants = [
        ('butene1(127)', 'CO2(13)'),
    ],
    bathGas = {
        'N2': 0.5,
        'Ne': 0.5,
    },
)
# Pressure-dependence job for network 417: modified-strong-collision solution
# on an 8 x 5 temperature/pressure grid (300-2000 K, 0.01-100 bar), fitted to
# a Chebyshev (6 T x 4 P) interpolation model; energy grains capped at
# 0.5 kcal/mol with at least 250 grains. Run in RMG mode with active K- and
# J-rotors.
pressureDependence(
    label = '417',
    Tmin = (300,'K'),
    Tmax = (2000,'K'),
    Tcount = 8,
    Tlist = ([302.47,323.145,369.86,455.987,609.649,885.262,1353.64,1896.74],'K'),
    Pmin = (0.01,'bar'),
    Pmax = (100,'bar'),
    Pcount = 5,
    Plist = ([0.0125282,0.0667467,1,14.982,79.8202],'bar'),
    maximumGrainSize = (0.5,'kcal/mol'),
    minimumGrainCount = 250,
    method = 'modified strong collision',
    interpolationModel = ('Chebyshev', 6, 4),
    activeKRotor = True,
    activeJRotor = True,
    rmgmode = True,
)
|
from django.db import models
# Create your models here.
class myapp(models.Model):
    ''' Referral record for myapp: an email address, the (optional) friend
    who referred it, a unique referral code, and bookkeeping metadata. '''
    email = models.EmailField()
    # Self-referential FK to the referring user.
    # Fix: on_delete is mandatory from Django 2.0 onward; SET_NULL matches the
    # existing null=True so referred rows survive deletion of the referrer.
    friend = models.ForeignKey("self", related_name='referral',
                               null=True, blank=True,
                               on_delete=models.SET_NULL)
    ref_id = models.CharField(max_length = 120, default = 'ABC', unique = True)
    ip_address = models.CharField(max_length = 120 , default = 'ABC')
    timestamp = models.DateTimeField(auto_now_add = True, auto_now = False)  # set once on creation
    updated = models.DateTimeField(auto_now_add = False, auto_now = True)    # refreshed on every save

    def __unicode__(self):
        return self.email

    # Fix: Python 3 / modern Django use __str__; keep __unicode__ for py2.
    def __str__(self):
        return self.email

    class Meta:
        unique_together = ['email','ref_id']
from netmiko import ConnectHandler
import getpass
username = raw_input("Username: ")
password = getpass.getpass()
r1 = {
"device_type" : "cisco_ios",
"ip" : "10.10.10.1",
"username" : usename,
"password" : password
}
r2 = {
"device_type" : "cisco_ios",
"ip" : "10.10.10.2",
"username" : usename,
"password" : password
}
r3 = {
"device_type" : "cisco_ios",
"ip" : "10.10.10.3",
"username" : usename,
"password" : password
}
r4 = {
"device_type" : "cisco_ios",
"ip" : "10.10.10.4",
"username" : usename,
"password" : password
}
router_list = [r1,r2,r3,r4]
for router in router_list:
conn = ConnectHandler(**router)
print "IP Address on {}".format(router["ip"])
print conn.send_command("show ip int brief")
print "\n\n" |
import math
from drafter.utils import Rect
from drafter.layouts import Node, Row, Column
from drafter.nodes import Text, Canvas
from drafter.shapes import Shape, String, Pie, Pango, LineShape
from ..common.color import Color
from ..common.utils import fmt_num
from ..common.boiler import boil
def TrainingsFooter(**kwargs):
    """Small fixed-style footer Text node for the trainings panel.

    Extra layout kwargs are forwarded to Text unchanged.
    """
    footer_style = dict(
        text=boil('training_footer'),
        font_family='Roboto Condensed Light',
        font_size=6,
    )
    return Text(**kwargs, **footer_style)
def Label(label, color):
    """Legend entry: a small colored square followed by its caption."""
    swatch = Node(
        width=7,
        height=7,
        bg_color=color,
        margin=Rect([0, 4, 0, 0]),
    )
    caption = Text(
        text=label,
        font='Roboto Light',
        font_size=6,
    )
    container = Row(
        margin=Rect([0, 10, 0, 10]),
        padding=Rect([8, 10, 5, 10]),
    )
    return container.add(swatch, caption)
class PieChart(Shape):
    # NOTE(review): `items = []` is a class-level mutable default — instances
    # that are not given their own `items` would share this list. Presumably
    # Shape's constructor replaces it per instance; verify.
    label = ''
    items = []

    def render(self, ctx):
        """Draw the chart title, the pie slices and the value labels on ctx."""
        # Title, horizontally centered at the top.
        String(
            pos=[self.w/2, 0],
            text=self.label,
            font='Roboto Light',
            font_size=7,
            alignment=Text.CENTER,
        ).repos_to_center(ctx).render(ctx)
        pie_center = [self.w/2, self.h/2 + 9]
        radius = min(self.w/2, self.h/2)
        last_angle = None
        # Total of all slice values; None entries count as 0.
        total_val = sum(
            [
                item['value'] if item['value'] is not None else 0
                for item in self.items
            ]
        )
        # we need to draw numbers at the end so they're on top
        # - store them in this array then render after pies
        nums = []
        # TODO: more graceful
        if total_val > 0:
            for it, item in enumerate(self.items):
                value = item['value']
                color = item['color']
                value_in_radians = value / total_val * 2 * math.pi
                if last_angle is None:
                    # First slice: offset the start so it sits near the top.
                    last_angle = -math.pi / 2.5 - value_in_radians / 2
                angle = last_angle + value_in_radians
                # don't show outline if very small sliver
                pct_cov = value / total_val
                if pct_cov != 0:
                    if .05 < pct_cov < .95:
                        l_w = 1
                    else:
                        l_w = 0
                    pie = Pie(
                        center=pie_center,
                        radius=radius,
                        color=color,
                        # only give an outline line if we have more than 10 pct
                        # (so we don't have white sliver
                        line_width=l_w,
                        line_color=Color.WHITE,
                        angle1=(last_angle),
                        angle2=(angle),
                    )
                    pie.render(ctx)
                    last_angle = angle
                    num_pos = pie.calc_center()
                    # Nudge tiny slivers' labels outward so they stay legible.
                    if pct_cov < .05:
                        num_pos[0] += 7
                    nums.append(
                        String(
                            pos=num_pos,
                            text=fmt_num(value),
                            font_family='Roboto Condensed',
                            font_size=8,
                            color=Color.WHITE,
                            font_weight=Pango.Weight.BOLD,
                            # NOTE(review): this passes the LineShape *class*
                            # as line_cap; a constant such as
                            # LineShape.CAP_SQUARE looks intended — confirm.
                            line_cap=LineShape
                        )
                    )
            for v in nums:
                # NOTE(review): `pie` below is whichever slice was drawn last.
                if len(nums) == 1:
                    nums[0].pos = pie.calc_central_point(1)
                v.repos_to_center(ctx).render(ctx)
        # SlantedLine(
        #     p1 = [pie_center[0], pie_center[1] - radius],
        #     p2 = [25, -2],
        #     pct_cut = .08,
        #     rel_move = True,
        #     line_cap = LineShape.CAP_SQUARE,
        #     line_color = Color.ORANGE,
        #     # line_dash = [2,2],
        #     line_width = .5
        # ).render(ctx)
def _calc_reached(reqd, reached):
return max(0, reqd - reached)
def Trainings(data):
    """Panel showing short and vocational training progress as two pies."""
    short = data['short']
    vocational = data['vocational']

    def _progress_slices(stats):
        # Reached portion in accent color, outstanding remainder in gray.
        return [
            {'value': stats['reached'], 'color': Color.ACCENT},
            {'value': _calc_reached(stats['reqd'], stats['reached']), 'color': Color.GRAY},
        ]

    legend_entries = [
        {'label': boil('training_reached'), 'color': Color.ACCENT},
        {'label': boil('training_remaining'), 'color': Color.GRAY},
    ]

    title = Text(
        height=13,
        text=boil('training_sub_title'),
        color=Color.PRIMARY,
        font_family='Roboto Condensed',
        font_size=8,
        font_weight=Pango.Weight.BOLD,
    )

    charts = Row(width='100%', height='100% - 32', padding=Rect(4)).add(
        Canvas(
            width='50%',
            height='100%',
            renderer=PieChart(
                items=_progress_slices(short),
                label=boil('training_short_training'),
            )
        ),
        Canvas(
            width='50%',
            height='100%',
            renderer=PieChart(
                items=_progress_slices(vocational),
                label=boil('training_vocational_training'),
            )
        ),
    )

    legend = Row(
        width='100%',
        height=16,
        justify='center',
        align='center',
    ).add(
        *[
            Label(
                label=entry['label'],
                color=entry['color'],
            ) for entry in legend_entries
        ]
    )

    return Column(
        width='100%',
        height='100%',
        padding=Rect([10, 10, 4, 10]),
    ).add(title, charts, legend)
|
from django.shortcuts import render,redirect
from .models import SDiscussion,DComment
# Create your views here.
def discussionList(request):
    """Render the list of all discussions."""
    # Fix (idiom): .filter() with no arguments is a confusing spelling of
    # .all(); both return the full queryset.
    all_discussion = SDiscussion.objects.all()
    return render(request, 'discussion/discussionList.html',{
        'd' : all_discussion
    })
def inDiscussion(request, that_discussion_id):
    """Render one discussion together with all of its comments."""
    discussion = SDiscussion.objects.get(id=that_discussion_id)
    discussion_comments = DComment.objects.filter(Tdiscussion=discussion)
    context = {
        'that_discussion' : discussion,
        'comments' : discussion_comments
    }
    return render(request, 'discussion/inDiscussion.html', context)
def replyDiscussion(request, that_discussion_id):
    """Save a posted comment on a discussion, then go back to it.

    Fix: the original returned None for non-POST requests, which makes
    Django raise "view didn't return an HttpResponse"; now every request
    ends in a redirect back to the discussion.
    """
    if request.method == "POST":
        myComment = request.POST['myComment']
        Tdiscussion = SDiscussion.objects.get(id=that_discussion_id)
        ComUser = request.user
        done = DComment(Tdiscussion=Tdiscussion, ComUser=ComUser, myComment=myComment)
        done.save()
    return redirect('discussion:inDiscussion', that_discussion_id=that_discussion_id)
|
from django.shortcuts import render
from django.http import HttpResponse, Http404
from .models import Pet, Vaccine
from django.db import models
def home(request):
    """Render the pet list page; 404 if the pets cannot be loaded."""
    try:
        allpets = Pet.objects.all()
    # Fix: a bare `except:` also swallowed SystemExit/KeyboardInterrupt.
    except Exception:
        raise Http404('we could not load pets for you')
    return render(request, 'home.html', {
        'pets': allpets,
        'testArg': 2137
    })
def pet_detail(request, pet_id: int):
    """Render the detail page for one pet; fall back to home if missing."""
    try:
        pet = Pet.objects.get(id=pet_id)
        # request obj, name of html page, context to pass
        return render(request, 'pet_detail.html', {'pet': pet})
    except Pet.DoesNotExist:
        # Unknown id: show the list page instead of erroring out.
        return home(request)
def create_pet_get(request):
    """Render the pet-creation form pre-filled with placeholder values."""
    placeholder = Pet(name='random', breed='random')
    context = {'pet': placeholder}
    return render(request, 'create_pet.html', context)
def create_pet_post(request, pet_name, pet_breed):
    """Persist a new Pet (mostly hard-coded fields) and return home.

    Fix: the original bare ``except:`` also caught SystemExit and
    KeyboardInterrupt; narrowed to Exception.
    """
    try:
        pet2 = Pet(name=pet_name, submitter='asd', species='asdasd', breed=pet_breed,
                   description='asdasd', sex='M', submission_date='2007-01-01 10:00:00')
        pet2.save()
    except Exception:
        return render(request, 'dupa.html')
        # return Http404('we could not create a pet')
    return home(request)
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import absolute_import
import os
import unittest
from telemetry.core import util
from telemetry.internal.platform import linux_based_platform_backend
class TestLinuxBackend(linux_based_platform_backend.LinuxBasedPlatformBackend):
  # pylint: disable=abstract-method
  # Test double: serves canned file contents instead of reading from /proc.

  def __init__(self):
    super(TestLinuxBackend, self).__init__()
    self._mock_files = {}  # maps filename -> canned contents

  def SetMockFile(self, filename, output):
    # Register *output* as the contents returned for *filename*.
    self._mock_files[filename] = output

  def GetFileContents(self, filename):
    # Raises KeyError for files never registered via SetMockFile.
    return self._mock_files[filename]
class LinuxBasedPlatformBackendTest(unittest.TestCase):

  def SetMockFileInBackend(self, backend, real_file, mock_file):
    """Load unittest fixture *real_file* and register it as *mock_file*."""
    with open(os.path.join(util.GetUnittestDataDir(), real_file)) as f:
      backend.SetMockFile(mock_file, f.read())

  def testGetSystemTotalPhysicalMemory(self):
    backend = TestLinuxBackend()
    self.SetMockFileInBackend(backend, 'proc_meminfo', '/proc/meminfo')
    result = backend.GetSystemTotalPhysicalMemory()
    # 67479191552 == MemTotal * 1024
    # Fix: assertEquals is a deprecated alias removed in Python 3.12.
    self.assertEqual(result, 67479191552)
|
lst = [1, 15, 22, 0, 10, -1]


def bubble_sort(lst):
    """Return a sorted copy of *lst* using bubble sort.

    The input list is left unmodified. A swapped-flag early exit stops
    the outer loop as soon as one full pass makes no swaps, making the
    best case (already sorted input) O(n).
    """
    result = lst[:]
    for i in range(len(result) - 1):
        swapped = False
        for j in range(len(result) - 1 - i):
            if result[j] > result[j + 1]:
                result[j], result[j + 1] = result[j + 1], result[j]
                swapped = True
        if not swapped:
            break  # already sorted — no need for further passes
    return result


# Guard the demo so importing this module has no side effects.
if __name__ == "__main__":
    print(bubble_sort(lst))
|
# -*- coding:UTF-8 -*-
__author__ = 'joy'
import sys
from common import one_vehicle_price_sum
reload(sys)
sys.setdefaultencoding('utf8')
#计算整车价格
#start_province指编号
#unloadWay装卸方式
#agingWay时效方式
#invoiceWay发票方式
def getOneVehicleLinePrice(start_province,start_city,start_district,arrive_province,arrive_city,arrive_district,
                           tonnage,cube,goodsName,selectCalcuteWay,distance,unloadWay,invoiceWay,agingWay,origin,destination,sessionId,environment):
    # Overall formula: (total price * discount template * aging factor
    #                   + loading/unloading fee) * invoice factor
    # Step 1: total * discount * aging factor + loading/unloading fee
    cPrice = one_vehicle_price_sum.getVehicleSumPrice(tonnage,cube,start_province, start_city, start_district, arrive_province, arrive_city, arrive_district,goodsName,distance,
                                                      selectCalcuteWay, sessionId, environment,unloadWay, agingWay, origin, destination)
    # Step 2: invoice surcharge.
    # Fix: the original left dPrice undefined (NameError) for any invoiceWay
    # other than the two known literals; default to the base price instead.
    if invoiceWay == "10%发票":
        # NOTE(review): factor 1.07 for a "10%" invoice — confirm the rate.
        dPrice = cPrice * 1.07
    else:
        dPrice = cPrice
    return dPrice
|
"""
URL: https://stepik.org/lesson/334150/step/10?unit=317559
convert CamelCaseString to python_snake_string
"""
# my solution:
def convert_to_python_case(text):
    """CamelCase -> snake_case via a regex word split."""
    import re
    camel_words = re.findall('[A-Z][^A-Z]*', text)
    return '_'.join(word.lower() for word in camel_words)
# alternative solution 1:
def convert_to_python_case(text):
    """CamelCase -> snake_case, one character at a time.

    Fix: the original always dropped the first character (``s[1:]``),
    which is only correct when *text* starts uppercase; inputs such as
    ``"camelCase"`` lost their first letter. Only a leading underscore
    (added when text starts uppercase) is stripped now.
    """
    s = ''
    for el in text:
        if el.isupper():
            s += '_'
        s += el.lower()
    return s[1:] if s.startswith('_') else s
# alternative solution 2:
def convert_to_python_case(text):
    """CamelCase -> snake_case, underscoring interior capitals.

    Fix: the original indexed ``text[0]`` unconditionally and raised
    IndexError for an empty input; the empty string now round-trips.
    """
    if not text:
        return text
    s = text[0].lower()
    for c in text[1:]:
        s += ('_' + c.lower() if c.isupper() else c)
    return s
# print(3+5)
# print("3+5")
# print(type(3.14))
# print(type(type(42)))
# print(type(3.1)== float)

# Python identifiers are case-sensitive, so `friend` and `Friend` below are
# two distinct variables.
friend= "Lee"
Friend= "Park"
pi= 3.14
answer= 20
print(friend, pi, answer)
print(Friend==friend)
# Naming variables: use recognizable, representative names and add
# explanatory comments. (translated from Korean)
|
# Function to print the desired
# Alphabet Z Pattern
def alphabetPattern(N):
# Declaring the values of Right,
# Left and Diagonal values
Top, Bottom, Diagonal = 1, 1, N - 1
# Loop for printing the first row
for index in range(N):
print(Top, end=' ')
Top += 1
print()
# Main Loop for the rows from (2 to n-1)
for index in range(1, N - 1):
# Spaces for the diagonals
for side_index in range(2 * (N - index - 1)):
print(' ', end='')
# Printing the diagonal values
print(Diagonal, end='')
Diagonal -= 1
print()
# Loop for printing the last row
for index in range(N):
print(Bottom, end=' ')
Bottom += 1
# Driver Code
# Number of rows
N = 5
alphabetPattern(N) |
import subprocess
from py_utils import config_utils
from grovepi import digitalRead, pinMode
class HardwareHandler:
    '''
    The HardwareHandler groups all interactions with the hardware that are not detected through the touchscreen (or clicks) and reads from the
    connected sensors.

    Attributes:
        distance (int): value read from infrared distance sensor (either 1 or 0)
        dist_port (int): IC2 port from which to read distance sensor data, specified in config.txt
        lights (subprocess): variable pointing to subprocess that runs light scripts in background
        player (subprocess): variable pointing to subprocess that runs mplayer in background to enable sound starting and stopping
    '''
    # Class-level defaults; __init__ re-initialises the per-instance state.
    distance = None
    dist_port = None
    lights = None
    player = None

    def __init__(self):
        '''
        Initialises HardwareHandler by setting instance attributes.
        '''
        self.distance = None
        self.player = None
        self.dist_port = config_utils.get_config_value("DIST_PORT")

    def update_distance(self):
        '''
        Reads distance output from infrared distance sensor and saves the value to self.distance.
        '''
        # (removed a dead trailing `pass` statement)
        pinMode(self.dist_port, "INPUT")
        self.distance = digitalRead(self.dist_port)

    def _run_light_script(self, script):
        '''
        Launches the given light script as a detached background process.
        (Extracted: the three light methods previously triplicated this call.)
        '''
        return subprocess.Popen(["python3", script], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

    def lights_on(self):
        '''
        Tries to run lights_on.py file as a background process.
        '''
        print("lights on")
        self.lights = self._run_light_script("src/hardware/lights_on.py")

    def lights_off(self):
        '''
        Tries to run lights_off.py file as a background process.
        '''
        print("lights off")
        self.lights = self._run_light_script("src/hardware/lights_off.py")

    def sunrise(self):
        '''
        Tries to run sunrise.py file as a background process.
        '''
        self.lights = self._run_light_script("src/hardware/sunrise.py")

    def start_sound(self, sound_file, loop=False):
        '''
        Starts mplayer subprocess playing specified sound file and attaching it to this class' player variable.

        Args:
            sound_file (str): str value specifying path to sound file
            loop (bool): boolean value indicating if sound file should be looped indefinitely, default: False
        '''
        print("Playing {}".format(sound_file))
        # "-loop 0" makes mplayer repeat the file forever.
        cmd = ["mplayer", "-loop", "0", sound_file] if loop else ["mplayer", sound_file]
        self.player = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

    def stop_sound(self):
        '''
        Stops currently running mplayer subprocess attached to this class if one exists.
        '''
        # Fix: the bare `except: pass` also hid real bugs; only the expected
        # failures (no player started yet / process already gone) are
        # swallowed now.
        if self.player is None:
            return
        try:
            self.player.kill()
        except OSError:
            pass
|
import lxml.etree as ET
import xmltodict
import os
import parmap
import pathlib
import sys
def xml_check(xml_file):
    """Parse *xml_file* and return the tree, or False on failure.

    If the path is a file but fails to parse, the argument string itself
    is tried as raw XML (matching the original fallback); a diagnostic is
    printed and False returned when nothing parses.
    """
    if not pathlib.Path(xml_file).is_file():
        print('File: %s, not found' % xml_file)
        return False
    try:
        return ET.parse(xml_file)
    except ET.XMLSyntaxError:
        pass
    try:
        return ET.XML(bytes(bytearray(xml_file, encoding='utf-8')))
    except ET.XMLSyntaxError:
        print('error at %s' % xml_file)
        return False
def filelist(directory):
    """Recursively collect the paths of all .xml files under *directory*."""
    xml_files = []
    for subdir, dirs, files in os.walk(directory):
        for file in files:
            # Fix (idiom): os.path.join instead of `subdir + os.sep + file`.
            file_path = os.path.join(subdir, file)
            if file_path.endswith(".xml"):
                xml_files.append(file_path)
    return xml_files
def mmd2iso(mmd_file, xslt):
    """Transform an MMD document to ISO via the *xslt* stylesheet.

    *mmd_file* may be a path or raw XML text; returns the transformed
    document parsed into an xmltodict dictionary.
    """
    try:
        mmd_tree = ET.parse(mmd_file)
    except OSError:
        # Not a readable path: treat the argument as raw XML text.
        mmd_tree = ET.XML(bytes(bytearray(mmd_file, encoding='utf-8')))
    transform = ET.XSLT(ET.parse(xslt))
    iso = transform(mmd_tree)
    return xmltodict.parse(iso)
def fixrecord(doc, pretty=False):
    """Patch the online-resource links of an ISO record (xmltodict dict)
    in place and return it re-serialised as XML text.

    Applied per onLine entry: OPeNDAP links get '.html' appended; 'OGC WMS'
    protocol/description strings are normalised to 'OGC:WMS'; S2A records
    get their WMS host rewritten to the jpeg endpoint plus a GetCapabilities
    query; S1A/S1B/S2B records get the GetCapabilities query appended.

    NOTE(review): assumes gmd:onLine is a list and every entry carries a
    '#text' under gmd:protocol/gco:CharacterString — a record missing these
    keys raises KeyError/TypeError.
    """
    for i, v in enumerate(doc['gmd:MD_Metadata']
                          ['gmd:distributionInfo']
                          ['gmd:MD_Distribution']
                          ['gmd:transferOptions']
                          ['gmd:MD_DigitalTransferOptions']
                          ['gmd:onLine']):
        # OPeNDAP endpoints: link to the human-readable .html form.
        if v['gmd:CI_OnlineResource']\
                ['gmd:protocol']\
                ['gco:CharacterString']\
                ['#text'] == 'OPeNDAP':
            doc['gmd:MD_Metadata']\
                ['gmd:distributionInfo'] \
                ['gmd:MD_Distribution'] \
                ['gmd:transferOptions'] \
                ['gmd:MD_DigitalTransferOptions'] \
                ['gmd:onLine'] \
                [i] \
                ['gmd:CI_OnlineResource'] \
                ['gmd:linkage'] \
                ['gmd:URL'] = v['gmd:CI_OnlineResource']['gmd:linkage']['gmd:URL'] + '.html'
        # Normalise the WMS protocol identifier to the 'OGC:WMS' form.
        if v['gmd:CI_OnlineResource'] \
                ['gmd:protocol'] \
                ['gco:CharacterString'] \
                ['#text'] == 'OGC WMS':
            doc['gmd:MD_Metadata'] \
                ['gmd:distributionInfo'] \
                ['gmd:MD_Distribution'] \
                ['gmd:transferOptions'] \
                ['gmd:MD_DigitalTransferOptions'] \
                ['gmd:onLine'][i]['gmd:CI_OnlineResource'] \
                ['gmd:protocol'] \
                ['gco:CharacterString'] \
                ['#text'] = 'OGC:WMS'
            doc['gmd:MD_Metadata'] \
                ['gmd:distributionInfo'] \
                ['gmd:MD_Distribution'] \
                ['gmd:transferOptions'] \
                ['gmd:MD_DigitalTransferOptions'] \
                ['gmd:onLine'][i]['gmd:CI_OnlineResource'] \
                ['gmd:description'] \
                ['gco:CharacterString'] \
                ['#text'] = 'OGC:WMS'
            # Sentinel-2A: switch to the jpeg WMS endpoint + GetCapabilities.
            if doc['gmd:MD_Metadata'] \
                    ['gmd:fileIdentifier'] \
                    ['gco:CharacterString'] \
                    ['#text'][:3] == 'S2A':
                doc['gmd:MD_Metadata'] \
                    ['gmd:distributionInfo'] \
                    ['gmd:MD_Distribution'] \
                    ['gmd:transferOptions'] \
                    ['gmd:MD_DigitalTransferOptions'] \
                    ['gmd:onLine'] \
                    [i] \
                    ['gmd:CI_OnlineResource'] \
                    ['gmd:linkage']['gmd:URL'] = v['gmd:CI_OnlineResource'] \
                                                     ['gmd:linkage'] \
                                                     ['gmd:URL'].replace('http://nbswms.met.no/thredds/wms/',
                                                                         'http://nbswms.met.no/thredds/wms_jpeg/') \
                                                 + "?SERVICE=WMS&REQUEST=GetCapabilities"
            # Sentinel-1A/1B/2B: just append the GetCapabilities query.
            if doc['gmd:MD_Metadata'] \
                    ['gmd:fileIdentifier'] \
                    ['gco:CharacterString'] \
                    ['#text'][:3] in ['S1A', 'S1B', 'S2B']:
                doc['gmd:MD_Metadata'] \
                    ['gmd:distributionInfo'] \
                    ['gmd:MD_Distribution'] \
                    ['gmd:transferOptions'] \
                    ['gmd:MD_DigitalTransferOptions'] \
                    ['gmd:onLine'] \
                    [i] \
                    ['gmd:CI_OnlineResource'] \
                    ['gmd:linkage']['gmd:URL'] = v['gmd:CI_OnlineResource'] \
                                                     ['gmd:linkage'] \
                                                     ['gmd:URL'] + "?SERVICE=WMS&REQUEST=GetCapabilities"
    return xmltodict.unparse(doc, pretty=pretty)
def writerecord(inputfile, xsl='/usr/local/share/mmd-to-iso.xsl', outdir='/home/pycsw/sample_data/nbs_iso'):
    """Convert one MMD file to ISO, apply fixrecord() URL fixes, and write
    the result under *outdir* (created if needed)."""
    pathlib.Path(outdir).mkdir(parents=True, exist_ok=True)
    converted = mmd2iso(inputfile, xsl)
    target = pathlib.PurePosixPath(outdir).joinpath(pathlib.PurePosixPath(inputfile).name)
    with open(target, 'w') as isofix:
        isofix.write(fixrecord(converted, pretty=True))
def writeiso(inputfile, xsl='/usr/local/share/mmd-to-iso.xsl', outdir='/home/pycsw/sample_data/nbs_iso'):
    """Convert one MMD file to ISO (no fixrecord() pass) and write it
    under *outdir* (created if needed)."""
    pathlib.Path(outdir).mkdir(parents=True, exist_ok=True)
    converted = mmd2iso(inputfile, xsl)
    target = pathlib.PurePosixPath(outdir).joinpath(pathlib.PurePosixPath(inputfile).name)
    with open(target, 'w') as isofix:
        isofix.write(xmltodict.unparse(converted, pretty=True))
def main(metadata, outdir, fix):
    """Entry point: list the MMD files under *metadata* and convert them
    to *outdir* (conversion is currently disabled — the parmap calls are
    commented out, so only a marker is printed)."""
    xmlfiles = filelist(metadata)
    if fix:
        # y = parmap.map(writerecord, xmlfiles, outdir=outdir, pm_pbar=False)
        print(fix)
    else:
        # y = parmap.map(writeiso, xmlfiles, outdir=outdir, pm_pbar=False)
        print('no fix')
import argparse
def parse_arguments():
    """Build the command-line interface and parse sys.argv."""
    parser = argparse.ArgumentParser(description='Convert mmd xml files to ISO')
    # (short flag, long flag, help text) for each option.
    for short_flag, long_flag, help_text in (
        ("-i", "--input-dir", "directory with input MMD"),
        ("-o", "--output-dir", "output directory with ISO"),
        ("-f", "--fix", "perform iso fix if True"),
    ):
        parser.add_argument(short_flag, long_flag, help=help_text)
    return parser.parse_args()
if __name__ == '__main__':
    # Parse CLI arguments and normalise --fix to a plain boolean before
    # delegating to main().
    args = parse_arguments()
    fix = False
    if args.fix:
        fix = True
    main(metadata=args.input_dir, outdir=args.output_dir, fix=fix)
|
#! /usr/bin/env python
"""Tools for transforming CSV records and lists of CSV records.
"""
try:
    from itertools import izip
except ImportError:
    # For Python 3 compatibility: the built-in zip is already lazy.
    izip = zip


def add_column(existing_rows, new_column):
    """Yield each row of *existing_rows* with the matching entry of
    *new_column* appended. Input rows are copied, not mutated.

    >>> old = [['fred', 43], ['wilma', 34]]
    >>> gender_column = ['male', 'female']
    >>> list(add_column(old, gender_column))
    [['fred', 43, 'male'], ['wilma', 34, 'female']]
    """
    for row, extra_field in izip(existing_rows, new_column):
        augmented = row[:]
        augmented.append(extra_field)
        yield augmented
if __name__ == "__main__":
import doctest
doctest.testmod()
|
"""
Created on Fri Nov 29 12:33:10 2017
@author: Yannic Jänike
"""
import numpy as np
import random
from numpy.random import choice
import time
import matplotlib.pyplot as plt
class antColony():
    # Ant Colony Optimization for the TSP.
    # NOTE(review): the whole colony reads the distance matrix from the
    # module-level global `tspmap` (set by initalize()); nothing here works
    # until that global exists.
    class ant():
        def __init__(self,init_location,possible_locations,pheromone_map,alpha,beta,first_pass):
            """
            Initialize an ant with,
            init_location(int) : initial position of the ant
            possible_locations(List) : List of all possible possible_locations
            path_cost(int) : Cost of the path the ant has traversed
            pheromone_map(List) : List of List, where pheromone_map[i][j] represents row i at column j
            Alpha(float) : determines impact of the pheromone_map in the path selection
            Beta(float ) : determines impact of the distance between node i and i+1 in the path selection
            first_pass(boolean) : determines if we are in the first iteration or not
            """
            self.init_location = init_location
            self.possible_locations = possible_locations
            self.path = []
            self.path_cost = 0
            self.current_location = init_location
            self.pheromone_map = pheromone_map
            self.alpha = alpha
            self.beta = beta
            # NOTE(review): the `first_pass` argument is ignored — every
            # (re)initialised ant starts with first_pass=True. Confirm intended.
            self.first_pass = True
            self.update_path(init_location)
        #---------------------------------------------SOLUTION CONSTRUCTION--------------------------------------#
        def create_path(self):
            """
            Create a path for the ant self
            """
            #as long as the list of Possible locations is not empty, we search for the next node
            while self.possible_locations:
                next = self.pick_path()
                self.traverse(self.current_location,next)
        def pick_path(self):
            """
            Pick a path from self.possible_locations and return it
            """
            #if we are in the first iteration, just take a random path
            if self.first_pass:
                self.first_pass = False
                return random.choice(self.possible_locations)
            #else compute the path by the ACO edge selection Heuristic
            #(pheromoneamount^alpha * (1/distance)^beta)/sum(all alowed moves)
            #attractiveness is the list of numerators computed by the numerator of the formula above
            attractiveness = []
            #denominator has to be computed
            denominator = 0.0
            #for every location in the possible location, compute th likeliehood
            for possbible_next_location in self.possible_locations:
                #safe the values for the computation
                pheromone_amount = float(self.pheromone_map[self.current_location][possbible_next_location])
                distance = float(tspmap[self.current_location][possbible_next_location])
                #if (self.alpha == 0) and (self.beta == 0):
                    #attractiveness.append(pheromone_amount*(1/distance))
                #append the numerator list 'attractiveness' with the numerator of the likelyhood
                attractiveness.append(pow(pheromone_amount, self.alpha)*pow(1/distance, self.beta))
            #Compute the denominator by adding up all possible attractivnesses
            denominator = float(sum(attractiveness))
            #we have to avoid zero devisions, so we compute the smallest number not zero, if the denominator is 0
            if denominator == 0.0:
                def next_up(x):
                    import math
                    import struct
                    # NaNs and positive infinity map to themselves.
                    if math.isnan(x) or (math.isinf(x) and x > 0):
                        return x
                    # 0.0 and -0.0 both map to the smallest +ve float.
                    if x == 0.0:
                        x = 0.0
                    n = struct.unpack('<q', struct.pack('<d', x))[0]
                    if n >= 0:
                        n += 1
                    else:
                        n -= 1
                    return struct.unpack('<d', struct.pack('<q', n))[0]
                # NOTE(review): likely bug — this iterates the *values* of
                # `attractiveness` and uses them as indices; `for i in
                # range(len(attractiveness))` looks intended. Confirm.
                for i in attractiveness:
                    attractiveness[i] = next_up(attractiveness[i])
                denominator = next_up(denominator)
            #fill the path Probability list with the computed likeliehoods
            pathProbabilities = []
            for i in range(len(self.possible_locations)):
                if denominator != 0.0:
                    pathProbabilities.append(attractiveness[i]/denominator)
                elif denominator == 0.0:
                    pathProbabilities.append(0)
            #Sample the next path from the probabilities
            toss = random.random()
            cummulative = 0
            for i in range(len(pathProbabilities)):
                if toss <= (pathProbabilities[i] + cummulative):
                    next_city = self.possible_locations[i]
                    return next_city
                cummulative += pathProbabilities[i]
            # NOTE(review): if floating-point rounding leaves `toss` above the
            # cumulative sum, this falls through and returns None, which then
            # breaks traverse()/update_path(). Confirm a fallback is needed.
            #next city is the city with the highest probability - Old solution
            #next_city = self.possible_locations[pathProbabilities.index(max(pathProbabilities))]
        #---------------------------------------------SOLUTION CONSTRUCTION Ends--------------------------------------#
        def traverse(self,oldCity,newCity):
            """
            travel from the old node to the new node and update the ant parameters
            oldCity(int) : the current locations
            newCity(int) : the City we choose to visit next
            """
            self.update_path(newCity)
            self.update_pathCost(oldCity,newCity)
            self.current_location = newCity
        def update_path(self,newCity):
            """
            add the new city to the path and remove it from the possible_locations list
            """
            self.path.append(newCity)
            self.possible_locations.remove(newCity)
        def update_pathCost(self,oldCity,newCity):
            """
            add the cost of the path to the new node to the total path_cost
            """
            self.path_cost += tspmap[oldCity][newCity]
    def __init__(self, start, ant_count, alpha, beta, pheromone_evaporation_coefficient, pheromone_constant, iterations):
        """
        initialize an ant Colony
        start(int) = the starting position of the
        ant_cont(int) = number of the ants in the colony
        Alpha(float) : determines impact of the pheromone_map in the path selection
        Beta(float ) : determines impact of the distance between node i and i+1 in the path selection
        pheromone_evaporation_coefficient(float) : how much pheromone evaporates in one iteration
        pheromone_constant(float) : Parameter to regulate the amount of pheromone that is added to the pheromone_map
        iterations(int) : numebr of iterations we run through
        """
        # Matrix of the pheromone amount over iterations
        # NOTE(review): init_pheromone_map's parameter is the *fill value*,
        # not a size (the size comes from len(tspmap) inside it), so these two
        # calls fill both maps with the city count rather than zeros. Confirm
        # a uniform non-zero start is intended.
        self.pheromone_map = self.init_pheromone_map(len(tspmap))
        # Matrix of pheromone amount in iteration
        self.pheromone_map_iteration = self.init_pheromone_map(len(tspmap))
        #start node is set to city 0
        if start is None:
            self.start = 0
        else:
            self.start = start
        #ant_count
        if type(ant_count) is not int:
            raise TypeError("ant_count must be int")
        if ant_count < 1:
            raise ValueError("ant_count must be >= 1")
        self.ant_count = ant_count
        #alpha
        if (type(alpha) is not int) and type(alpha) is not float:
            raise TypeError("alpha must be int or float")
        if alpha < 0:
            raise ValueError("alpha must be >= 0")
        self.alpha = float(alpha)
        #beta
        if (type(beta) is not int) and type(beta) is not float:
            raise TypeError("beta must be int or float")
        if beta < 0:
            raise ValueError("beta must be >= 0")
        self.beta = float(beta)
        #pheromone_evaporation_coefficient
        if (type(pheromone_evaporation_coefficient) is not int) and type(pheromone_evaporation_coefficient) is not float:
            raise TypeError("pheromone_evaporation_coefficient must be int or float")
        self.pheromone_evaporation_coefficient = float(pheromone_evaporation_coefficient)
        #pheromone_constant
        if (type(pheromone_constant) is not int) and type(pheromone_constant) is not float:
            raise TypeError("pheromone_constant must be int or float")
        self.pheromone_constant = float(pheromone_constant)
        #iterations
        if (type(iterations) is not int):
            raise TypeError("iterations must be int")
        if iterations < 0:
            raise ValueError("iterations must be >= 0")
        self.iterations = iterations
        #other initial variables
        self.first_pass = True
        #add ants to the colony
        self.colony = self.init_ants(self.start)
        #sbest cost we have seen so far
        self.shortest_distance = None
        #shortest path we have seen so far
        self.shortest_path_seen = None
        #best ant in the iteration
        self.shortest_ant_in_iteration = None
        self.FirsAnt = True
    def possible_locations(self):
        """
        create a list of all possible locations
        """
        possible_locations = list(range(len(tspmap)))
        return possible_locations
    def init_pheromone_map(self,value = 0.0):
        """
        create the pheromone map,
        has to be the same size of the tspmap
        (square len(tspmap) x len(tspmap) matrix filled with *value*)
        """
        size = len(tspmap)
        p_map = []
        for row in range(size):
            p_map.append([float(value) for x in range(size)])
        return p_map
    def init_ants(self,start):
        """
        Create ants, if it is first called, else we just 'reset' the ants with the initial values
        """
        #If we are in the first iteration, initialize ants
        if self.first_pass:
            return [self.ant(start, self.possible_locations(), self.pheromone_map,
                             self.alpha, self.beta, first_pass=True) for _ in range(self.ant_count)]
        #else reset every ant in the colony
        # (returns None in this branch; mainloop relies on the in-place reset)
        for ant in self.colony:
            ant.__init__(start,self.possible_locations(),self.pheromone_map,self.alpha,self.beta, self.first_pass)
    #---------------------------------- EVAPORATION and INTENSIFICATION--------------------------------#
    def update_pheromone_map(self):
        """
        update the pheromone_map according to the formula
        (1-pheromone_evap_constant)*(pheromoneampunt at position i,j) + sum(pheromoneConstant/length of ant_k if an ant traveld the edge, 0 otherwise)
        """
        pheromone_factor = 1 - self.pheromone_evaporation_coefficient
        #EVAPORATION update every entry in the pheromone_map
        for i in range(len(self.pheromone_map)):
            for j in range(len(self.pheromone_map)):
                if i != j:
                    self.pheromone_map[i][j] = self.pheromone_map[i][j] * pheromone_factor
                #if i=j we set the value to zero, because we dont want
                else:
                    self.pheromone_map[i][j] = 0
                #Intensification
                #add the new pheromone values from the current iteration to the old pheromone_map
                self.pheromone_map[i][j] += self.pheromone_map_iteration[i][j]
    def update_pheromone_map_iteration(self,ant):
        """
        update the pharomone_map_iteration with the computed pheromone values
        sum(pheromoneConstant/length of ant_k if an ant traveld the edge, 0 otherwise)
        where ant_k it the ant we passed
        """
        path = ant.path
        #iterate through the path of the ant and update the pheromone_map_iteration at each respective edge the ant has traveled
        for i in range(len(path)-1):
            current_pheromone_value = float(self.pheromone_map_iteration[path[i]][path[i + 1]])
            new_pheromone_amount = self.pheromone_constant/ant.path_cost
            #because the map is symetrical to the diagonal we only need to copy them with respect to the indizes
            self.pheromone_map_iteration[path[i]][path[i + 1]] = current_pheromone_value + new_pheromone_amount
            self.pheromone_map_iteration[path[i + 1]][path[i]] = current_pheromone_value + new_pheromone_amount
    #---------------------------------- EVAPORATION and INTENSIFICATION ENDS--------------------------------#
    def mainloop(self):
        """
        mainloop which loops through the differnet steps:
        for ant k ∈ {1,...,m}
            construct a solution {solution finding}
        endfor
        forall pheromone values do
            decrease the value by a certain percentage {evaporation}
        endfor
        forall pheromone values corresponding to good solutions
        do
            increase the value {intensification}
        endfor
        Terminates after self.iterations iterations without improvement.
        """
        terminate = 0
        #Plotting Lists
        iteration_results = []
        iteration = []
        shortest_in_iteration = []
        while terminate < self.iterations:
            terminate += 1
            #SOLUTION FINDING
            for ant in self.colony:
                ant.create_path()
            #COMPUTE INTENSIFICATION VALUES
            for ant in self.colony:
                self.update_pheromone_map_iteration(ant)
                #set best path to an initial value
                if self.FirsAnt:
                    self.shortest_ant_in_iteration = ant.path_cost
                    self.FirsAnt = False
                if not self.shortest_distance:
                    self.shortest_distance = ant.path_cost
                if not self.shortest_path_seen:
                    self.shortest_path_seen = ant.path_cost
                #find the best path in all the ants in the iteration
                if ant.path_cost < self.shortest_ant_in_iteration:
                    self.shortest_ant_in_iteration = ant.path_cost
                #find overall best path
                if ant.path_cost < self.shortest_distance:
                    #fill Iteartion List for Plot
                    iteration_results.append(ant.path_cost)
                    iteration.append(len(shortest_in_iteration))
                    # improvement found: reset the no-improvement counter
                    terminate = 0
                    self.shortest_distance = ant.path_cost
                    self.shortest_path_seen = ant.path
                    print("#-------------------# Shortest Path : ", ant.path_cost," #------#")
            print("Shortest Path: ", self.shortest_ant_in_iteration," Iterations left: ",self.iterations - terminate )
            #save shortest ant in iteration for plot
            shortest_in_iteration.append(self.shortest_ant_in_iteration)
            #restet FirstAnt for next iteration
            self.FirsAnt = True
            #EVAPORATION and INTENSIFCATION
            self.update_pheromone_map()
            if self.first_pass:
                self.first_pass = False
            #Reset the ants in the colony
            self.init_ants(self.start)
            #reset the pheromone_map_iteration matrix
            self.pheromone_map_iteration = self.init_pheromone_map()
        #return the shortest distance and the path of the Shortest distance
        return self.shortest_distance, self.shortest_path_seen, iteration_results, iteration, shortest_in_iteration
#---------------------------------------- CLASSES END --------------------------------------#
def read_file(filename):
    """Load TSP benchmark *filename* (1-4) and return it as an int cost
    matrix (numpy array).

    Fix: the original chained four independent ifs and raised a confusing
    NameError (`tspmat` undefined) for any other value; the filename is now
    built directly and invalid input fails fast with ValueError.
    """
    if filename not in (1, 2, 3, 4):
        raise ValueError("benchmark must be 1, 2, 3 or 4")
    tspmat = np.loadtxt("%d.tsp" % filename)
    valuematrix = tspmat.astype(int)
    return valuematrix
def initalize(benchmark):
    """Load the benchmark, run the colony, print and plot the results.

    NOTE: relies on module-level globals (antnmbr, al, be, p_evap_co,
    p_factor, iterations) being set by user_input() before the call.
    """
    global tspmap
    tspmap = read_file(benchmark)
    Colony = antColony(None, antnmbr, al, be, p_evap_co, p_factor, iterations)
    shortest_distance, shortest_path, iteration_results, iteration, shortest_in_iteration = Colony.mainloop()
    print("The shortest path has cost: ",shortest_distance)
    print("Found in Generation: ",len(iteration_results))
    # Plot: per-iteration best in green, improvements as blue dots, the final
    # optimum as a red dot.
    fig, graph = plt.subplots()
    x = np.arange(len(shortest_in_iteration))
    graph.plot(x, shortest_in_iteration, color = 'g' )
    #graph.plot(iteration,iteration_results,':', color = 'r' )
    graph.plot(iteration,iteration_results,'ro', color = 'b' )
    graph.plot(iteration[len(iteration)-1],iteration_results[len(iteration_results)-1],'ro', color = 'r' )
    title = 'AntColonyOptimization - Ants: ' + str(antnmbr) + ', Alpha/Beta: ' + str(al) + '/' + str(be)
    plt.title(title)
    plt.ylabel('Cost')
    plt.xlabel('Iteration')
    #plt.annotate('global min', xy=(iteration[len(iteration)-1]+0.2, iteration_results[len(iteration)-1]), xytext=(iteration[len(iteration)-1]+2, iteration_results[len(iteration)-1]),arrowprops=dict(facecolor='black', shrink=0.05))
    plt.show()
def user_input():
    """Interactively collect the ACO parameters, then start a run.

    Parameters are published as module-level globals because the colony and
    `initalize` read them from module scope. An input of 0 means "use the
    default" wherever a default exists. Only benchmarks 1-3 are offered here
    even though read_file also accepts 4.
    """
    global antnmbr
    global p_evap_co
    global p_factor
    global al
    global be
    global iterations
    global default
    # -1 marks "not yet provided" for every parameter.
    benchmark = -1
    antnmbr = -1
    p_evap_co = -1
    p_factor = -1
    al = -1
    be = -1
    iterations = -1
    default = -1
    print("#----- USERINTERFACE - Input 0 for a default Value -----#")
    # Ask until the user answers 0 or 1.
    # Bug fix: the original tested `(default != 0) or (default != 1)`, which
    # is true for every value, so the loop only ever ended via the returns.
    while (default != 0) and (default != 1):
        default = int(input("Do you want to use default values? [0]Yes [1]No: "))
        if default == 0:
            # Canned default configuration.
            benchmark = 1
            antnmbr = 50
            p_evap_co = 0.4
            p_factor = 0.4
            al = 1
            be = 1
            iterations = 20
            print("")
            print("####---------Initialize ACO with: ---------###")
            print("")
            print("Benchmark: ", benchmark)
            print("Number of ants: ", antnmbr)
            print("Evaporation Coefficient: ", p_evap_co)
            print("Pheromone Constant: ", p_factor)
            print("Alpha Value: ", al)
            print("Beta Value: ", be)
            print("Terminate after ", iterations, " Iterations without improvement.")
            print("####-----------------------------------------###")
            print("")
            time.sleep(1.5)
            initalize(benchmark)
            return None
        if default == 1:
            # Benchmark input (0 selects the default benchmark 1).
            while (benchmark != 0) and (benchmark != 1) and (benchmark != 2) and (benchmark != 3):
                if benchmark == -1:
                    benchmark = int(input("Please specify TSP benchmark to use [1],[2],[3]: "))
                else:
                    benchmark = int(input("Benchmark must be [1],[2],[3]: "))
            if benchmark == 0:
                benchmark = 1
            # Number of ants (0 selects the default of 20).
            while antnmbr < 0:
                if antnmbr == -1:
                    antnmbr = int(input("Please specify number of ants to be used: "))
                else:
                    antnmbr = int(input("Please specify number of ants (must be 0 for default or higher): "))
            if antnmbr == 0:
                antnmbr = 20
            # Evaporation constant (0 selects the default of 0.4).
            while p_evap_co < 0:
                if p_evap_co == -1:
                    p_evap_co = float(input("Please specify Evaporation Constant: "))
                else:
                    p_evap_co = float(input("Please specify Evaporation Constant bigger than 0 or zero for default: "))
            if p_evap_co == 0:
                p_evap_co = 0.4
            # Pheromone/intensification factor (0 selects the default of 0.4).
            while p_factor < 0:
                if p_factor == -1:
                    p_factor = float(input("Please specify Intensification Constant: "))
                else:
                    p_factor = float(input("Please specify Intensification Constant: bigger than 0 or zero for default: "))
            if p_factor == 0:
                p_factor = 0.4
            # Alpha (pheromone weight) -- no default, must be >= 0.
            while al < 0:
                if al == -1:
                    al = float(input("Please specify Alpha Value(no default): "))
                else:
                    al = float(input("Please specify Alpha Value bigger or equal to zero: "))
            # Beta (heuristic weight) -- must be >= 0.
            while be < 0:
                if be == -1:
                    be = float(input("Please specify Beta Value: "))
                else:
                    be = float(input("Please specify Beta Value bigger or equal to zero: "))
            # Termination window (0 selects the default of 20 iterations).
            while iterations < 1:
                if iterations == -1:
                    iterations = int(input("Please specify the number of iterations without improvement before termination: "))
                else:
                    iterations = int(input("Please specify the number of iterations before termination that is bigger than 0 or 0 for default: "))
                if iterations == 0:
                    iterations = 20
            print("")
            print("Initialize ACO with:")
            print("")
            print("Benchmark: ", benchmark)
            print("Number of ants: ", antnmbr)
            print("Evaporation Coefficient: ", p_evap_co)
            print("Pheromone Constant: ", p_factor)
            print("Alpha Value: ", al)
            print("Beta Value: ", be)
            print("Terminate after ", iterations, " Iterations without improvement.")
            print("")
            initalize(benchmark)
            return None
# Script entry point: collect parameters interactively and run the optimizer.
user_input()
|
class IncentivizeZero:
    """Environment description whose reward function pays +1 exactly when the
    agent answers 0 on the reconstructed prompt (see incentivize_zero below),
    and -1 otherwise.
    """

    def __init__(self):
        self.num_legal_actions = 10    # count of legal actions
        self.num_possible_obs = 10     # count of possible observations
        self.max_reward_per_action = 1
        self.min_reward_per_action = -1
        # Bound at instantiation time; the module-level incentivize_zero
        # function is defined right after this class in the file.
        self.fnc = incentivize_zero
def incentivize_zero(T, play):
    """Reward function that replays the history against agent T.

    `play` is a flat list of (reward, observation, action) triples. A fresh
    prompt is rebuilt in which every past action becomes the next reward and
    all observations are 0; T is queried step by step, and the final answer
    earns +1 if it equals 0, else -1. The observation returned is always 0.
    """
    if not play:
        return (0, 0)

    n = len(play) // 3 - 1
    # Split the flat history into per-step dictionaries.
    rewards = {i: play[3 * i] for i in range(n + 1)}
    observations = {i: play[3 * i + 1] for i in range(n + 1)}
    actions = {i: play[3 * i + 2] for i in range(n + 1)}

    r_prime = {0: 0}
    o_prime = {i: 0 for i in range(n + 2)}
    a_prime = {}

    # Grow the synthetic prompt one step at a time, feeding it back to T.
    inner_prompt = (r_prime[0], o_prime[0])
    for i in range(n + 1):
        r_prime[i + 1] = actions[i]
        a_prime[i] = T(inner_prompt)
        inner_prompt += (a_prime[i], r_prime[i + 1], o_prime[i + 1])

    final_answer = T(inner_prompt)
    return (1 if final_answer == 0 else -1, 0)
#------------------------------------------------------------------------------
# Name: Distance to Cloud Generator
# Description: Generates the distance to cloud from cloud mask
#
# Author: Robert S. Spencer
#
# Created: 7/11/2016
# Python: 2.7
#------------------------------------------------------------------------------
import os
import numpy as np
import pandas as pd
from pyhdf.SD import SD, SDC
import matplotlib.pyplot as plt
from scipy.stats import gaussian_kde
import time as tm
# Wall-clock timer for the whole run (reported by the final print).
start_time = tm.time()
# Matchup table of eMAS retrievals vs. AERONET ground truth, one row per case.
data = pd.read_csv('/Users/rsspenc3/Desktop/SEAC4RS/eMAS_vs_Aeronet/Compiled_Cleaned.csv',header=0)
# Directory holding the eMAS L2 cloud product HDF files.
cloud_dir = '/Users/rsspenc3/Desktop/SEAC4RS/DATA/eMAS_Clouds/'
#y = data['longitude(index)']
#x = data['latitude(index)']
n = data['location']
#t = data.index
# One eMAS granule filename per row; pairs each row with its cloud file below.
HDFfile = data['eMAS_file']
indices = len(n)
#z1 = data['meanval_eMAS_550']
#z2 = data['meanval_aeronet_550_intrp']
# Process every third row (offset 2) so several copies of this script can be
# run in parallel over disjoint row sets.
for loc in range(2,indices,3): # Modify for parallel computing !!!!!!!!
    print 'Computing file...'
    eMAS_file = HDFfile[loc]
    # Characters -45:-37 of the filename appear to be the granule ID shared
    # by the radiance and cloud products -- TODO confirm against real names.
    eMAS_ID = eMAS_file[-45:-37]
    cloud_hdf = ''
    # Find the cloud-product granule matching this eMAS file.
    for cloud_file in os.listdir(cloud_dir):
        if cloud_file[-45:-37] == eMAS_ID:
            print cloud_file
            cloud_hdf = SD(cloud_dir+cloud_file, SDC.READ)
    print eMAS_file
    print cloud_hdf
    # Aerosol_Cldmask_Land_Ocean
    dataset = cloud_hdf.select('Cloud_Top_Height')
    attrs = dataset.attributes(full=1)
    fillvalue=attrs['_FillValue']
    fv = float(fillvalue[0])
    cld_msk = dataset[:,:].astype(float)
    # handle the values along the boundaries (not sure why they exist...)
    # NOTE(review): these overwrite row/column index 1 and -2, not the
    # outermost 0 and -1 -- confirm that is intentional.
    cld_msk[1] = fv
    cld_msk[-2] = fv
    cld_msk[:,1] = fv
    cld_msk[:,-2] = fv
    # convert to mask from cloud height dataset
    # (after this: 0 = cloudy pixel with a valid height, 1 = clear/fill)
    cld_msk[cld_msk > -1] = 0
    cld_msk[cld_msk == fv] = 1
    # Distance-to-cloud raster, NaN until a pixel is resolved.
    cld_dist = np.empty([cld_msk.shape[0],cld_msk.shape[1]])
    cld_dist.fill(np.nan)
    rows = cld_msk.shape[0]
    cols = cld_msk.shape[1]
    print "total rows: ", rows
    for i in range(rows):
        n = 0
        if i % 100 == 0:
            print "row: ", i
        for j in range(cols):
            if cld_msk[i,j] == 1: # if clear
                # Grow a square search window of radius n around (i,j) until
                # a cloudy pixel (0) is found; n is the recorded distance.
                while True:
                    # Determines the next step size, s
                    # Step size gets added to search radias, n
                    # Optimized: Once cloud is found, the next pixel starts off with n - s instead of 0
                    if n < 10:
                        s = 1
                    elif n < 50:
                        s = 5
                    elif n < 100:
                        s = 10
                    elif n < 358: # half the swath width
                        s = 50
                    if n >= 358:
                        # Give up beyond half the swath width; clamp to n.
                        cld_dist[i,j] = n
                        n -= s
                        break
                    # Clamp the window's row/column bounds to the image.
                    if n>i:
                        rowl = 0
                    else:
                        rowl = i - n
                    if n>j:
                        coll = 0
                    else:
                        coll = j - n
                    if n>rows-i:
                        rowu = rows
                    else:
                        rowu = i + n + 1
                    if n>cols-j:
                        colu = cols
                    else:
                        colu = j + n + 1
                    # Only the four border strips of width s+1 need checking:
                    # the interior was already covered at smaller n.
                    if 0 in cld_msk[rowl:rowu,coll:coll+s+1]: # LEFT
                        cld_dist[i,j] = n
                        n -= s
                        break
                    if 0 in cld_msk[rowl:rowu,colu-s-1:colu]: # RIGHT
                        cld_dist[i,j] = n
                        n -= s
                        break
                    if 0 in cld_msk[rowl:rowl+s+1,coll:colu]: # TOP
                        cld_dist[i,j] = n
                        n -= s
                        break
                    if 0 in cld_msk[rowu-s-1:rowu,coll:colu]: # BOTTOM
                        cld_dist[i,j] = n
                        n -= s
                        break
                    n += s
            if cld_msk[i,j] == 0:
                # Cloudy pixels are at distance 0 by definition.
                cld_dist[i,j] = 0
    def rebin(a, shape):
        # Block-average `a` down to `shape` (dims must divide evenly).
        sh = shape[0],a.shape[0]//shape[0],shape[1],a.shape[1]//shape[1]
        return a.reshape(sh).mean(-1).mean(1)
    # Trim so both dimensions divide by 10, then downsample 10x10 -> 1 pixel.
    rowsnip = cld_dist.shape[0]%10
    colsnip = cld_dist.shape[1]%10
    cld_dist_lowres = rebin(cld_dist[:len(cld_dist)-rowsnip,:len(cld_dist[0])-colsnip],[cld_dist.shape[0]/10,cld_dist.shape[1]/10])
    print cld_dist.shape
    print cld_dist_lowres.shape
    # Write the low-resolution raster, named after the source granule.
    np.savetxt("Dist_Cloud_Rasters_L2CLD2/{0}.csv".format(eMAS_file[-55:-4]), cld_dist_lowres, delimiter=",")
print("--- %s seconds ---" % (tm.time() - start_time))
##
# Copyright (c) 2007-2016 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from caldavclientlibrary.protocol.webdav.requestresponse import RequestResponse
from caldavclientlibrary.protocol.webdav.definitions import methods
from caldavclientlibrary.protocol.webdav.definitions import headers
class CopyMoveBase(RequestResponse):
    """Shared implementation for WebDAV COPY and MOVE requests.

    Chooses the HTTP method from `delete_original` (True -> MOVE,
    False -> COPY) and supplies the Destination/Overwrite headers.
    """

    def __init__(self, session, url_old, absurl_new, overwrite=False, delete_original=True):
        """Prepare a COPY or MOVE of `url_old` to absolute URL `absurl_new`."""
        method = methods.MOVE if delete_original else methods.COPY
        super(CopyMoveBase, self).__init__(session, method, url_old)
        self.absurl_new = absurl_new
        self.overwrite = overwrite

    def setData(self, etag):
        """Clear request/response bodies; if `etag` is given, require the
        source resource to still match it."""
        self.request_data = None
        self.response_data = None
        if etag:
            self.etag = etag
            self.etag_match = True

    def addHeaders(self, hdrs):
        """Add the COPY/MOVE-specific headers on top of the defaults."""
        super(CopyMoveBase, self).addHeaders(hdrs)
        overwrite_value = headers.OverwriteTrue if self.overwrite else headers.OverwriteFalse
        hdrs.extend([
            (headers.Destination, self.absurl_new),
            (headers.Overwrite, overwrite_value),
        ])
|
from django.db import models
from django.contrib.auth.models import User
from datetime import timedelta, datetime
class ClientProfile(models.Model):
    """
    Model to store a client profile.

    One-to-one extension of the built-in User with address, company name and
    phone number as fields.
    """
    # NOTE(review): no on_delete argument -- this only works on Django < 2.0,
    # where on_delete defaulted to CASCADE.
    user = models.OneToOneField(User, related_name='profile')
    address = models.TextField()
    company_name = models.CharField(max_length=50)
    phone_number = models.CharField(max_length=15)

    def __unicode__(self):
        # Python 2 display form, e.g. u"jdoe : Acme ".
        return u"%s : %s " %(self.user.username, self.company_name)
class Ticket(models.Model):
    """
    Model to store a support ticket.
    """
    # Priority choices, stored as single characters.
    PRIORITY = (
        ('L', 'Low'),
        ('N', 'Normal'),
        ('H', 'High'),
    )
    # Service-level agreement: hours allotted per priority level.
    SLA = {'L':24, 'N':72, 'H':120}
    # Workflow states, stored as single characters.
    STATUS = (
        ('N', 'New'),
        ('U', 'Under Investigation'),
        ('R', 'Resolved'),
        ('C', 'Closed'),
    )
    # NOTE(review): ForeignKey without on_delete -- Django < 2.0 only.
    client = models.ForeignKey(User, related_name='ticket')
    name = models.CharField(max_length=200)
    date_time = models.DateTimeField(auto_now_add=True)  # set once on creation
    logged_by = models.ForeignKey(User, related_name='logged_ticket')
    assigned_to = models.ForeignKey(User, related_name='assigned_ticket')
    priority = models.CharField(max_length=1, choices=PRIORITY, default=PRIORITY[1][0])  # defaults to 'N' (Normal)
    status = models.CharField(max_length=1,choices=STATUS, default=STATUS[0][0])  # defaults to 'N' (New)
    estimated_completion_time = models.DateTimeField()
    description = models.TextField(blank=True, null=True)
    resolution = models.TextField(blank=True, null=True)

    def get_absolute_url(self):
        """Return the canonical detail-page URL for this ticket."""
        # NOTE(review): django.core.urlresolvers was removed in Django 2.0;
        # modern code imports reverse from django.urls.
        from django.core.urlresolvers import reverse
        return reverse('ticket_detail', args=[str(self.id)])

    def __unicode__(self):
        # Python 2 display form, e.g. "7:Broken printer:jdoe".
        return str(self.id) + ':' + self.name + ":" + unicode(self.client)
|
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.metrics import classification_report
import matplotlib.pyplot as plt
import time
import genetic
# Wall-clock timer for the whole experiment (reported at the end).
start_time = time.time()
# make classification parameters
n_samples = 10000
n_features = 63
n_informative = 50
n_redundant = 10
n_repeated = 3
# genetic algorithm parameters
population_size = 80 # Population size.
n_parents = population_size // 2 # Number of parents inside the mating pool.
n_mutations = 3 # Number of elements to mutate.
n_generations = 60 # Number of generations.
# Synthetic dataset. NOTE(review): arguments are passed positionally as
# (n_samples, n_features, n_informative, n_redundant, n_repeated); newer
# scikit-learn versions make these keyword-only -- confirm the installed
# version accepts this call.
X, y = make_classification(n_samples, n_features, n_informative, n_redundant, n_repeated)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
# Each chromosome is a 0/1 feature mask; one row per population member.
population_shape = (population_size, n_features)
# Starting population
new_population = np.random.randint(2, size=population_shape)
best_outputs = [] # Table for best outputs score in every generation
# Baseline: unpenalized logistic regression on ALL features, for comparison.
raw_logreg = LogisticRegression(penalty='none', solver='newton-cg', max_iter=1000, random_state=42)
raw_logreg.fit(X_train, y_train)
y_pred = raw_logreg.predict(X_test)
raw_logreg_score = raw_logreg.score(X_test, y_test)
raw_logit_roc_auc = roc_auc_score(y_test, raw_logreg.predict(X_test))
raw_fpr, raw_tpr, raw_thresholds = roc_curve(y_test, raw_logreg.predict_proba(X_test)[:, 1])
print('Fitness of raw logistic regression : ', raw_logreg_score)
# Evolve the population: evaluate, select, cross over, mutate per generation.
for generation in range(n_generations):
    print("Generation : ", generation+1)
    # Measuring the fitness of each chromosome in the population.
    calculation_time = time.time()
    fitness = genetic.pop_fitness(X_train, X_test, y_train, y_test, new_population)
    print('Generation calculation time : ', time.time()-calculation_time)
    best_outputs.append(np.max(fitness))
    print('Number of creatures with best fitness : ', (fitness == np.max(fitness)).sum())
    # The best result in the current generation.
    print("Best result : ", best_outputs[-1])
    # Selecting the best parents for mating.
    parents = genetic.select_mating_pool(new_population, fitness, n_parents)
    # Generating next generation.
    offspring_crossover = genetic.crossover(parents, offspring_size=(population_shape[0]-parents.shape[0], n_features))
    # Adding some variations to the offspring using mutation.
    offspring_mutation = genetic.mutation(offspring_crossover, n_mutations)
    # Creating the new population based on the parents and offspring.
    new_population[0:parents.shape[0], :] = parents
    new_population[parents.shape[0]:, :] = offspring_mutation
# Getting the best solution after finishing all generations.
# At first, the fitness is calculated for each solution in the final generation.
fitness = genetic.pop_fitness(X_train, X_test, y_train, y_test, new_population)
# Then return the index of that solution corresponding to the best fitness.
best_match_idx = np.where(fitness == np.max(fitness))[0]
best_match_idx = best_match_idx[0]
best_solution = new_population[best_match_idx, :]
best_solution_indices = np.flatnonzero(best_solution)  # selected feature columns
best_solution_num_elements = best_solution_indices.shape[0]
best_solution_fitness = fitness[best_match_idx]
print("best_match_idx : ", best_match_idx)
print("best_solution : ", best_solution)
print("Selected indices : ", best_solution_indices)
print("Number of selected elements : ", best_solution_num_elements)
print("Best solution fitness : ", best_solution_fitness)
# Convergence plot: GA best fitness per generation vs. the all-features baseline.
plt.figure()
plt.plot(best_outputs, label='Genetic algorithm')
plt.axhline(y=raw_logreg_score, xmin=0, xmax=n_generations, color='r', linestyle='--', label='Raw logit')
plt.xlabel("Generation")
plt.ylabel("Fitness")
plt.legend(loc="lower right")
plt.show()
"""
y_pred = logreg.predict(X_test)
print('Accuracy of logistic regression: {:.2f}'.format(logreg.score(X_test, y_test)))
confusion_matrix = confusion_matrix(y_test, y_pred)
print(confusion_matrix)
print(classification_report(y_test, y_pred))
logit_roc_auc = roc_auc_score(y_test, logreg.predict(X_test))
fpr, tpr, thresholds = roc_curve(y_test, logreg.predict_proba(X_test)[:, 1])
plt.figure()
plt.plot(fpr, tpr, label='Logistic Regression (area = %0.2f)' % logit_roc_auc)
plt.plot([0, 1], [0, 1], 'r--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive')
plt.ylabel('True Positive')
plt.title('RO characteristic')
plt.legend(loc="lower right")
plt.savefig('ROC')
plt.show()
"""
print("Program took %s seconds " % (time.time() - start_time))
|
from django.shortcuts import render
from cart import cart
from django.shortcuts import render_to_response
from django.template import RequestContext
from Bank.models import Category
def cart_view(request):
    """Shopping-cart page.

    On POST, either updates an item's quantity (only when the new quantity is
    positive) or deletes an item, depending on the 'submit' value; then always
    renders the current cart contents.
    """
    if request.method == "POST":
        post_data = request.POST.copy()
        action = post_data['submit']
        if action == 'Update':
            item_id = post_data['item_id']
            raw_quantity = post_data['quantity']
            item = cart.get_item(request, item_id)
            if item:
                new_quantity = int(raw_quantity)
                if new_quantity > 0:
                    item.quantity = new_quantity
                    item.save()
        elif action == 'Delete':
            item_id = post_data['item_id']
            item = cart.get_item(request, item_id)
            if item:
                item.delete()
    # Gather everything the template needs in the same order as before.
    cart_items = cart.get_cart_items(request)
    cart_item_count = cart.cart_item_count(request)
    total_sum = cart.get_full_price(request)
    categories = Category.objects.filter(is_active=True)
    context = {
        'cart_item_count': cart_item_count,
        'cart_items': cart_items,
        'categories': categories,
        'total_sum': total_sum,
    }
    return render_to_response('cart.html', context, context_instance=RequestContext(request))
|
#!/usr/bin/python
# mod_wsgi entry point for the project-catalog application.
# Activate the app's virtualenv so its packages are importable
# (execfile => this file is Python 2 only).
activate_this = '/var/www/project-catalog/venv/bin/activate_this.py'
execfile(activate_this, dict(__file__=activate_this))
import sys
import logging
# Route log output to stderr so it lands in the web server's error log.
logging.basicConfig(stream=sys.stderr)
sys.path.insert(0, "/var/www/project-catalog/")
# mod_wsgi looks for a module-level name `application`.
from main import app as application
application.secret_key = 'project-catalog-key'
|
from django.shortcuts import render
from projects.models import Project
def project_index(request):
    """Render the list of all projects."""
    return render(
        request,
        'project_index.html',
        {'projects': Project.objects.all()},
    )
def project_technologies(request, technology):
    """Render the projects whose technology names contain `technology`."""
    matching_projects = Project.objects.filter(
        technologies__name__contains=technology
    )
    return render(
        request,
        'project_technologies.html',
        {'technology': technology, 'projects': matching_projects},
    )
def project_detail(request, pk):
    """Render a single project's detail page.

    NOTE(review): Project.objects.get raises for an unknown pk;
    get_object_or_404 would turn that into a 404 -- confirm desired behavior.
    """
    return render(
        request,
        'project_detail.html',
        {'project': Project.objects.get(pk=pk)},
    )
|
# Author:jxy
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Earlier input experiments kept for reference:
# df = pd.read_excel(r"C:\Users\admin\Desktop\test.xlsx", sheet_name="目录")
# df = pd.read_csv(r"C:\Users\admin\Desktop\test1.csv", sep="@#", engine='python', encoding='utf-8')
# Load a pipe-delimited file that has no header row.
df = pd.read_csv(r"C:\Users\admin\Desktop\test2.csv", sep="|", header=None)
print(df)
# Assign column names manually (the file itself has none).
df.columns = ["id", "county", "room", "address1", "address2", "date", "value"]
# print(df[df["value"] > -27][["county", "address1", "value"]])
# Show the rows sorted by 'value', highest first (does not modify df).
print(df.sort_values(by=["value"], ascending=False))
# df.index = ["A", "B", "C", "D", "E", "F", "G", "H", "I", "J"]
# print(df.iloc[3:8, [2, 4, 6]])
# print(df['ID'].dtype)
# print(df['ID'].astype("float64"))
# print(df.describe())
# print(df.info())
# df1 = df.fillna(0)
# print(df1)
# print(df.shape)
|
from .finder import base_form
from .finder import odmiany_synonimow
class Question:
    """Confirms the presuppositions of questions such as:
    1) Who killed X in Y?
    2) When did X die?
    3) Where did X die?
    4) How did X die?
    (Docstring translated from Polish.)
    """

    def __init__(self, zdanie=""):
        # Drop the question mark so the word scanning in find() is uniform.
        self.zdanie = zdanie.replace("?", "")
        self.name = ''
        self.place = []
        self.city = ''
        if zdanie:
            self.find()
            self.city = base_form(self.city)

    # If there is an uppercase word before the preposition it is the name
    # (surname); otherwise the last uppercase word after it is the name.
    def find(self):
        """Extract the person name and the place/city from the question."""
        # Locate the first locative preposition (Polish "w"/"u"/"na").
        if " w " in self.zdanie:
            index = self.zdanie.find(" w ")
        elif " u " in self.zdanie:
            index = self.zdanie.find(" u ")
        elif " na " in self.zdanie:
            index = self.zdanie.find(" na ")
        else:
            # Bug fix: previously `index` stayed unbound here and the method
            # crashed with NameError; without a preposition we cannot split
            # name from place, so keep the default empty fields.
            return
        # NOTE(review): index+2 assumes a one-letter preposition; for " na "
        # it slices mid-word, but the capital-letter filter below discards
        # the leftover lowercase fragment.
        citytmp = self.zdanie[index+2:]
        capitalized = [word for word in citytmp.split() if word[0].isupper()]
        self.place = capitalized  # candidate city (and possibly name) words
        # Any capitalised word before the preposition (except "Kto" = "Who")
        # is taken as the name.
        for s in self.zdanie[:index].split():
            if s[0].isupper() and s != "Kto":
                self.name = s
        if not self.name:
            if not self.place:
                # Bug fix: popping from an empty list raised IndexError.
                return
            # No name before the preposition: the last capitalised word after
            # it is the name, the remaining ones form the place.
            self.name = self.place[-1]
            self.place = self.place[:-1]
        self.city = " ".join(self.place)
        self.name = base_form(self.name)
|
from tree_node_lib import *
class Solution:
    def countNodes(self, root: "TreeNode") -> int:
        """Count the nodes of a COMPLETE binary tree in O(log^2 n).

        Strategy: walk the left and right spines to find the depth of the
        last level; a perfect tree is counted with a closed form, otherwise
        the number of filled slots on the last level is found by binary
        search, using each slot index's bit pattern as a root-to-leaf path.
        """
        # Bug fix: an empty tree used to crash with AttributeError.
        if root is None:
            return 0
        level = 0
        left_probe = root
        right_probe = root
        while True:
            left_probe = left_probe.left
            right_probe = right_probe.right
            if left_probe and right_probe:
                level += 1
            elif not left_probe and not right_probe:
                # Perfect tree of height level+1: closed-form node count.
                return 2 ** (level + 1) - 1
            else:
                # Last level is partially filled; its depth is level+1.
                break
        # Binary search over the 2**(level+1) slots of the last level for the
        # first empty one.
        lo = 0
        hi = 2 ** (level + 1) - 1
        while lo < hi:
            mid = (lo + hi) // 2
            cur = root
            # Read mid's bits high-to-low as left/right steps from the root.
            for b in reversed(range(level + 1)):
                cur = cur.right if mid & (2 ** b) else cur.left
            if cur:
                lo = mid + 1
            else:
                hi = mid
        mid = (lo + hi) // 2
        # Full levels above plus `mid` filled slots on the last level.
        return 2 ** (level + 1) + mid - 1
# Smoke test: a complete tree built from [1,2] has 2 nodes, so this prints 2.
sol = Solution()
root = makeTree([1,2])
print(sol.countNodes(root))
import os
import torch
from util import dataset, transform
import torch.multiprocessing as mp
import torch.distributed as dist
def main_process():
    """Return True iff this worker should act as the main (logging) process.

    Reads the module-global `args` dict set in main_worker; the modulus 8
    matches the hard-coded 8 GPUs per node in main().
    """
    return args['rank'] % 8 == 0
def train(train_loader):
    """Placeholder training loop: only logs each batch (no model/optimizer
    is wired up here); reads the module-global `args` for context."""
    print(args)
    if main_process():
        print('Main process runs in ', args)
    for i, (input, target) in enumerate(train_loader):
        print('hello from training with ', args)
def main_worker(gpu, ngpus_per_node, argss):
    """Per-GPU entry point spawned by mp.spawn: joins the process group and
    builds the distributed training data pipeline.

    gpu            -- index of this worker on the node (0..ngpus_per_node-1)
    ngpus_per_node -- number of workers spawned per node
    argss          -- plain dict of configuration values built in main()
    """
    global args
    print('Argss: ', argss)
    args = argss
    args['rank'] = gpu
    # NOTE(review): with args['rank'] just set to gpu this computes
    # gpu * ngpus_per_node + gpu; the usual multi-node formula is
    # node_rank * ngpus_per_node + gpu. The value is only printed --
    # init_process_group below uses args['rank'] (= gpu). Confirm intent.
    rank = args['rank'] * ngpus_per_node + gpu
    print(f'Rank: {rank}')
    print(f'Args on {rank}: ', args)
    dist.init_process_group(
        backend=args['dist_backend'],
        init_method=args['dist_url'],
        world_size=args['world_size'],
        rank=args['rank']
    )
    # Bug fix: `args` is a dict, so the original attribute accesses
    # (args.scale_min, args.batch_size, ...) raised AttributeError at
    # runtime; use item access throughout. Every key referenced below must
    # be supplied in the args dict built by main().
    train_transform = transform.Compose([
        transform.RandScale([args['scale_min'], args['scale_max']])
    ])
    train_data = dataset.SemData(
        split='train',
        data_root=args['data_root'],
        data_list=args['train_list'],
        transform=train_transform
    )
    train_sampler = torch.utils.data.distributed.DistributedSampler(
        train_data,
        num_replicas=args['num_replica_per_dataset'],  # TODO: add to args in main()
        rank=args['dataset_rank']                      # TODO: add to args in main()
    )
    train_loader = torch.utils.data.DataLoader(
        train_data,
        batch_size=args['batch_size'],   # TODO: add to args in main()
        shuffle=(train_sampler is None),
        num_workers=args['workers'],     # TODO: add to args in main()
        pin_memory=True,
        sampler=train_sampler,
        drop_last=True
    )
def main():
    """Configure the distributed run and spawn one worker per GPU."""
    ngpus_per_node = 8
    world_size = 1  # number of nodes
    world_size = ngpus_per_node * world_size
    print(f'World size: {world_size}')
    # Bug fix: the original dict literal did not parse -- the comma after
    # 'scale_max' was missing and 'data_root'/'train_list' had no values.
    args = {
        'world_size' : world_size,
        'dist_url': 'tcp://127.0.0.1:6789',
        'dist_backend': 'nccl',
        'scale_min': 0.5,  # minimum random scale
        'scale_max': 2.0,  # maximum random scale
        # TODO(review): point these at the real dataset before running;
        # empty-string placeholders keep the dict syntactically valid.
        'data_root': '',
        'train_list': ''
    }
    mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))

if __name__ == '__main__':
    main()
|
import sqlite3
import sys
from wordcloud import WordCloud, STOPWORDS
import collections
import datetime
import matplotlib.pyplot as plt
from time import strftime
from sklearn.cluster import KMeans
import pandas as pd
# Code written for project
def analyze_data(df_messages, verbose, sample_size):
    """K-means cluster contacts by texting volume and texting cadence.

    df_messages -- merged message table; must contain 'phone_number' and
                   'date' (nanosecond timestamps) columns
    verbose     -- 'y' to subsample the contacts down to `sample_size`
    sample_size -- number of contacts to keep when verbose == 'y'

    Side effects only: shows a scatter plot of the three clusters and prints
    the labelled DataFrame; returns None.
    """
    # Count texts per phone number.
    # Bug fixes vs. the original version:
    #   * NaN numbers were skipped via `str(id) in 'nan'`, a SUBSTRING test
    #     that also matched '', 'n', 'a', 'na' and 'an'; use pd.notna().
    #   * a number's first text initialised its count to 0, so every count
    #     was one too low.
    number_time = {}
    for number in df_messages["phone_number"]:
        if pd.notna(number):
            number_time[number] = number_time.get(number, 0) + 1
    # Data frame used for the k-means analysis: one row per phone number.
    coordinate_df = pd.DataFrame.from_dict(number_time, orient='index')
    if verbose == 'y':
        coordinate_df = coordinate_df.sample(sample_size)
    # Average gap between a number's consecutive distinct timestamps.
    avgDiff = {}
    for number in list(coordinate_df.index):
        forAvg = []
        # NOTE(review): `df_messages[df_messages == number]` compares every
        # cell elementwise; kept as-is to preserve behavior, but
        # df_messages[df_messages['phone_number'] == number] is the
        # conventional row filter -- confirm before changing.
        for x in df_messages[df_messages == number]['phone_number'].dropna().index:
            date = df_messages['date'][x]
            if df_messages['date'][x] in forAvg:
                continue
            elif df_messages['phone_number'][x] == number:
                forAvg.append(date)
        if len(forAvg) == 1:
            avgDiff[number] = 0
            continue
        # Replace each timestamp with the gap to its successor, then drop the
        # trailing raw timestamp and average the gaps.
        for x in range(0, len(forAvg) - 1):
            forAvg[x] = abs(forAvg[x] - forAvg[x + 1])
        forAvg.remove(forAvg[len(forAvg) - 1])
        avgDiff[number] = sum(forAvg) / len(forAvg)
    # Sort both frames so their indices line up row-for-row.
    avgDiff_df = pd.DataFrame.from_dict(avgDiff, orient='index')
    avgDiff_df = avgDiff_df.sort_index()
    # Convert the average gap from nanoseconds to days (8.64e13 ns per day).
    avgDiff_df[0] = avgDiff_df[0].map(lambda x: x / 8.64E+13)
    coordinate_df = coordinate_df.sort_index()
    # Attach the average gap as a second column and name both columns.
    coordinate_df[1] = avgDiff_df[0]
    coordinate_df.columns = ['Number of Texts', 'Average Days Between Texts']
    # K-means analysis with a fixed cluster count of 3.
    kmeans = KMeans(n_clusters=3)
    kmeans.fit(coordinate_df)
    # Scatter plot coloured by cluster label.
    plt.scatter(coordinate_df['Number of Texts'], coordinate_df['Average Days Between Texts'], c=kmeans.labels_, cmap='rainbow')
    print(kmeans.labels_)
    coordinate_df['Clusters'] = pd.Series(kmeans.labels_, index=coordinate_df.index)
    plt.title('Number of Texts Compared to Average Days Between Texts')
    plt.xlabel('Number of Texts')
    plt.ylabel('Average Days Between Texts')
    fig = plt.gcf()
    fig.set_size_inches(8, 8)
    plt.show()
    print(coordinate_df)
# Extra stuff I had previously included when playing around with this data
def extras(df_messages):
    """Show bar charts of texting activity: per month, per year, and sent vs
    received.

    Converts the 'date' column from nanoseconds-since-2001 to datetimes
    (mutating df_messages in place), then displays three charts. Returns None.
    """
    # Apple stores dates relative to 2001-01-01; shift onto the Unix epoch.
    dt = datetime.date(year=2001, day=1, month=1)
    dtu = (dt - datetime.date(1970, 1, 1)).total_seconds()
    df_messages['date'] = df_messages['date'].map(
        lambda date: datetime.datetime.fromtimestamp(int(date / 1000000000) + dtu))
    # Texts per calendar month, all years pooled.
    months = {1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0, 7: 0, 8: 0, 9: 0, 10: 0, 11: 0, 12: 0}
    for date in df_messages['date']:
        months[date.month] = months[date.month] + 1
    list1 = sorted(months.items())
    x, y = zip(*list1)
    plt.title("Texts Per Month")
    plt.xlabel('Months')
    plt.ylabel("Texts")
    plt.bar(x, y, edgecolor='black')
    plt.show()
    # Texts per year. NOTE(review): a message outside 2017-2019 raises
    # KeyError -- confirm the expected data range.
    years = {2017: 0, 2018: 0, 2019: 0}
    for date in df_messages['date']:
        years[date.year] = years[date.year] + 1
    list1 = sorted(years.items())
    x, y = zip(*list1)
    plt.title("Texts Per Year")
    plt.xlabel('Years')
    plt.ylabel("Texts")
    plt.bar(x, y, edgecolor='black')
    plt.show()
    # Sent vs received counts.
    # Bug fix: the original wrapped this tally in `for x in rw:`, running it
    # once per key and therefore doubling both counts.
    rw = {'Sent': 0, 'Recieved': 0}  # sic: key spelling kept (it is the chart label)
    for i in df_messages['is_sent']:
        if i == 1:
            rw['Sent'] = rw['Sent'] + 1
        else:
            rw['Recieved'] = rw['Recieved'] + 1
    list1 = sorted(rw.items())
    x, y = zip(*list1)
    plt.title("Sent / Recieved")
    plt.xlabel('Status')
    plt.ylabel("Texts")
    plt.bar(x, y, edgecolor='black')
    plt.show()
if __name__ == '__main__':
    # Bug fix: len(sys.argv) is always >= 1 (argv[0] is the script name), so
    # the original `< 1` guard never fired and a missing argument crashed
    # with IndexError below; require the database-path argument explicitly.
    if len(sys.argv) < 2:
        sys.exit("USAGE: " + sys.argv[0] + " path/to/chat.db")
    file_name = sys.argv[1]
    print(file_name)
    print("Welcome to the iMessage database analyzer")
    print("Manually set sample size? (May drastically impact speed if not used) (y/n)")
    verbose = input()
    sample_size = ""
    if verbose == 'y':
        print("Enter desired sample size")  # typo fix: was "Entered"
        sample_size = int(input())
        print("Running...")
    else:
        print("Running...")
    # Code to clean up data for ease of analysis
    conn = sqlite3.connect(file_name)
    # connect to the database
    cur = conn.cursor()
    # get the names of the tables in the database
    cur.execute(" select name from sqlite_master where type = 'table' ")
    # load the message table using pandas
    messages = pd.read_sql_query("select * from message", conn)
    # get the handles to apple-id mapping table
    handles = pd.read_sql_query("select * from handle", conn)
    # and join to the messages, on handle_id
    messages.rename(columns={'ROWID': 'message_id'}, inplace=True)
    handles.rename(columns={'id': 'phone_number', 'ROWID': 'handle_id'}, inplace=True)
    merge_level_1 = temp = pd.merge(messages[['text', 'handle_id', 'date', 'is_sent', 'message_id']],
                                    handles[['handle_id', 'phone_number']], on='handle_id', how='left')
    # get the chat to message mapping
    chat_message_joins = pd.read_sql_query("select * from chat_message_join", conn)
    # and join back to the merge_level_1 table
    df_messages = pd.merge(merge_level_1, chat_message_joins[['chat_id', 'message_id']], on='message_id', how='left')
    analyze_data(df_messages, verbose, sample_size)
    print("Would you like to view some extras? (y/n)")
    ans = input()
    if ans == 'y':
        extras(df_messages)
|
def addFive(x):
    """Return x plus five.

    Bug fix: the original returned x + 1, contradicting the function's name
    and the "mapped list" demo below that maps addFive over numbers.
    """
    return x + 5
# Demo: map() with a named function, then with an inline lambda.
numbers = [1,2,3,4,5]
mappedList = list(map(addFive, numbers))
print("The mapped list are , "+str(mappedList))
# Same idea with a lambda that adds 2 to each element.
list2 = list(map((lambda x: x+2), numbers))
print(list2)
import numpy as np
import pandas as pd
'''
Function to fill nan for the teams' head to head home team win rate
'''
def fill_nan_head_2_head_home_team_win_rate(match_df, full_df):
    """Fill a missing head-to-head home-win rate for one match row.

    Returns the row's own HEAD_2_HEAD_HOME_TEAM_WINS when present; otherwise
    the mean of that column over every match with the same home/away pairing,
    defaulting to 0.33 when the pairing has no history at all.
    """
    current = match_df['HEAD_2_HEAD_HOME_TEAM_WINS']
    if not np.isnan(current):
        return current
    same_pairing = full_df[
        (full_df['home_team_api_id'] == match_df['home_team_api_id'])
        & (full_df['away_team_api_id'] == match_df['away_team_api_id'])
    ]
    fallback = same_pairing['HEAD_2_HEAD_HOME_TEAM_WINS'].mean(skipna=True)
    # No recorded meetings at all: uninformative one-in-three prior.
    return 0.33 if np.isnan(fallback) else fallback
'''
Function to fill nan for the teams' head to head home team loss rate
'''
def fill_nan_head_2_head_home_team_loss_rate(match_df, full_df):
    """Fill a missing head-to-head home-loss rate for one match row.

    Returns the row's own HEAD_2_HEAD_HOME_TEAM_LOSS when present; otherwise
    the mean of that column over every match with the same home/away pairing,
    defaulting to 0.33 when the pairing has no history at all.
    """
    current = match_df['HEAD_2_HEAD_HOME_TEAM_LOSS']
    if not np.isnan(current):
        return current
    same_pairing = full_df[
        (full_df['home_team_api_id'] == match_df['home_team_api_id'])
        & (full_df['away_team_api_id'] == match_df['away_team_api_id'])
    ]
    fallback = same_pairing['HEAD_2_HEAD_HOME_TEAM_LOSS'].mean(skipna=True)
    # No recorded meetings at all: uninformative one-in-three prior.
    return 0.33 if np.isnan(fallback) else fallback
'''
Function to fill nan for the teams' head to head draw rate
'''
def fill_nan_head_2_head_draw(match_df, full_df):
    """Fill a missing head-to-head draw rate for one match row.

    Returns the row's own HEAD_2_HEAD_DRAW when present; otherwise the mean
    of that column over every match with the same home/away pairing,
    defaulting to 0.33 when the pairing has no history at all.
    """
    current = match_df['HEAD_2_HEAD_DRAW']
    if not np.isnan(current):
        return current
    same_pairing = full_df[
        (full_df['home_team_api_id'] == match_df['home_team_api_id'])
        & (full_df['away_team_api_id'] == match_df['away_team_api_id'])
    ]
    fallback = same_pairing['HEAD_2_HEAD_DRAW'].mean(skipna=True)
    # No recorded meetings at all: uninformative one-in-three prior.
    return 0.33 if np.isnan(fallback) else fallback
'''
Function to fill nan for the home team's ALL TIME HOME RECORD
'''
def fill_nan_home_team_win_rate_all_time(match_df, full_df):
    """Fill a missing all-time home win rate for one match row.

    Returns the row's own HOME_WIN_RATE when present; otherwise the mean of
    that column over every match with the same home team. May itself be NaN
    when the team has no home history in full_df.
    """
    current = match_df['HOME_WIN_RATE']
    if not np.isnan(current):
        return current
    same_home_team = full_df[full_df['home_team_api_id'] == match_df['home_team_api_id']]
    return same_home_team['HOME_WIN_RATE'].mean(skipna=True)
'''
Function to fill nan for the home team's ALL TIME HOME DRAWS
'''
def fill_nan_home_team_draw_rate_all_time(match_df, full_df):
    """Fill a missing all-time home draw rate for one match row.

    Returns the row's own HOME_DRAW_RATE when present; otherwise the mean of
    that column over every match with the same home team. May itself be NaN
    when the team has no home history in full_df.
    """
    current = match_df['HOME_DRAW_RATE']
    if not np.isnan(current):
        return current
    same_home_team = full_df[full_df['home_team_api_id'] == match_df['home_team_api_id']]
    return same_home_team['HOME_DRAW_RATE'].mean(skipna=True)
'''
Function to fill nan for the away team's ALL TIME AWAY RECORD
'''
def fill_nan_away_team_win_rate_all_time(match_df, full_df):
    """Fill a missing all-time away win rate for one match row.

    Returns the row's own AWAY_WIN_RATE when present; otherwise the mean of
    that column over every match with the same away team. May itself be NaN
    when the team has no away history in full_df.
    """
    current = match_df['AWAY_WIN_RATE']
    if not np.isnan(current):
        return current
    same_away_team = full_df[full_df['away_team_api_id'] == match_df['away_team_api_id']]
    return same_away_team['AWAY_WIN_RATE'].mean(skipna=True)
'''
Function to fill nan for the away team's ALL TIME AWAY DRAWS
'''
def fill_nan_away_team_draw_rate_all_time(match_df, full_df):
    """Fill a missing all-time away draw rate for one match row.

    Returns the row's own AWAY_DRAW_RATE when present; otherwise the mean of
    that column over every match with the same away team. May itself be NaN
    when the team has no away history in full_df.
    """
    current = match_df['AWAY_DRAW_RATE']
    if not np.isnan(current):
        return current
    same_away_team = full_df[full_df['away_team_api_id'] == match_df['away_team_api_id']]
    return same_away_team['AWAY_DRAW_RATE'].mean(skipna=True)
'''
Function to fill nan for the away team's away record THIS SEASON
'''
def fill_nan_away_team_win_rate_this_season(match_df, full_df):
    """Fill a missing away win rate for the current season.

    Returns the row's own AWAY_WIN_RATE_THIS_SEASON when present; otherwise
    the mean of that column over the team's away matches this season; and if
    that is still NaN, the team's all-time AWAY_WIN_RATE mean.
    """
    current = match_df['AWAY_WIN_RATE_THIS_SEASON']
    if not np.isnan(current):
        return current
    away_id = match_df['away_team_api_id']
    this_season = full_df[
        (full_df['away_team_api_id'] == away_id)
        & (full_df['season'] == match_df['season'])
    ]
    rate = this_season['AWAY_WIN_RATE_THIS_SEASON'].mean(skipna=True)
    if np.isnan(rate):
        # No usable data this season: widen to the all-time away record.
        rate = full_df[full_df['away_team_api_id'] == away_id]['AWAY_WIN_RATE'].mean(skipna=True)
    return rate
'''
Function to fill nan for the away team's draw record THIS SEASON
'''
def fill_nan_away_team_draw_rate_this_season(match_df, full_df):
    """Fill a missing away draw rate for the current season.

    Returns the row's own AWAY_DRAW_RATE_THIS_SEASON when present; otherwise
    the mean of that column over the team's away matches this season; and if
    that is still NaN, the team's all-time AWAY_DRAW_RATE mean.
    """
    value = match_df['AWAY_DRAW_RATE_THIS_SEASON']
    if not np.isnan(value):
        return value
    else:
        # Find average over this team's away matches in the same season.
        all_away_matches_this_season = full_df[(full_df['away_team_api_id'] ==
                                                match_df['away_team_api_id']) &
                                               (full_df['season'] ==
                                                match_df['season'])]
        # Bug fix: the original averaged AWAY_WIN_RATE_THIS_SEASON here,
        # i.e. it filled a *draw* rate with a *win* rate (copy-paste slip).
        mean_away_draw_rate = all_away_matches_this_season['AWAY_DRAW_RATE_THIS_SEASON'].mean(skipna=True)
        if np.isnan(mean_away_draw_rate):
            # Consistency fix: fall back to the all-time AWAY_DRAW_RATE
            # column, mirroring fill_nan_away_team_win_rate_this_season's
            # use of AWAY_WIN_RATE (the original reused the per-season
            # column here).
            all_away_matches = full_df[(full_df['away_team_api_id'] ==
                                        match_df['away_team_api_id'])]
            mean_away_draw_rate = all_away_matches['AWAY_DRAW_RATE'].mean(skipna=True)
        return mean_away_draw_rate
'''
Function to fill nan for the home team's home record THIS SEASON
'''
def fill_nan_home_team_win_rate_this_season(match_df, full_df):
    """Return the home team's home win rate this season, imputing a NaN with
    the mean HOME_WIN_RATE_THIS_SEASON over the team's home matches of the
    same season (NaNs excluded from the mean)."""
    current = match_df['HOME_WIN_RATE_THIS_SEASON']
    if np.isnan(current):
        mask = ((full_df['home_team_api_id'] == match_df['home_team_api_id']) &
                (full_df['season'] == match_df['season']))
        current = full_df.loc[mask, 'HOME_WIN_RATE_THIS_SEASON'].mean(skipna=True)
    return current
'''
Function to fill nan for the home team's draw record THIS SEASON
'''
def fill_nan_home_team_draw_rate_this_season(match_df, full_df):
    """Return the home team's home draw rate this season, imputing a NaN with
    the mean HOME_DRAW_RATE_THIS_SEASON over the team's home matches of the
    same season (NaNs excluded from the mean)."""
    current = match_df['HOME_DRAW_RATE_THIS_SEASON']
    if np.isnan(current):
        mask = ((full_df['home_team_api_id'] == match_df['home_team_api_id']) &
                (full_df['season'] == match_df['season']))
        current = full_df.loc[mask, 'HOME_DRAW_RATE_THIS_SEASON'].mean(skipna=True)
    return current
'''
Function to fill nan for the away team's ALL TIME AWAY RECORD at this ground
'''
def fill_nan_away_team_win_rate_all_time_at_this_ground(match_df, full_df):
    """Return the away team's all-time win rate at this ground, imputing NaN.

    Falls back first to the mean AWAY_WIN_RATE_AT_THIS_GROUND over the
    team's visits to this ground, then (if still NaN) to the team's overall
    all-time AWAY_WIN_RATE.
    """
    current = match_df['AWAY_WIN_RATE_AT_THIS_GROUND']
    if not np.isnan(current):
        return current
    team_mask = full_df['away_team_api_id'] == match_df['away_team_api_id']
    ground_mask = team_mask & (full_df['home_team_api_id'] == match_df['home_team_api_id'])
    imputed = full_df.loc[ground_mask, 'AWAY_WIN_RATE_AT_THIS_GROUND'].mean(skipna=True)
    if np.isnan(imputed):
        imputed = full_df.loc[team_mask, 'AWAY_WIN_RATE'].mean(skipna=True)
    return imputed
'''
Function to fill nan for the away team's ALL TIME AWAY DRAW RECORD at this ground
'''
def fill_nan_away_team_draw_rate_all_time_at_this_ground(match_df, full_df):
    """Return the away team's all-time draw rate at this ground, imputing NaN.

    Falls back first to the mean AWAY_DRAW_RATE_AT_THIS_GROUND over the
    team's visits to this ground, then (if still NaN) to the team's overall
    all-time AWAY_DRAW_RATE.
    """
    current = match_df['AWAY_DRAW_RATE_AT_THIS_GROUND']
    if not np.isnan(current):
        return current
    team_mask = full_df['away_team_api_id'] == match_df['away_team_api_id']
    ground_mask = team_mask & (full_df['home_team_api_id'] == match_df['home_team_api_id'])
    imputed = full_df.loc[ground_mask, 'AWAY_DRAW_RATE_AT_THIS_GROUND'].mean(skipna=True)
    if np.isnan(imputed):
        imputed = full_df.loc[team_mask, 'AWAY_DRAW_RATE'].mean(skipna=True)
    return imputed
'''
Function to fill nan for the a team's form guide. We just get the most common form guide
and replace np.nan with it
'''
def fill_nan_form_guide(match_df, full_df, team_type, all_possibility):
    """Return the team's form guide, imputing missing values.

    A missing guide is replaced by the team's most common form guide across
    all its matches this season (home or away); if the team has no recorded
    guide at all, a random entry from all_possibility is returned.
    """
    import random
    from collections import Counter

    side_col = 'HOME_TEAM_FORM_GUIDE' if team_type == 'home' else 'AWAY_TEAM_FORM_GUIDE'
    current = match_df[side_col]
    if not pd.isnull(current):
        return current
    id_col = 'home_team_api_id' if team_type == 'home' else 'away_team_api_id'
    team_api_id = match_df[id_col]
    # Matches this season in which the team appears on either side.
    season_matches = full_df[(full_df['season'] == match_df['season']) &
                             ((full_df['home_team_api_id'] == team_api_id) |
                              (full_df['away_team_api_id'] == team_api_id))]
    guides = []
    for _, row in season_matches.iterrows():
        col = ('HOME_TEAM_FORM_GUIDE' if row['home_team_api_id'] == team_api_id
               else 'AWAY_TEAM_FORM_GUIDE')
        if not pd.isnull(row[col]):
            guides.append(row[col])
    if not guides:
        return random.choice(all_possibility)
    return Counter(guides).most_common(1)[0][0]
def fill_zeros_age_bmi(match_df, features, team, player_types):
    """Return (age, bmi) for one player, replacing zero placeholders.

    A zero age or bmi is treated as "missing" and replaced by the mean of
    the corresponding values over the given player_types of the same team.

    Args:
        match_df: row (Series-like) holding per-player columns.
        features: two column names, [age_column, bmi_column], for this player.
        team: column-name prefix identifying the team (e.g. 'home').
        player_types: player-role suffixes used to build the peer columns.

    Returns:
        tuple: (mean_age, mean_bmi), each imputed independently.
    """
    player_age = match_df[features[0]]
    player_bmi = match_df[features[1]]
    age_cols = [team + "_" + p + "_age" for p in player_types]
    bmi_cols = [team + "_" + p + "_bmi" for p in player_types]
    # Bug fix: the original if/if-else chain left mean_age unbound when only
    # the bmi was zero, and clobbered an already-imputed age whenever the
    # bmi was non-zero. Each value is now imputed independently.
    # Generalized: divide by len(player_types) instead of a hard-coded 3.
    mean_age = sum(match_df[age_cols]) / len(player_types) if player_age == 0 else player_age
    mean_bmi = sum(match_df[bmi_cols]) / len(player_types) if player_bmi == 0 else player_bmi
    return mean_age, mean_bmi
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 17 13:49:59 2020
@author: Hp
"""
import pandas as pd
# Vehicle-owner lookup: maps a vehicle registration number (the index) to a
# list of owner-detail dicts (Name, C.NO, Address, Ph no).
# NOTE(review): the schema is not uniform — the last entry carries two owner
# dicts and an extra 'Car No' key; consumers must not assume a fixed shape.
details = pd.Series([[{"Name":"Suresh","C.NO":"hjyt64882991z","Address":"H.No:12,2nd cross,RC road,Hassan","Ph no":"6677884455"}],
                     [{"Name":"Mahesh","C.NO":"yeud64738274k","Address":"H.No:45,1st stage,BG Nagar,Hassan","Ph no":"9988447332"}],
                     [{"Name":"Sujith","C.NO":"trjw63728364j","Address":"H.No:67,14th cross,,Banglore","Ph no":"9988446633"}],
                     [{"Name":"Siddiq","C.NO":"qwdk648274992m","Address":"H.No:87,2nd cross,Near City Bus stand,Hassan","Ph no":"9522664455"}],
                     [{"Name":"Aishwarya","C.NO":"dwde3127248b","Address":"H.No:101,2nd cross,Near Main Bus Stad,Hassan","Ph no":"8855221144"}],
                     [{"Name":"Kalki","C.NO":"dwde3127248b","Address":"H.No:108,2nd cross,Near Main Bus Stad,Mangaluru","Ph no":"8852222255"}],
                     [{"Name":"Kushal","Car No":"S7GT7","C.NO":"ddfe77886678b","Address":"H.No:108,2nd cross,Near Main Bus Stad Tumkur","Ph no":"8852222255"},
                      {"Name":"Nagesh","Car No":"57GT1","C.NO":"drut6564738281","Address":"H.No:123,Near Golden Temple ,Coorgh","Ph no":"7788665554"}]
                     ], index=['MCLRNF1', "AP28DU4396", "TS07FX3534","MA01AV8866","HR26D0555","6P609","S7GTT"])
|
def name(a):
    """Print a greeting for the given name."""
    greeting = 'hello, ' + str(a)
    print(greeting)
# Greet each person in the list via name().
names = ['sergei', 'misha', 'ilia', 'alex', 'sasha']
for i in names:
    name(i)
|
# Generated by Django 2.2.2 on 2019-06-08 11:20
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration: adds the CurrentAttendance table
    (date + batch label) to the mainattendance app."""

    dependencies = [
        ('mainattendance', '0002_auto_20190608_0644'),
    ]

    operations = [
        migrations.CreateModel(
            name='CurrentAttendance',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateField()),
                ('batch', models.CharField(max_length=10)),
            ],
        ),
    ]
|
import io, os, time, json
import logging
from datetime import datetime
import tempfile
import joblib
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report
from celery import current_task
from .app import app as celery_app
from application.utils.mysql_db import update_json_data
from application.utils.minio_connection import MinioClient
# logging
# Root-logger configuration: DEBUG level with file/line/function context in
# every record (applies process-wide, including library loggers).
logging.basicConfig(
    level=logging.DEBUG,
    format="%(asctime)s | {%(pathname)s:%(lineno)d} | %(module)s | %(levelname)s | %(funcName)s | %(message)s",
)
# Module-level MinIO client shared by all tasks. A connection failure is only
# logged, so `minio_client` may be undefined when a task runs — the task's
# own try/except then reports "dataset_id is wrong".
try:
    minio_obj = MinioClient()
    minio_client = minio_obj.client()
except Exception as e:
    logging.error(str(e))
def _record_failure(msg_result, duration=0):
    """Best-effort persistence of a failed-run record to MySQL.

    Helper for train_clf: builds the update payload keyed on the current
    Celery task id and logs (instead of silently swallowing) persistence
    errors so the task can still return its error message.
    """
    res_data = {
        "pk_field": "model_id",
        "model_id": current_task.request.id,
        "update_data": {"finished": datetime.now(), "duration": duration, "result": msg_result, },
    }
    try:
        update_json_data(res_data, "model_training")
    except Exception as e:
        logging.error(f"could not persist failure record: {str(e)}")


@celery_app.task(name="train_classifier")
def train_clf(data_json):
    """Celery task: train a classifier on a MinIO-hosted CSV dataset.

    data_json keys used: dataset_id, feature_column (comma-separated names),
    class_column, test_ratio, model_type ('logistic_regression' or
    'random_forest').

    Returns the sklearn classification report (dict, plus a 'duration' key)
    on success, or an error-message string on failure. Outcomes are recorded
    in the 'model_training' MySQL table; the fitted model is stored in the
    'models' MinIO bucket as <task_id>.joblib.
    """
    # get csv data
    try:
        file_data = minio_client.get_object("dataset", f'{data_json["dataset_id"]}.csv')
        buffer_data = io.BytesIO(file_data.data)
        df = pd.read_csv(buffer_data)
    except Exception as e:
        msg_result = "dataset_id is wrong"
        logging.error(msg_result + f": {str(e)}")
        _record_failure(msg_result)
        return msg_result
    # select x y
    try:
        X = df[data_json["feature_column"].split(",")]
        y = df[data_json["class_column"]]
        # test split (fixed seed for reproducible splits)
        X_train, X_test, y_train, y_test = train_test_split(
            X, y, test_size=data_json["test_ratio"], random_state=12345
        )
    except Exception as e:
        msg_result = "feature_column and class_column are wrong"
        logging.error(msg_result + f": {str(e)}")
        _record_failure(msg_result)
        return msg_result
    # training with selecting models!
    tic = time.time()
    try:
        if data_json["model_type"] == "logistic_regression":
            model = LogisticRegression()
        elif data_json["model_type"] == "random_forest":
            model = RandomForestClassifier()
        else:
            raise Exception("model name not found!")
        model.fit(X_train, y_train)
    except Exception as e:
        # NOTE(review): this message also covers fit() failures, not only an
        # unknown model_type — kept for backward compatibility.
        msg_result = "model name not found!"
        logging.error(msg_result + f": {str(e)}")
        _record_failure(msg_result)
        return msg_result
    toc = time.time()
    duration = toc - tic
    # get test result
    try:
        y_pred = model.predict(X_test)
        json_result = classification_report(y_test, y_pred, output_dict=True)
    except Exception as e:
        msg_result = "error when trying to predict test data!"  # typo fix: was "predic"
        logging.error(msg_result + f": {str(e)}")
        _record_failure(msg_result)
        return msg_result
    res_data = {
        "pk_field": "model_id",
        "model_id": current_task.request.id,
        "update_data": {"finished": datetime.now(), "duration": duration, "result": json.dumps(json_result), },
    }
    # save to minio
    logging.info("Write to minio: ")
    try:
        # Serialize through a temp file, then upload the bytes in one call.
        with tempfile.TemporaryFile() as fp:
            joblib.dump(model, fp)
            fp.seek(0)
            _buffer = io.BytesIO(fp.read())
            _length = _buffer.getbuffer().nbytes
            minio_client.put_object(
                bucket_name="models",
                object_name=f"{res_data['model_id']}.joblib",
                data=_buffer,
                length=_length,
            )
        logging.info("Saved to minio: ")
    except Exception as e:
        msg_result = "error when trying to save the model, try again later!"
        logging.error(msg_result + f": {str(e)}")
        _record_failure(msg_result, duration)
        return msg_result
    # save results to mysql
    try:
        update_json_data(res_data, "model_training")
    except Exception as e:
        logging.error(f"something went wrong during save to db: {str(e)}")
    json_result["duration"] = duration
    return json_result
|
# import pytz
# from datetime import datetime
# from timezonefinder import TimezoneFinder
# tf = TimezoneFinder()
# latitude, longitude = 28.67 , 77.22
# Time_zone=tf.timezone_at(lng=longitude, lat=latitude) # returns 'Europe/Berlin'
# print(Time_zone)
# # UTC = pytz.utc
# IST = pytz.timezone(Time_zone)
# # print("UTC in Default Format : ",
# # datetime.now(UTC))
# datetime_ist=datetime.now(IST)
# print("IST in Default Format : ", datetime_ist.strftime('%H:%M:%S %Z %z') )
# print("IST in Default Format : ", datetime_ist.strftime( '%d' " " '%B' " " '%A'" "'%Y') )
import requests
# Query Unsplash for collections matching "newdelhi".
# NOTE(review): the client_id (API key) is hard-coded in the URL — it should
# be moved to configuration/environment rather than committed to source.
r3=requests.get('https://api.unsplash.com/search/collections?page=1&query=newdelhi&client_id=VPYbAHkfJHrTcEOQxaw2SN0dDE6w81mLzzAKtKEELf4').json()
# print(r3)
# Each result carries a 'preview_photos' list; collect those lists,
# then flatten out the 'thumb' URL of every preview photo.
res = [sub['preview_photos'] for sub in r3['results']]
# print(res)
url=[sub['urls'] for item in res for sub in item ]
thumb=[item['thumb'] for item in url]
print(thumb)
|
import pyworld as pw
import sounddevice as sd
import librosa
import numpy as np
import math
from operator import sub
from scipy.io.wavfile import write
# Load the source vocal take; WORLD analysis below needs float64 samples.
x, fs = librosa.load('../data/f1_005.wav', dtype='double', sr=None)
_f0, t = pw.dio(x, fs)    # raw pitch extractor
f0 = pw.stonemask(x, _f0, t, fs)  # pitch refinement
sp = pw.cheaptrick(x, f0, t, fs)  # extract smoothed spectrogram
ap = pw.d4c(x, f0, t, fs)         # extract aperiodicity
#y = pw.synthesize(f0*2**(3/12), sp, ap, fs)
#mix = y[0:len(x)-len(y)] + x
#sd.play(mix, fs)
# Build a harmony pitch track: for each voiced frame, find the nearest note
# of a C-major-style scale (octave-folded against the reference frequencies
# in `phonetic`) and transpose up a major third (4 semitones) for scale
# degrees C/F/G, a minor third (3 semitones) otherwise.
chorus = np.zeros(f0.size)
phonetic = [16.352, 18.354, 20.602, 21.827, 24.5, 27.5, 30.868]  # C0..B0 (Hz)
for k, freq_f0 in enumerate(f0):
    if freq_f0==0:
        continue  # unvoiced frame: leave harmony silent
    # freq_f0 is a numpy scalar, so dividing by the list broadcasts.
    temp = freq_f0/phonetic
    log2temp = [math.log2(i) for i in temp]
    # Distance (in octaves) of each candidate from an exact octave multiple.
    diff = list(map(sub, log2temp, [round(i) for i in log2temp]))
    diff = [abs(i) for i in diff]
    idx = diff.index(min(diff))
    if idx==0 or idx==3 or idx==4:
        chorus[k] = freq_f0*2**(4/12)  # major third up
    else:
        chorus[k] = freq_f0*2**(3/12)  # minor third up
# Resynthesize the harmony voice and mix it (attenuated) under the original.
y = pw.synthesize(chorus, sp, ap, fs)
mix = y[0:len(x)-len(y)]*0.6 + x
sd.play(mix, fs)
write('f1_005_chorus_up3.wav', fs, mix)
"""
Helpers
==========================
Commonly used generic data functions
- Create date: 2018-12-16
- Update date: 2019-01-03
- Version: 1.1
Notes:
==========================
- v1.0: Initial version
- v1.1: Add join helper function
"""
import datetime
from dateutil.relativedelta import relativedelta
import pandas as pd
from ant_data.static.GEOGRAPHY import COUNTRY_LIST
from ant_data.static.TIME import TZ
def local_date_str(country):
    """Return today's date in the country's timezone as an ISO string.

    Raises Exception when country is not in COUNTRY_LIST.
    """
    if country not in COUNTRY_LIST:
        raise Exception(f'{country} is not a valid country')
    today = pd.Timestamp.now(tz=TZ.get(country)).date()
    return today.isoformat()
def local_date_dt(country):
    """Return today's date in the country's timezone as a datetime.date.

    Raises Exception when country is not in COUNTRY_LIST.
    """
    if country not in COUNTRY_LIST:
        raise Exception(f'{country} is not a valid country')
    return pd.Timestamp.now(tz=TZ.get(country)).date()
def shift_date_str(date_str, days=0, weeks=0, months=0, years=0):
    """Shift an ISO date string by the given offsets; return an ISO string."""
    delta = relativedelta(days=days, weeks=weeks, months=months, years=years)
    shifted = datetime.date.fromisoformat(date_str) + delta
    return shifted.isoformat()
def shift_date_dt(date_dt, days=0, weeks=0, months=0, years=0):
    """Shift a date object by the given offsets; return the shifted date."""
    delta = relativedelta(days=days, weeks=weeks, months=months, years=years)
    return date_dt + delta
def date_str(date_dt):
    """Convert a date object to its ISO-8601 string."""
    iso = date_dt.isoformat()
    return iso
def date_dt(date_str):
    """Parse an ISO-8601 date string into a datetime.date."""
    parsed = datetime.date.fromisoformat(date_str)
    return parsed
def start_interval_str(date_str, interval):
    """Return the ISO date string for the start of the interval containing date_str.

    interval is one of 'day', 'week', 'month', 'quarter', 'year'.

    Bug fix: the 'day' branch left `date` as a datetime.date, and the final
    `date.date()` call then raised AttributeError; converting through
    pd.Timestamp at the end handles every branch's type uniformly.

    NOTE(review): the 'week' branch moves *forward* to the last day of the
    ISO week (mirroring end_interval_str) rather than back to its first
    day; kept as-is pending confirmation of the intended week convention.
    """
    date = datetime.date.fromisoformat(date_str)
    if interval == 'day':
        pass  # a day is its own interval start
    elif interval == 'week':
        date = date + pd.DateOffset(days=(7 - date.isoweekday()))
    elif interval == 'month':
        date = date - pd.DateOffset(days=(date.day - 1))
    elif interval == 'quarter':
        qdate = (date.month - 1) // 3 + 1
        date = datetime.datetime(date.year, 3 * qdate - 2, 1)
    elif interval == 'year':
        date = datetime.datetime(date.year, 1, 1)
    return pd.Timestamp(date).date().isoformat()
def end_interval_str(date_str, interval):
    """Return the ISO date string for the end of the interval containing date_str.

    interval is one of 'day', 'week', 'month', 'quarter', 'year'.

    Bug fix: whenever `date` was left as a plain datetime.date ('day', or a
    date already at the month/quarter/year end), the final `date.date()`
    call raised AttributeError; converting through pd.Timestamp at the end
    handles every branch's type uniformly.
    """
    date = datetime.date.fromisoformat(date_str)
    if interval == 'day':
        pass  # a day is its own interval end
    elif interval == 'week':
        date = date + pd.DateOffset(days=(7 - date.isoweekday()))
    elif interval == 'month':
        if not pd.Timestamp(date).is_month_end:
            date = date + pd.offsets.MonthEnd()
    elif interval == 'quarter':
        if not pd.Timestamp(date).is_quarter_end:
            date = date + pd.offsets.QuarterEnd()
    elif interval == 'year':
        if not pd.Timestamp(date).is_year_end:
            date = date + pd.offsets.YearEnd()
    return pd.Timestamp(date).date().isoformat()
def start_interval_dt(date, interval):
    """Return the start of the interval containing `date`.

    interval is one of 'day', 'week', 'month', 'quarter', 'year'.
    The return type mirrors the original implementation: the input object
    for 'day', a pandas Timestamp for 'week'/'month', and a
    datetime.datetime for 'quarter'/'year'.
    """
    if interval == 'week':
        return date + pd.DateOffset(days=(7 - date.isoweekday()))
    if interval == 'month':
        return date - pd.DateOffset(days=(date.day - 1))
    if interval == 'quarter':
        quarter = (date.month - 1) // 3 + 1
        return datetime.datetime(date.year, 3 * quarter - 2, 1)
    if interval == 'year':
        return datetime.datetime(date.year, 1, 1)
    return date  # 'day' (and unknown intervals) pass through unchanged
def end_interval_dt(date, interval):
    """Return the end of the interval containing `date`.

    interval is one of 'day', 'week', 'month', 'quarter', 'year'.
    Mirrors the original's return types: the input object is passed through
    for 'day' and for dates already at the requested period end; otherwise
    a pandas Timestamp results from the offset arithmetic.
    """
    if interval == 'week':
        return date + pd.DateOffset(days=(7 - date.isoweekday()))
    if interval == 'month' and not pd.Timestamp(date).is_month_end:
        return date + pd.offsets.MonthEnd()
    if interval == 'quarter' and not pd.Timestamp(date).is_quarter_end:
        return date + pd.offsets.QuarterEnd()
    if interval == 'year' and not pd.Timestamp(date).is_year_end:
        return date + pd.offsets.YearEnd()
    return date
# TODO:P2
def convert_timestamp_local(timestamp):
    """Convert a timestamp to local time. Unimplemented stub (always returns
    None) — see the TODO:P2 marker above."""
    pass
def join_df(index, join_type, *args):
    """Helper function to join multiple DataFrames or columns from multiple
    DataFrames.

    Args:
        index (str): Column name on which to perform the join. Must be the
            SAME across all DataFrames.
        join_type (str): Join type, options are 'left', 'right', 'inner',
            'outer'.
        *args: Variable length argument list composed of DataFrames or
            DataFrame columns (Series).

    Returns:
        DataFrame: Joined DataFrame (arguments merged pairwise left to right).

    Raises:
        Exception: if any argument is neither a DataFrame nor a Series, or
            fewer than two arguments are passed.
    """
    # Use public isinstance checks instead of comparing against the private
    # pd.core.frame.* paths, which are not a stable pandas API.
    if not all(isinstance(x, (pd.DataFrame, pd.Series)) for x in args):
        raise Exception('Invalid arg type to merge')
    if len(args) < 2:
        raise Exception('At least two arguments must be passed to join')

    frames = [x if isinstance(x, pd.DataFrame) else pd.DataFrame(x) for x in args]
    merged = frames[0]
    for frame in frames[1:]:
        merged = merged.merge(frame, on=index, how=join_type)
    return merged
|
from meshgen.maincastlemesh import MainCastleMesh
from meshgen.castlewallmesh import CastleWallMesh
from meshgen.quadmesh import QuadMesh
class CastleMesh:
    """Builds a castle mesh: the main keep joined with a surrounding wall."""

    def __init__(self):
        # Keep dimensions and the wall spacing factor (defaults preserved).
        self.total_size1 = 15
        self.total_size2 = 15
        self.total_size3 = 50
        self.space_to_wall = 1.2

    def create(self):
        """Create and return the combined castle + wall mesh builder."""
        keep = MainCastleMesh()
        keep.total_size1 = self.total_size1
        keep.total_size2 = self.total_size2
        keep.total_size3 = self.total_size3
        builder = keep.create()
        # Footprint quad scaled by the wall spacing; currently unused because
        # the boundary hookup below is disabled (kept to preserve behavior).
        quad = QuadMesh()
        quad.size1 = self.space_to_wall * self.total_size1
        quad.size2 = self.space_to_wall * self.total_size2
        wall = CastleWallMesh()
        # wall.boundary = quad.create().get_submesh(["border"])
        return wall.create().join(builder)
|
from django.http import Http404, HttpResponse
from django.shortcuts import render
from rest_framework.generics import (
ListCreateAPIView,
RetrieveUpdateDestroyAPIView,
)
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework import status
from .serializers import (
ProdutoSerializer,
LoteSerializer
)
from .models import (
Lote,
Produto,
Tipo
)
# Create your views here.
class LoteList(ListCreateAPIView):
    """List all Lote rows (GET) and create new ones (POST) via DRF defaults."""
    queryset = Lote.objects.all()
    serializer_class = LoteSerializer
class LoteDetail(RetrieveUpdateDestroyAPIView):
    """Retrieve/update/delete a single Lote by primary key via DRF defaults."""
    queryset = Lote.objects.all()
    serializer_class = LoteSerializer
class LoteBusca(APIView):
    """Search endpoint: case-insensitive substring search on Lote.nome."""

    def get_object(self, nome):
        # QuerySet.filter never raises DoesNotExist — an empty queryset is
        # returned instead — so the original try/except-Http404 around this
        # call was dead code. An empty result serializes to [] for clients.
        return Lote.objects.filter(nome__icontains=nome)

    def get(self, request, nome):
        """Return the matching lotes serialized as a JSON list."""
        lotes = self.get_object(nome)
        lotes_s = LoteSerializer(lotes, many=True)
        return Response(lotes_s.data)
class ProdutoList(ListCreateAPIView):
    """List/create endpoint for Produto, ordered by name.

    POST creates the Produto plus its Lote rows in one request; on any
    failure it falls back to DRF's default create().
    """
    queryset = Produto.objects.all().order_by('nome')
    serializer_class = ProdutoSerializer

    def post(self, request, *args, **kwargs):
        """Create a Produto and bulk-create `lotes` Lote rows for it.

        Expected payload keys: nome_produto, preco_unidade, unidades,
        codigo, tipo (pk), data_fabricacao, validade, lotes (count).
        """
        try:
            data = request.data
            preco_lote = float(data['preco_unidade']) * int(data['unidades'])
            tipo = Tipo.objects.get(pk=int(data['tipo']))
            produto = Produto(
                nome=data['nome_produto'],
                preco=float(data['preco_unidade']),
                codigo=data['codigo'],
                tipo=tipo,
                fabricacao=data['data_fabricacao'],
                validade=data['validade'],
                unidades=int(data['unidades'])
            )
            produto.save()
            lotes = [
                Lote(
                    codigo=data['codigo'],
                    quantidade=int(data['unidades']),
                    fabricacao=data['data_fabricacao'],
                    validade=data['validade'],
                    produto=produto,
                    preco=float(preco_lote)
                )
                for _ in range(int(data['lotes']))
            ]
            Lote.objects.bulk_create(lotes)
            return Response(ProdutoSerializer(produto).data, status=status.HTTP_201_CREATED)
        except Exception as exc:
            # Was a bare `print()` that silently discarded the error; log why
            # the custom path failed before delegating to DRF's create().
            # NOTE(review): if produto.save() succeeded but bulk_create failed,
            # the fallback may create a duplicate Produto — confirm intent.
            print(f"ProdutoList.post fallback to default create: {exc}")
            return self.create(request, *args, **kwargs)
class ProdutoDetail(RetrieveUpdateDestroyAPIView):
    """Retrieve/update/delete a single Produto by primary key via DRF defaults."""
    queryset = Produto.objects.all().order_by('nome')
    serializer_class = ProdutoSerializer
class ProdutoBusca(APIView):
    """Search endpoint: case-insensitive substring search on Produto.nome."""

    def get_object(self, nome):
        # QuerySet.filter never raises DoesNotExist — an empty queryset is
        # returned instead — so the original try/except-Http404 around this
        # call was dead code. An empty result serializes to [] for clients.
        return Produto.objects.filter(nome__icontains=nome)

    def get(self, request, nome):
        """Return the matching produtos serialized as a JSON list."""
        produtos = self.get_object(nome)
        produtos_s = ProdutoSerializer(produtos, many=True)
        return Response(produtos_s.data)
|
# Under MIT License, see LICENSE.txt
from pyhermes import McuCommunicator
from Engine.Communication.sender.sender_base_class import Sender
from Engine.robot import MAX_LINEAR_SPEED, MAX_ANGULAR_SPEED
from Util.constant import KickForce, DribbleState
from Util.geometry import clamp
import numpy as np
class SerialCommandSender(Sender):
    """Sends robot commands to the MCU over serial via pyhermes."""

    def connect(self, connection_info):
        """Open the MCU link; connection_info is unused for the serial link."""
        return McuCommunicator(timeout=0.1)

    def send_packet(self, packets_frame):
        """Forward every robot command in the frame to the MCU.

        Commands containing a NaN component are skipped. Linear and angular
        speeds are clamped to the robot limits; linear speeds are divided by
        1000 before sending (presumably mm/s -> m/s — TODO confirm units).

        Raises RuntimeError when the installed pyhermes lacks
        sendSpeedAdvance (detected via AttributeError).
        """
        try:
            for packet in packets_frame.packet:
                cmd = packet.command
                if np.isnan(cmd.x) or np.isnan(cmd.y) or np.isnan(cmd.orientation):
                    continue
                vx = clamp(cmd.x, -MAX_LINEAR_SPEED, MAX_LINEAR_SPEED)
                vy = clamp(cmd.y, -MAX_LINEAR_SPEED, MAX_LINEAR_SPEED)
                vtheta = clamp(cmd.orientation, -MAX_ANGULAR_SPEED, MAX_ANGULAR_SPEED)
                self.connection.sendSpeedAdvance(packet.robot_id,
                                                 vx / 1000,
                                                 vy / 1000,
                                                 vtheta,
                                                 packet.charge_kick,
                                                 self.translate_kick_force(packet.kick_force),
                                                 self.translate_dribbler_speed(packet.dribbler_state))
        except AttributeError:
            raise RuntimeError("You should update your pyhermes, by reinstalling the requirement:"
                               "'pip install -r requirements.txt --upgrade'")

    @staticmethod
    def translate_kick_force(kick_force: KickForce) -> int:
        """Map a KickForce level to the MCU kick power value."""
        return {KickForce.NONE: 0,
                KickForce.LOW: 10,     # 1 m/s
                KickForce.MEDIUM: 18,  # 2 m/s
                KickForce.HIGH: 60}[kick_force]  # 5.5 m/s

    @staticmethod
    def translate_dribbler_speed(dribbler_speed: DribbleState) -> int:
        """Map a DribbleState to the MCU dribbler speed value."""
        return {DribbleState.AUTOMATIC: 0,
                DribbleState.FORCE_STOP: 0,
                DribbleState.FORCE_SPIN: 3}[dribbler_speed]
import urllib2, urllib
import json, csv
import pprint as pp
import random
import time
from datetime import datetime, timedelta
import os, re, sys
from boto.s3.connection import S3Connection
from boto.s3.key import Key
import boto
def convert_dataypes(x):
    """Coerce a Yahoo quote field to float, stripping currency-style symbols.

    NOTE(review): '[$-+]' is a character *range* ($ through +, i.e.
    $%&'()*+), not the literal set {$, -, +}; kept as-is pending
    confirmation of the intended characters.

    Returns x unchanged when it cannot be parsed as a number.
    """
    try:
        return float(re.sub('[$-+]', '', x))
    except Exception:
        # Non-numeric fields (names, 'N/A', None) pass through untouched.
        # Fixed: `except Exception, e` is Python-2-only syntax and `e` was
        # unused; `except Exception:` is valid on both Python 2.6+ and 3.
        return x
def get_json(url):
    """Fetch a URL and parse its body as JSON.

    Returns {} on any failure (network error, bad status, invalid JSON) so
    callers can probe the result with `in` checks without error handling.
    """
    try:
        src = urllib2.urlopen(url).read()
        rsp = json.loads(src)
    except Exception:
        # Was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt
        # are no longer swallowed.
        rsp = {}
    return rsp
def get_weekday():
    """Return the weekday name for 'now' shifted forward by three hours.

    NOTE(review): this uses a +3h offset while get_hour uses -5h — confirm
    which timezone correction is actually intended.
    """
    day_names = {1: 'Monday', 2: 'Tuesday', 3: 'Wednesday', 4: 'Thursday',
                 5: 'Friday', 6: 'Saturday', 7: 'Sunday'}
    shifted = datetime.now() + timedelta(hours=3)
    return day_names.get(shifted.isoweekday())
def get_hour():
    """Return the 'H:00-H+1:00' bucket label for 'now' shifted back five hours
    (offset assumes the host clock is UTC and the market is US/Eastern)."""
    buckets = dict((h, '%d:00-%d:00' % (h, h + 1)) for h in range(24))
    shifted = datetime.now() + timedelta(hours=-5)
    return buckets.get(shifted.hour)
# --- Top-level scrape job (Python 2): pull quotes for all tickers from the
# Yahoo YQL API, write them to a timestamped CSV, upload it to S3, then
# delete the local copy. ---
dirname, filename = os.path.split(os.path.abspath(__file__))
base_uri = "http://query.yahooapis.com/v1/public/yql?"
# define some stocks
stocks = [line.strip() for line in open(dirname + '/tickers.txt').read().split('\n')]
#encapsulate for the query
stocks = ["'" + stock + "'" for stock in stocks]
# Map ticker symbol -> 401k fund name (first two CSV columns).
with open(dirname + '/tickers_funds.csv', 'rU') as funds:
    FundReader = csv.reader(funds)
    FundDict = dict((rows[0],rows[1]) for rows in FundReader)
random.shuffle(stocks)
# Timestamp pieces used both in the local filename and the S3 key layout.
cur_date = datetime.now() #+ timedelta(hours=8)
time_stamp = str(cur_date)
year = str(cur_date.year)
month = str(cur_date.month)
day = str(cur_date.day)
hour = str(cur_date.hour)
date_plug = 'y='+year+'/m='+month+'/d='+day+'/h='+hour+'/'
#ubuntu_filename = '/Users/admin/Desktop/stockdata_'+time_stamp+'.csv'
ubuntu_filename = '/home/ubuntu/repo/flatfiles/stockdata_'+time_stamp+'.csv'
s3_filename = 'stockdata/'+date_plug+'stockdata_'+time_stamp+'.csv'
f = open(ubuntu_filename, 'wb')
#f = open('/Users/admin/Desktop/Demo_Data/TickerTracker/Stock_Data/stockdata_'+time_stamp+'.csv', 'wb')
w = csv.writer(f)
# Fixed column order for the CSV header and every quote row below.
columns = [u'AfterHoursChangeRealtime', u'Ask', u'AskRealtime', u'AverageDailyVolume', u'Bid', u'BidRealtime', u'BookValue', u'Change', u'ChangeFromYearHigh', u'ChangeFromYearLow', u'ChangePercentRealtime', u'ChangeRealtime', u'ChangeinPercent', u'DaysHigh', u'DaysLow', u'DaysRange', u'DaysValueChange', u'DividendShare', u'DividendYield', u'EBITDA', u'EarningsShare', u'ErrorIndicationreturnedforsymbolchangedinvalid', u'FiftydayMovingAverage', u'LastTradePriceOnly', u'MarketCapRealtime', u'MarketCapitalization', u'Name', u'Open', u'PEGRatio', u'PERatio', u'PercentChangeFromYearHigh', u'PercentChange', u'PercentChangeFromTwoHundreddayMovingAverage', u'PercentChangeFromYearLow', u'PreviousClose', u'PriceBook', u'PricePaid', u'ShortRatio', u'StockExchange', u'Symbol', u'TradeDate', u'TwoHundreddayMovingAverage', u'Volume', u'YearHigh', u'YearLow', 'datestamp', 'timestamp', 'funds', 'dayofweek', 'hourofday']
w.writerow(columns)
# Query in chunks of 150 symbols to stay within YQL request limits.
for block in range(0, len(stocks), 150):
    stocks_subset = stocks[block:block+150]
    # define the parameters
    query = {
        "q":"select * from yahoo.finance.quotes where symbol in (%s)" % ', '.join(stocks_subset),
        "env":"http://datatables.org/alltables.env",
        "format":"json"
    }
    # create the rest request
    url = base_uri + urllib.urlencode(query)
    rsp = get_json(url)
    quotes = []
    if 'query' in rsp and \
        'results' in rsp['query']\
        and 'quote' in rsp['query']['results']:
        quotes = rsp['query']['results']['quote']
    for quote in quotes:
        # Coerce numeric-looking strings to floats where possible.
        for col in quote:
            quote[col] = convert_dataypes(quote[col])
        #Add day and time columns
        quote['hourofday'] = str(get_hour())
        quote['dayofweek'] = str(get_weekday())
        cur_time = time.time()
        est_date = datetime.now() + timedelta(hours=-5) #offset assumes AWS uses UTC
        quote['timestamp'] = int(cur_time)
        quote['datestamp'] = str(est_date)
        #Add 401k plan fund names to each relevant row.
        quote['funds'] = FundDict.get(quote['Symbol'])
        pp.pprint(quote)
        w.writerow([quote.get(col) for col in columns])
    print "*"*80
f.close()
#Import s3 credentials from ubuntu directory
cred_file = open('/home/ubuntu/keys/s3_creds_mmx.json')
creds = json.load(cred_file)
AWS_ACCESS_KEY_ID = creds['aws_access_key_id']
AWS_SECRET_ACCESS_KEY = creds['aws_secret_access_key']
cred_file.close()
#write files to s3 bucket
s3 = boto.connect_s3(AWS_ACCESS_KEY_ID,AWS_SECRET_ACCESS_KEY)
bucket = s3.get_bucket('metamx-shecht')
key = bucket.new_key(s3_filename)
key.set_contents_from_filename(ubuntu_filename)
#delete file from ubuntu after saving it to s3
os.unlink(ubuntu_filename)
|
# -*- coding: utf-8 -*-
import json
import time
import pili.api as api
class Stream(object):
    """A live stream within a hub (Pili streaming API wrapper).

    Attributes:
        hub: str, the hub name
        key: str, the stream name
        disabledTill: int, Unix timestamp before which the stream is
            unavailable; -1 means disabled forever
        converts: list of str, the stream's transcode profiles
    """
    def __init__(self, auth, hub, key):
        self.__auth__ = auth
        if not (hub and key):
            raise ValueError('invalid key')
        self.key = key
        self.hub = hub
        # Lazily-populated attribute cache; filled on first access by refresh().
        self.__data__ = None
    def __getattr__(self, attr):
        # Lazy attribute lookup: fetch stream info from the API on first use.
        if not self.__data__:
            self.refresh()
        try:
            return self.__data__ if attr == "data" else self.__data__[attr]
        except KeyError, e:
            # Unknown attribute: return the error message instead of raising.
            return e.message
    def __repr__(self):
        return self.to_json()
    # refresh: actively re-fetch the stream info (performs one RPC call).
    def refresh(self):
        data = api.get_stream(self.__auth__, hub=self.hub, key=self.key)
        self.__data__ = {}
        for p in ["disabledTill", "converts"]:
            self.__data__[p] = data[p] if p in data else None
        self.__data__["key"] = self.key
        self.__data__["hub"] = self.hub
        return self
    # disable: block the stream until the Unix timestamp `till`
    # (None -> -1, i.e. disabled forever).
    def disable(self, till=None):
        if till is None:
            till = -1
        return api.disable_stream(self.__auth__, hub=self.hub, key=self.key, till=till)
    # disabled: True when the stream is currently blocked.
    def disabled(self):
        return self.disabledTill == -1 or self.disabledTill > int(time.time())
    # enable: re-enable the stream (till=0 lifts the block).
    def enable(self):
        return api.disable_stream(self.__auth__, hub=self.hub, key=self.key, till=0)
    """
    status 查询直播信息
    返回值:
        startAt: 直播开始的Unix时间戳
        clientIP: 推流的客户端IP
        bps: 正整数 码率
        fps:
            audio: 正整数,音频帧率
            video: 正整数,视频帧率
            data: 正整数,数据帧率
    """
    # status: query live-broadcast info (startAt, clientIP, bps, fps breakdown).
    def status(self):
        res = api.get_status(self.__auth__, hub=self.hub, key=self.key)
        return res
    """
    history 查询直播历史
    输入参数:
        start_second: Unix时间戳,起始时间,可选,默认不限制起始时间
        end_second: Unix时间戳,结束时间,可选,默认为当前时间
    返回值: 如下结构的数组
        start: Unix时间戳,直播开始时间
        end: Unix时间戳,直播结束时间
    """
    # history: list past broadcast sessions (optional start/end Unix bounds);
    # returns a list of {start, end} items.
    def history(self, start_second=None, end_second=None):
        res = api.get_history(self.__auth__, hub=self.hub, key=self.key, start=start_second, end=end_second)
        return res["items"]
    # save_as: alias of saveas(), kept for backward compatibility.
    def save_as(self, start_second=None, end_second=None, **kwargs):
        return self.saveas(start_second, end_second, **kwargs)
    """
    saveas 保存直播回放到存储空间
    输入参数:
        start_second: Unix时间戳,起始时间,可选,默认不限制起始时间
        end_second: Unix时间戳,结束时间,可选,默认为当前时间
        fname: 保存的文件名,可选,不指定会随机生产
        format: 保存的文件格式,可选,默认为m3u8,如果指定其他格式则保存动作为异步模式
        pipeline: dora的私有队列,可选,不指定则使用默认队列
        notify: 保存成功后的回调通知地址
        expireDays: 对应ts文件的过期时间
            -1 表示不修改ts文件的expire属性
            0 表示修改ts文件生命周期为永久保存
            >0 表示修改ts文件的的生命周期为expireDay
    返回值:
        fname: 保存到存储空间的文件名
        persistentID: 异步模式时,持久化异步处理任务ID,通常用不到该字段
    """
    # saveas: persist a replay of the broadcast to storage. Optional kwargs
    # (fname, format, pipeline, notify, expireDays) are forwarded to the API;
    # returns {fname, persistentID}.
    def saveas(self, start_second=None, end_second=None, **kwargs):
        kwargs["hub"] = self.hub
        kwargs["key"] = self.key
        if start_second is not None:
            kwargs["start"] = start_second
        if end_second is not None:
            kwargs["end"] = end_second
        res = api.stream_saveas(self.__auth__, **kwargs)
        return res
    """
    snapshot 保存直播截图到存储空间
    输入参数:
        time: Unix时间戳,要保存的时间点,默认为当前时间
        fname: 保存的文件名,可选,不指定会随机生产
        format: 保存的文件格式,可选,默认为jpg
    返回值:
        fname: 保存到存储空间的文件名
    """
    # snapshot: save a still frame of the broadcast to storage
    # (optional kwargs: time, fname, format); returns {fname}.
    def snapshot(self, **kwargs):
        kwargs["hub"] = self.hub
        kwargs["key"] = self.key
        res = api.stream_snapshot(self.__auth__, **kwargs)
        return res
    """
    update_converts 更改流的转码规格
    输入参数:
        profiles: 字符串数组,实时转码规格
    返回值: 无
    """
    # update_converts: replace the stream's live transcode profiles.
    # NOTE(review): mutable default argument (profiles=[]) — harmless here
    # since it is only read, but worth switching to None in a future change.
    def update_converts(self, profiles=[]):
        res = api.update_stream_converts(self.__auth__, hub=self.hub, key=self.key, profiles=profiles)
        return res
    def to_json(self):
        # Serializes the cached attribute dict (triggers refresh via __getattr__).
        return json.dumps(self.data)
|
""" Watch the depth of a given symbol.
"""
import signal
import sys
from binance import (
BinanceClient,
configure_app,
get_default_arg_parser,
)
def quit_handler(signum, frame):
    """Signal handler: terminate the process cleanly on SIGINT/SIGTERM."""
    raise SystemExit(0)
# Install the handler for Ctrl-C and termination signals so the watcher
# exits cleanly instead of dumping a KeyboardInterrupt traceback.
signal.signal(signal.SIGINT, quit_handler)
signal.signal(signal.SIGTERM, quit_handler)
def main():
    """Parse CLI args and stream the order-book depth of one symbol.

    Registers two client event coroutines, then blocks in watch_depth.
    """
    arg_parser = get_default_arg_parser()
    arg_parser.add_argument('symbol', type=str,
            help='watch the depth of symbol <SYMBOL>.')
    arg_parser.add_argument('-l', '--depth-limit', type=int,
            help='show the <DEPTH> latest orders on each side.')
    settings, config = configure_app(arg_parser=arg_parser)
    symbol = config['args']['symbol']
    depth_limit = config['args']['depth_limit']
    client = BinanceClient(settings['apikey'], settings['apisecret'])
    @client.event
    async def on_depth_ready(depth):
        """ This coroutine runs when the inital /depth API call returns.
        """
        print('depth ready')
        client.depth_cache[symbol].pretty_print(depth_limit)
    @client.event
    async def on_depth_event(event):
        """ This coroutine runs whenever a @depth websocket event is received.
        """
        print(f'update id: {event["u"]}')  # print the event id
        client.depth_cache[symbol].pretty_print(depth_limit)
    client.watch_depth(symbol)
# Script entry point.
if __name__ == '__main__':
    main()
|
from django import forms
from django.contrib.auth.models import User
from socialapp.models import User_Personal
from django.core import validators
from django.core.exceptions import ValidationError
def validate_gender(value):
    """Form validator: accept only 'Male'/'Female' (case-insensitive).

    Raises forms.ValidationError for any other value. The debug print that
    preceded the raise has been removed — the error message already reaches
    the user through the form.
    """
    if str(value).upper() not in ("MALE", "FEMALE"):
        raise forms.ValidationError("gender should be Male or Female")
class UserForm(forms.ModelForm):
    """Registration form for the built-in User model with Bootstrap widgets."""
    first_name = forms.CharField(label='First Name',
                                 widget=forms.TextInput(
                                     attrs={'class': 'form-control input-group-lg', 'autocomplete': 'on','placeholder':'First Name'}))
    last_name = forms.CharField(label='Last Name',
                                widget=forms.TextInput(
                                    attrs={'class': 'form-control input-group-lg', 'autocomplete': 'on','placeholder':'Last Name'}))
    username = forms.CharField(label='User Name',
                               widget=forms.TextInput(attrs={'class': 'form-control input-group-lg', 'autocomplete': 'on','placeholder':'Username'}))
    email = forms.EmailField(label='Email',
                             widget=forms.EmailInput(attrs={'class': 'form-control input-group-lg', 'autocomplete': 'on','placeholder':'Email'}))
    password=forms.CharField(label='Password',
                             widget=forms.PasswordInput(attrs={'class': 'form-control input-group-lg', 'autocomplete': 'on','placeholder':'Password'}))

    class Meta:
        model=User
        fields=('first_name','last_name','username','email','password')

    def clean(self):
        """Cross-field validation: reject emails already registered.

        Bug fix: uses .get() because 'email' is absent from cleaned_data
        when the email field itself failed validation — the original
        subscript raised KeyError in that case. Also returns cleaned_data,
        per the Django Form.clean contract.
        """
        all_clean_data = super().clean()
        email = all_clean_data.get('email')
        if email and User.objects.filter(email=email).exists():
            raise forms.ValidationError("Email already exists")
        return all_clean_data
class PersonalInfoForm(forms.ModelForm):
    """Profile details collected alongside the account: date of birth,
    gender (validated to Male/Female), city and country."""

    dob = forms.CharField(
        label='Date of Birth',
        widget=forms.TextInput(attrs={'class': 'form-control input-group-lg',
                                      'autocomplete': 'on',
                                      'placeholder': 'Date of Birth'}))
    gender = forms.CharField(
        label='Gender',
        validators=[validate_gender],
        widget=forms.TextInput(attrs={'class': 'form-control input-group-lg',
                                      'autocomplete': 'on',
                                      'placeholder': 'Gender'}))
    city = forms.CharField(
        label='City',
        widget=forms.TextInput(attrs={'class': 'form-control input-group-lg',
                                      'autocomplete': 'on',
                                      'placeholder': 'City'}))
    country = forms.CharField(
        label='Country',
        widget=forms.TextInput(attrs={'class': 'form-control input-group-lg',
                                      'autocomplete': 'on',
                                      'placeholder': 'Country'}))

    class Meta:
        model = User_Personal
        fields = ('dob', 'gender', 'city', 'country')
|
from flask import Flask, render_template, url_for, request, redirect
import os
import json
import glob
from datetime import datetime
from app import app
@app.route('/')
@app.route('/index')
def index():
    """List the saved 'name' and 'task' screenshots on the front page."""
    # Windows path
    # names = sorted(os.listdir(os.getcwd() + r'\app\static\img\name'))
    # tasks = sorted(os.listdir(os.getcwd() + r'\app\static\img\task'))
    # Linux path (relative to the process working directory)
    path1 = os.getcwd() + r'/app/static/img/name'
    path2 = os.getcwd() + r'/app/static/img/task'
    names = sorted(os.listdir(path1))
    tasks = sorted(os.listdir(path2))
    return render_template('show.html', names=names, tasks=tasks)
@app.route("/new")
def new():
return render_template("new.html")
@app.route("/upload", methods=["POST"])
def upload():
"""Handle the upload of a file."""
form = request.form
# Create a unique "session ID" for this particular batch of uploads.
upload_key = datetime.strftime(datetime.now(), '%Y-%m-%d')
# Is the upload using Ajax, or a direct POST by the form?
is_ajax = False
if form.get("__ajax", None) == "true":
is_ajax = True
# Target folder for these uploads.
if form.get("imgtype", None) == "name":
target = "app/static/img/name"
elif form.get("imgtype", None) == "task":
target = "app/static/img/task"
print("=== Form Data ===")
for key, value in list(form.items()):
print(key, "=>", value)
for upload in request.files.getlist("file"):
filename = upload.filename.rsplit("/")[0].replace("Screenshot_", "")
destination = "/".join([target, filename])
print("Accept incoming file:", filename)
print("Save it to:", destination)
upload.save(destination)
if is_ajax:
return ajax_response(True, upload_key)
else:
return redirect(url_for("upload_complete", uuid=upload_key))
def ajax_response(status, msg):
    """Serialize an Ajax reply as JSON: {"status": "ok"|"error", "msg": ...}."""
    return json.dumps({
        "status": "ok" if status else "error",
        "msg": msg,
    })
|
# coding=utf-8
#------------------------------------------------------------------------------------------------------
# TDA596 Labs - Server Skeleton
# server/server.py
# Input: Node_ID total_number_of_ID
# Student Group:
# Student names: John Doe & John Doe
#------------------------------------------------------------------------------------------------------
# We import various libraries
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler # Socket specifically designed to handle HTTP requests
import sys # Retrieve arguments
from urlparse import parse_qs # Parse POST data
from httplib import HTTPConnection # Create a HTTP connection, as a client (for POST requests to the other vessels)
from urllib import urlencode # Encode POST content into the HTTP header
from codecs import open # Open a file
from threading import Thread # Thread Management
#------------------------------------------------------------------------------------------------------
# Global variables for HTML templates
board_frontpage_footer_template = ""
board_frontpage_header_template = ""
boardcontents_template = ""
entry_template = ""
#------------------------------------------------------------------------------------------------------
# Static variables definitions
PORT_NUMBER = 80
#------------------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------------------
class BlackboardServer(HTTPServer):
    """HTTP server holding the blackboard entries and knowing how to
    propagate updates to the other vessels (IPs 10.1.0.<id>)."""

    def __init__(self, server_address, handler, node_id, vessel_list):
        """node_id -- this vessel's numeric ID (its IP is 10.1.0.<node_id>).
        vessel_list -- IPs of every vessel, possibly including our own."""
        HTTPServer.__init__(self, server_address, handler)
        # Dictionary of entry_id -> rendered HTML entry.
        self.store = {}
        # Next id to insert (pre-incremented in add_value_to_store).
        self.current_key = -1
        # BUG FIX: the constructor parameter is `node_id`; the original
        # silently read the module-level global `vessel_id` instead.
        self.vessel_id = node_id
        # The list of other vessels.
        self.vessels = vessel_list

    def add_value_to_store(self, value):
        """Render `value` through entry_template and store it under a fresh key."""
        self.current_key = self.current_key + 1
        newEntry = entry_template % ('entries/' + str(self.current_key), self.current_key, value)
        self.store[self.current_key] = newEntry

    def modify_value_in_store(self, key, value):
        """Replace the entry at `key` if it exists (not implemented yet)."""
        pass

    def delete_value_in_store(self, key):
        """Delete the entry at `key`; unknown keys are reported and ignored."""
        try:
            self.store.pop(key)
            print('Entry with ' + str(key) + ' is deleted')
        except KeyError:
            print('Value not in board...')

    def contact_vessel(self, vessel_ip, path, action, key, value):
        """POST (action, key, value) to one vessel; return True on HTTP 200."""
        success = False
        # The variables must be encoded in the URL format.
        post_content = urlencode({'action': action, 'key': key, 'value': value})
        # The HTTP header must declare URL-encoded content.
        headers = {"Content-type": "application/x-www-form-urlencoded"}
        try:
            # BUG FIX: the original referenced an undefined name `vessel`
            # here (and in the except below); the parameter is `vessel_ip`.
            # All vessels use the same port; timeout bounds a dead peer.
            connection = HTTPConnection("%s:%d" % (vessel_ip, PORT_NUMBER), timeout=30)
            # We only use POST to send data (PUT and DELETE not supported).
            connection.request("POST", path, post_content, headers)
            response = connection.getresponse()
            # The body should be empty; only the status matters.
            if response.status == 200:
                success = True
        except Exception as e:
            print("Error while contacting %s" % vessel_ip)
            print(e)
        return success

    def propagate_value_to_vessels(self, path, action, key, value):
        """Send the update to every vessel except ourselves."""
        for vessel in self.vessels:
            # Skip our own IP, or we would create an infinite update loop.
            if vessel != ("10.1.0.%s" % self.vessel_id):
                # A good practice would be to retry on failure; we try once.
                self.contact_vessel(vessel, path, action, key, value)
#------------------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------------------
# This class implements the logic when a server receives a GET or POST request
# It can access to the server data through self.server.*
# i.e. the store is accessible through self.server.store
class BlackboardRequestHandler(BaseHTTPRequestHandler):
    """Implements the GET/POST logic. Server state is reachable through
    self.server.* (e.g. the store via self.server.store)."""

    def set_HTTP_headers(self, status_code=200):
        """Write the response status line and the standard HTML headers."""
        self.send_response(status_code)
        self.send_header("Content-type", "text/html")
        self.end_headers()

    def parse_POST_request(self):
        """Parse the URL-encoded POST body into a dict of value lists.

        NOTE: this consumes self.rfile — call it at most once per request.
        """
        length = int(self.headers['Content-Length'])
        return parse_qs(self.rfile.read(length), keep_blank_values=1)

    def do_GET(self):
        """Dispatch GET requests based on the requested path."""
        print("Receiving a GET on path %s" % self.path)
        html_response = ''
        if self.path == '/':
            html_response = self.do_GET_Index()
        elif self.path == '/board':
            html_response = self.do_GET_Board()
        self.set_HTTP_headers(200)
        self.wfile.write(html_response.encode("utf8"))

    def do_GET_Index(self):
        """Build the full front page: header + board contents + footer."""
        html_header = board_frontpage_header_template
        html_board = self.do_GET_Board()
        html_footer = board_frontpage_footer_template % (self.server.vessels)
        return html_header + html_board + html_footer

    def do_GET_Board(self):
        """Render the board contents from every entry in the store."""
        html_board_content = ''
        # BUG FIX: the original scanned count in [0, len(store)], which
        # skips entries whose key exceeds the store size once other keys
        # have been deleted. Iterate the actual keys instead.
        for key in sorted(self.server.store):
            html_board_content += self.server.store[key]
        return boardcontents_template % (self.server.vessel_id, html_board_content)

    def do_POST(self):
        """Dispatch POST requests based on the path and the parsed body."""
        print("Receiving a POST on %s" % self.path)
        if self.path == '/board':
            self.do_POST_Board()
        else:
            # BUG FIX: the request body can be read only once, but the
            # original called parse_POST_request() in every elif branch
            # (the second call blocks on an already-drained rfile). Parse
            # once, and guard against a missing 'delete' field.
            post_params = self.parse_POST_request()
            delete_flag = post_params.get('delete', [None])[0]
            if delete_flag == '1':
                self.do_DELETE_value()
            elif delete_flag == '0':
                print('Modify')
        self.set_HTTP_headers(200)
        # Retransmission to the other vessels is deactivated in the
        # skeleton, but must be used for the lab.
        retransmit = False
        if retransmit:
            # do_POST sends the reply only when the function finishes, so
            # heavy work / propagation belongs in a thread.
            thread = Thread(target=self.server.propagate_value_to_vessels, args=("action", "key", "value"))
            # Daemon: the thread dies with the server process.
            thread.daemon = True
            thread.start()

    def do_POST_Board(self):
        """Add a new entry (form field 'entry') to the store."""
        post_params = self.parse_POST_request()
        self.server.add_value_to_store(post_params['entry'][0])

    def do_DELETE_value(self):
        """Delete the entry whose id is the last character of the path."""
        url_path_id = int(self.path[-1:])
        self.server.delete_value_in_store(url_path_id)
#------------------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------------------
# Execute the code
# Execute the code
if __name__ == '__main__':
    # Read the HTML templates once at startup.
    boardcontents_template = open('./boardcontents_template.html', 'r', encoding='utf-8').read()
    board_frontpage_header_template = open('./board_frontpage_header_template.html', 'r', encoding='utf-8').read()
    board_frontpage_footer_template = open('./board_frontpage_footer_template.html', 'r', encoding='utf-8').read()
    entry_template = open('./entry_template.html', 'r', encoding='utf-8').read()
    vessel_list = []
    vessel_id = 0
    # Checking the arguments
    if len(sys.argv) != 3: # 2 args, the script and the vessel name
        print("Arguments: vessel_ID number_of_vessels")
    else:
        # We need to know the vessel IP
        vessel_id = int(sys.argv[1])
        # Build every vessel IP (10.1.0.1 .. 10.1.0.N); our own IP may be
        # included — propagation filters it out.
        for i in range(1, int(sys.argv[2])+1):
            vessel_list.append("10.1.0.%d" % i) # We can add ourselves, we have a test in the propagation
        # We launch a server
        server = BlackboardServer(('', PORT_NUMBER), BlackboardRequestHandler, vessel_id, vessel_list)
        print("Starting the server on port %d" % PORT_NUMBER)
        try:
            server.serve_forever()
        except KeyboardInterrupt:
            server.server_close()
            print("Stopping Server")
|
import pandas as pd

dataset = pd.read_csv("AllCountries.csv")
selected_data = dataset.loc[:, ['Country', 'LandArea']]
#print(selected_data)
# Print every country whose land area exceeds 2000.
for row in selected_data.itertuples():
    # BUG FIX: itertuples() yields namedtuples, which do NOT support
    # string subscripting — row['LandArea'] raises TypeError. Use
    # attribute access (row.LandArea) or positional row[2] instead.
    if row.LandArea > 2000:
        print(row.Country)
def readFile(filename):
    """Read the user CSV at `filename`, skipping the header row.

    Returns a list of SysUser objects; prints a message and returns an
    empty list when the file is missing or malformed.
    """
    userList = []
    try:
        with open(filename) as data:
            data.readline()  # skip the header line
            for line in data:
                eachLine = line.rstrip().split(',')
                user = SysUser(eachLine)
                userList.append(user)
    # BUG FIX: `except FileNotFoundError or ValueError` evaluates the
    # `or` first and only catches FileNotFoundError; a tuple is needed
    # to catch both exception types.
    except (FileNotFoundError, ValueError):
        print('Invalid file name')
    return userList
class SysUser:
    """A system user built from one CSV row:
    [first, middle, last, password, security_question_idx, security_answer, age].
    """

    def __init__(self, userList):
        self._firstName = userList[0]
        self._middleName = userList[1]
        self._lastName = userList[2]
        self._password = userList[3]
        self._securityQuestion = userList[4]
        self._securityAnswer = userList[5]
        self._age = userList[6]

    def getName(self):
        """Return 'First M. Last' (middle name abbreviated)."""
        return self._firstName + ' ' + self._middleName[0] + '.' + ' ' + self._lastName

    def getUsername(self):
        """Return first+middle initials plus up to 6 chars of the last name."""
        return self._firstName[0].lower() + self._middleName[0].lower() + self._lastName[0:6].lower()

    def getAge(self):
        """Return the age as an int (stored as the raw CSV string)."""
        return int(self._age)

    def checkPassword(self, entered):
        """True when `entered` matches the stored password."""
        return entered == self._password

    def getSecQuestion(self):
        """Return the security-question index as an int."""
        return int(self._securityQuestion)

    def checkSecAnswer(self, entered):
        """True when `entered` matches the stored security answer."""
        return entered == self._securityAnswer

    def setSecQuestion(self, num):
        self._securityQuestion = int(num)

    def setSecAnswer(self, answer):
        # BUG FIX: the original assigned to self._securityAnwer (typo),
        # so a changed answer was never seen by checkSecAnswer().
        self._securityAnswer = answer
header = f' Name Username Age \n----------- -------- ---'
# Function for testing user logins
def loginUser(users, username, inputPass='', inputSecAns=''):
    """Simulate a login attempt; print diagnostics and return True/False.

    Checks, in order: username exists, password matches, security answer
    matches. Uses the module-level `sec_ques` list for question text.
    """
    print()
    print('username: ' + username)
    idx = -1
    for i in range(len(users)):
        if username == users[i].getUsername():
            idx = i
    if idx < 0:
        # BUG FIX: the original fell through with idx == -1 and went on
        # to check users[-1] — the LAST user's password and answer.
        print('*** Username invalid')
        print('*** Login failed. Please try again.')
        return False
    loginValid = True
    print('password: ' + inputPass)
    if not users[idx].checkPassword(inputPass):
        print('*** Password invalid')
        loginValid = False
    print(sec_ques[users[idx].getSecQuestion()] + ' ' + inputSecAns)
    if not users[idx].checkSecAnswer(inputSecAns):
        print('*** Security answer invalid')
        loginValid = False
    if loginValid:
        print('*** Welcome, ' + users[idx].getName())
    else:
        print('*** Login failed. Please try again.')
    return loginValid
# Given list of security questions
sec_ques = [ 'What\'s your favorite color?',
'In what city was your mother born?',
'What\'s the name of your first pet?',
'What\'s the name of your favorite sports team?',
'What was the make of your first car?',
'What\'s your school mascot?' ]
#### Test cases for testing user logins
if __name__ == '__main__':
    # This first test case will fail (wrong file name, no underscore)
    users = readFile ('sysUsers.csv')
    # This test case will work
    users = readFile ('sys_users.csv')
    # The first login is valid, while the other three fail (for different reasons)
    loginUser (users, 'jtrobins', 'mightyMouse', 'Honda')
    loginUser (users, 'tpbradshaw', 'endZone', 'Steelers')
    loginUser (users, 'jagarner', 'uglyPeople', 'Big Red')
    loginUser (users, 'dsbrown', 'bubbleGum', 'Atalnta')
    print ()
    # Print name, username and age of every user under the table header.
    print(header)
    for person in readFile('sys_users.csv'):
        display = f'{person.getName():<20} {person.getUsername():<8} {person.getAge():>3}'
        print(display)
|
#!/usr/bin/python
import sys
import csv
"""
Your mapper function should print out 10 lines containing longest posts, sorted in
ascending order from shortest to longest.
"""
def mapper(inputFile, outputFile):
    """Write the 10 longest posts (by the text in column index 4) of a
    tab-separated dump to `outputFile`, ordered shortest to longest.

    NOTE: files are opened in binary mode ('rb'/'wb') for the csv
    module, which is Python 2 usage; under Python 3 these must be text
    mode with newline=''.
    """
    with open(inputFile,'rb') as tsvin, open(outputFile, 'wb') as csvout:
        reader = csv.reader(tsvin, delimiter='\t')
        writer = csv.writer(csvout, delimiter='\t', quotechar='"', quoting=csv.QUOTE_ALL)
        line_list = []
        first_line = True
        for line in reader:
            if first_line: # skip the header line
                first_line = False
                continue
            line_list.append(line)
        # Sort descending by post length, keep the top 10, then emit
        # them reversed, i.e. in ascending length order.
        line_list.sort(key = lambda x: len(x[4]), reverse = True)
        for line in reversed(line_list[0:10]):
            writer.writerow(line)
# This function allows you to test the mapper with the provided test file
def main():
    """Run the mapper over the sample TSV dump."""
    # FIX: the original used Python-2-only `print "start"` statements even
    # though single-argument print("...") behaves identically on both
    # Python 2 and 3.
    print("start")
    mapper('forum_node.tsv', 'forum_longest_lines.csv')
    print("done")

if __name__ == "__main__":
    main()
def check():
    """Return the index i (0..2) of the first adjacent equal pair in the
    module-level sorted 4-element list `lst`; implicitly returns None if
    no adjacent pair exists (with 3 distinct values among 4 dice a pair
    always exists, so the caller never sees None).
    """
    for idx in range(3):
        if lst[idx] == lst[idx + 1]:
            return idx
# Read N rounds of four dice each and print the maximum prize won.
mmax, cost = -1, 0
for i in range(int(input())):
    lst = sorted(list(map(int, input().split())))
    # Number of distinct face values decides the prize category.
    s = len(set(lst))
    if s == 1:
        # four of a kind
        cost = 50000 + lst[0] * 5000
    elif s == 2:
        if lst[0] == lst[1] and lst[2] == lst[3]:
            # two pairs (sorted, so the pairs sit at [0,1] and [2,3])
            cost = 2000 + lst[0] * 500 + lst[3] * 500
        else:
            # three of a kind (in the sorted list lst[1] is always part
            # of the triple)
            cost = 10000 + lst[1] * 1000
    elif s == 3:
        # exactly one pair; check() finds the paired value's index
        cost = 1000 + lst[check()] * 100
    else:
        # all distinct: prize from the highest die
        cost = lst[3] * 100
    if cost > mmax:
        mmax = cost
print(mmax)
|
# Python 2 script: read a non-negative integer and print its factorial.
a=int(raw_input())
fact=1
if a==0:
    print "1"
elif a>0:
    for i in range(1,a+1):
        fact=fact*i
    print fact
# NOTE(review): negative input prints nothing at all — confirm intended.
|
from transformers import BertForSequenceClassification, BertTokenizerFast, Trainer, TrainingArguments
from nlp import load_dataset
import torch
import numpy as np
from sklearn.metrics import accuracy_score, precision_recall_fscore_support
# Fine-tuned checkpoint plus the matching pretrained tokenizer.
model = BertForSequenceClassification.from_pretrained('models/BERT_full_question')
tokenizer = BertTokenizerFast.from_pretrained('bert-base-uncased')
def tokenize(batch):
    """Tokenize a batch of examples to fixed-length (256) padded inputs."""
    return tokenizer(batch['text'], truncation=True, max_length = 256, add_special_tokens=True, padding='max_length', return_attention_mask=True)
# Binary label: 0 = 'School' difficulty, 1 = anything else.
test_dataset = load_dataset('json', data_files={'test': 'dataset_full_question/quanta_test.json'}, field='questions')['test']
test_dataset = test_dataset.map(lambda example: {'label': [0 if example['difficulty'] == 'School' else 1]})
# Single-batch tokenization over the whole split, then torch tensors.
test_dataset = test_dataset.map(tokenize, batched=True, batch_size=len(test_dataset))
test_dataset.set_format('torch', columns=['input_ids', 'attention_mask', 'label'])
def compute_metrics(pred):
    """Compute accuracy / precision / recall / F1 for binary predictions.

    pred -- a prediction object exposing .label_ids and .predictions
    (logits; argmax over the last axis yields the predicted class).
    """
    y_true = pred.label_ids
    y_pred = pred.predictions.argmax(-1)
    precision, recall, f1, _ = precision_recall_fscore_support(y_true, y_pred, average='binary')
    return {
        'accuracy': accuracy_score(y_true, y_pred),
        'f1': f1,
        'precision': precision,
        'recall': recall,
    }
# Evaluation-only Trainer: no TrainingArguments needed, just the model,
# the metric function and the prepared test split.
trainer = Trainer(
    model=model,
    compute_metrics=compute_metrics,
    eval_dataset=test_dataset
)
print(trainer.evaluate())
|
# -*- coding:utf-8 -*-
from openerp import api, models, fields
class HRSalaryRule(models.Model):
    """Extends hr.salary.rule with 'dynamic' rules whose amount is read
    from a payslip input line carrying the rule's own code."""
    _inherit = "hr.salary.rule"
    is_dynamic = fields.Boolean("Is dynamic Rule ?")
    is_compute_prorata = fields.Boolean("Is compute prorata ?")
    account_id = fields.Many2one("account.account", "Compte")
    @api.onchange("is_dynamic")
    def onchange_is_dynamic(self):
        """Force amount_select to 'code' when the rule becomes dynamic."""
        if self.is_dynamic:
            self.amount_select = 'code'
    @api.model
    def create(self, values):
        # For dynamic rules: generate the python formula that reads the
        # rule's own input line, and create the matching hr.rule.input.
        res = super(HRSalaryRule, self).create(values)
        if res.is_dynamic:
            res.amount_select = 'code'
            res.amount_python_compute = "result = inputs.%s.amount if inputs.%s and inputs.%s.amount else 0" % (res.code, res.code, res.code)
            self.env['hr.rule.input'].create({'code': res.code, 'name': res.name, 'input_id': res.id})
        return res
    @api.multi
    def write(self, values):
        # NOTE(review): this re-applies the dynamic formula whenever
        # "is_dynamic" appears in `values` — even when it is being set to
        # False — and replaces all existing input lines. Confirm intended.
        res = super(HRSalaryRule, self).write(values)
        if "is_dynamic" in values:
            for rec in self:
                rec.amount_select = 'code'
                rec.amount_python_compute = "result = inputs.%s.amount if inputs.%s and inputs.%s.amount else 0" % (rec.code, rec.code, rec.code)
                rec.input_ids.unlink()
                self.env['hr.rule.input'].create({'code': rec.code, 'name': rec.name, 'input_id': rec.id})
        return res
|
# -*- coding: utf-8 -*-
"""Default configuration file. We check for SECRET_KEY,
DATABASE_PASSWORD and DATABASE_HOST on the environment. These values are
also set in instance/config.py.
To run this example, you will need to put those values in a config.py
file in the instance folder, or create a start up scrip where you export
these values and then start the application.
For example::
$ export DATABASE_PASSWORD='my-database-password'
$ export DATABASE_HOST="my-database-server"
$ export FLASK_CONFIG="development" # or test, production, ...
$ python manage.py
"""
import os
class Config:
    """Base Flask settings shared by every environment."""
    # Core Flask flags; SECRET_KEY falls back to a dev default when the
    # environment variable is unset.
    DEBUG = False
    SECRET_KEY = os.environ.get('SECRET_KEY') or 'myspecialsecretkey'
    # OTRS database connection settings (password/host come from the
    # environment when available, empty string otherwise).
    DATABASE_NAME = 'otrs'
    DATABASE_USER = 'otrs'
    DATABASE_PASSWORD = os.environ.get('DATABASE_PASSWORD') or ""
    DATABASE_HOST = os.environ.get('DATABASE_HOST') or ""
    DATABASE_PORT = 5432
class DevelopmentConfig(Config):
    """Settings for local development: debug on, SQLite file database."""
    DEBUG = True
    DEBUG_TB_INTERCEPT_REDIRECTS = False
    # Development database lives at <project>/data/temp.db
    BASEDIR = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
    DATABASE = os.path.join(BASEDIR, "data", "temp.db")
class ProductionConfig(Config):
    """Settings for production deployments: debugging disabled."""
    DEBUG = False
class TestingConfig(Config):
    """Settings for the test suite: CSRF checks off, SQLite file database."""
    TESTING = True
    WTF_CSRF_ENABLED = False
    # Test database lives at <project>/data/temp.db
    BASEDIR = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
    DATABASE = os.path.join(BASEDIR, "data", "temp.db")
# Map FLASK_CONFIG names to their settings class.
config = {
    'development': DevelopmentConfig,
    'testing': TestingConfig,
    'production': ProductionConfig,
    'default': DevelopmentConfig
}
"""dictionary: Different configuration settings."""
|
import cv2
# Open a video capture device (index 0; the original comment said
# "second camera" — confirm which device is intended).
cap = cv2.VideoCapture(0)
# Dump the capture properties (cv2.CAP_PROP_* ids 0..46).
for i in range(47):
    print("No.={} parameter={}".format(i,cap.get(i)))
while(True):
    # Grab one frame from the camera.
    ret, frame = cap.read()
    # Display the frame.
    cv2.imshow('frame', frame)
    # 's' prints the frame shape; 'q' leaves the loop.
    key = cv2.waitKey(1)
    if key == ord('s'):
        print(frame.shape)
    if key == ord('q'):
        break
# Release the camera.
cap.release()
# Close all OpenCV windows.
cv2.destroyAllWindows()
import numpy as np
from sklearn.metrics import mean_squared_error
import multiple_input as mi
# Define predict_with_network()
def predict_with_network(input_data_row, weights):
    """Forward-propagate one input row through a two-node hidden layer
    and an output node, all with ReLU activations (mi.relu).

    weights -- dict with keys 'node_0', 'node_1' (hidden weights) and
    'output' (output-layer weights). Returns the scalar model output.
    """
    hidden_layer_outputs = np.array([
        mi.relu((input_data_row * weights['node_0']).sum()),
        mi.relu((input_data_row * weights['node_1']).sum()),
    ])
    # Weighted sum of the hidden activations, then the output ReLU.
    final_input = (hidden_layer_outputs * weights['output']).sum()
    return mi.relu(final_input)
def main():
    """Compare two weight settings on the sample data: print the MSE of
    each network's predictions against the known targets (weights_1 is
    tuned to predict perfectly on this data)."""
    samples = np.array([[3, 5], [1, -1], [0, 0], [8, 4]])
    weights_0 = {'node_0': [2, 1], 'node_1': [1, 2], 'output': [1, 1]}
    # Second set zeroes the node-1 contribution at the output layer.
    weights_1 = {'node_0': [2, 1], 'node_1': [1, 2], 'output': [1, 0]}
    # The actual target values used to calculate the error.
    target_actual = [1, 3, 5, 7]
    model_0_out = [predict_with_network(row, weights_0) for row in samples]
    model_1_out = [predict_with_network(row, weights_1) for row in samples]
    print(mean_squared_error(target_actual, model_0_out))
    print(mean_squared_error(target_actual, model_1_out))
if __name__ == '__main__':
    main()
|
'''
Created on 23 avr. 2019
@author: gtexier
'''
from enum import IntEnum, unique
@unique
class Intersection(IntEnum):
    """Enumeration of the intersection types, named after the number and
    direction of the open paths (values 0..8 are fixed and unique)."""
    PathFour = 0
    PathThreeLeftFront = 1
    PathThreeRightFront = 2
    PathThreeLeftRight = 3
    PathTwoLeft = 4
    PathTwoRight = 5
    PathTwoFront = 6
    PathOne = 7
    PathZero = 8
class IntersectionError(Exception):
    """Exception type for intersection-related errors."""
    pass
|
'''
Python 2 script: print the floating numbers x and y rounded to 2 decimal
places (round() keeps a float, '{:.2f}' would keep trailing zeros as text).
'''
x = 3.1415926
y = 12.9999
print "Original number:", x
# 1st variant: round()
print "New number:", round(x, 2)
# 2nd variant: string formatting
#print "New number:", '{:.2f}'.format(x)
print "Original number:", y
# 1st variant
print "New number:", round(y, 2)
# 2nd variant
#print "New number:", '{:.2f}'.format(y)
#!/usr/bin/env python3
import random
import copy
def random_pirellone(m, n, seed="any", solvable=False):
    """Generate a random m x n 0/1 grid ("pirellone") and the seed used.

    Every row is a copy or the complement of one random base row, which
    makes the grid solvable by row/column switches; when `solvable` is
    False one random cell is flipped to break solvability.

    Returns (grid, seed).
    """
    if seed=="any":
        random.seed()
        seed = random.randrange(0,1000000)
    else:
        seed = int(seed)
    random.seed(seed)
    line = [random.randint(0, 1) for _ in range(n)]
    inv = [int(not x) for x in line]
    pirellone = []
    for _ in range(m):
        if random.randint(0, 1) == 0:
            pirellone.append(line[:])
        else:
            pirellone.append(inv[:])
    if not solvable:
        # BUG FIX: the original drew `row` from range(n) and `col` from
        # range(m) — swapped — raising IndexError on non-square grids.
        row = random.randrange(0, m)
        col = random.randrange(0, n)
        pirellone[row][col] = 1-pirellone[row][col]
    return pirellone, seed
def switch_row(i, pirellone):
    """Toggle (0 <-> 1) every light of row i, in place."""
    row = pirellone[i]
    for j, v in enumerate(row):
        row[j] = 1 - v
def switch_col(j, pirellone):
    """Toggle (0 <-> 1) every light of column j, in place."""
    for row in pirellone:
        row[j] = 1 - row[j]
def is_solvable(pirellone, m, n):
    """A grid can be switched fully off by row/column toggles iff every
    row equals the first row or its exact complement; return True then."""
    first = pirellone[0]
    for i in range(m):
        row = pirellone[i]
        if row[0] == first[0]:
            # Row must match the first row everywhere.
            matches = all(row[j] == first[j] for j in range(n))
        else:
            # Row must be the complement of the first row everywhere.
            matches = all(row[j] != first[j] for j in range(n))
        if not matches:
            return False
    return True
def print_pirellone(pirellone):
    """Print the grid, one row per line, values space-separated."""
    for row in pirellone:
        print(' '.join(str(v) for v in row))
def off_lista(pirellone,solu,TAc, LANG):
    """Grade a solution given as a list of moves ('r<i>' / 'c<j>',
    1-based indices up to 99): apply every move to the grid, then report
    through TAc whether all lights are off; if the resulting grid is not
    fully clearable, delegate to check_numberlight instead.
    """
    l=len(solu)
    empty=[[0 for j in range(0,len(pirellone[0]))] for i in range(0,len(pirellone))]
    for i in range(0,l):
        if solu[i][0]=='r':
            if len(solu[i])>2:
                # two-digit index: e.g. 'r12' -> row 11 (0-based)
                switch_row(int(solu[i][1])*10+int(solu[i][2])-1,pirellone)
            else:
                switch_row(int(solu[i][1])-1,pirellone)
        elif solu[i][0]=='c':
            if len(solu[i])>2:
                switch_col(int(solu[i][1])*10+int(solu[i][2])-1,pirellone)
            else:
                switch_col(int(solu[i][1])-1,pirellone)
    if is_solvable(pirellone, len(pirellone), len(pirellone[0])):
        if empty==pirellone:
            TAc.OK()
            TAc.print("This sequence turns off all lights", "green", ["bold"])
            return
        else:
            TAc.NO()
            TAc.print("This sequence doesn't turn off all lights see what happens using your solution:", "red", ["bold"])
            print_pirellone(pirellone)
            return
    else:
        check_numberlight(pirellone,count(pirellone),TAc, LANG)
        return
def off(pirellone,rs,cs,TAc, LANG): # solution known to be given as a subset (was: "sapendo sottoinsieme")
    """Grade a solution given as 0/1 masks over rows (`rs`) and columns
    (`cs`): apply every selected switch, then report through TAc whether
    all lights are off; if the resulting grid is not fully clearable,
    delegate to check_numberlight instead.
    """
    m=len(rs)
    n=len(cs)
    empty=[[0 for j in range(0,n)] for i in range(0,m)]
    for i in range(0,m):
        if rs[i]:
            switch_row(i,pirellone)
    for j in range(0,n):
        if cs[j]:
            switch_col(j,pirellone)
    if is_solvable(pirellone, len(pirellone), len(pirellone[0])):
        if empty==pirellone:
            TAc.OK()
            TAc.print("This subset turns off all lights", "green", ["bold"])
            return
        else:
            TAc.NO()
            TAc.print("This subset doesn't turn off all lights see what happens using your solution:", "red", ["bold"])
            print_pirellone(pirellone)
            return
    else:
        check_numberlight(pirellone,count(pirellone),TAc, LANG)
        return
def check_numberlight(a,answer,TAc, LANG):
    """Given grid `a` and `answer` = number of lights still on after the
    candidate solution, decide whether more lights could have been
    switched off and report OK/NO through TAc.

    NOTE(review): the heuristic scans consecutive row pairs (up, down)
    and counts 2x2 column patterns into `matrix`, resetting the used
    column `index` at every odd row boundary; its exact correctness
    criterion is not evident from this chunk — confirm against the
    problem statement. Also note nothing is reported when
    answer < matrix.
    """
    # Odd row indices: points where the used-column list is reset.
    s=[]
    for i in range(1,len(a),2):
        s.append(i)
    up=0
    down=1
    matrix=0
    index=[]
    while up<len(a) and down<len(a):
        # Try every ordered column pair (i, j) not yet consumed.
        for i in range(len(a[0])-1):
            for j in range(i+1,len(a[0])):
                if j not in index and i not in index:
                    if a[up][i]==0 and a[down][i]==0:
                        if (a[up][j]==1 and a[down][j]==0) or (a[up][j]==0 and a[down][j]==1):
                            matrix+=1
                            index.append(j)
                            index.append(i)
                    if (a[up][i]==1 and a[down][i]==0) or (a[up][i]==0 and a[down][i]==1):
                        if a[up][j]==0 and a[down][j]==0:
                            matrix+=1
                            index.append(j)
                            index.append(i)
                    if a[up][i]==1 and a[down][i]==1:
                        if (a[up][j]==1 and a[down][j]==0) or (a[up][j]==0 and a[down][j]==1):
                            matrix+=1
                            index.append(j)
                            index.append(i)
                    if (a[up][i]==1 and a[down][i]==0) or (a[up][i]==0 and a[down][i]==1):
                        if a[up][j]==1 and a[down][j]==1:
                            matrix+=1
                            index.append(j)
                            index.append(i)
        up+=1
        down+=1
        if down in s:
            index=[]
    if answer==matrix:
        TAc.OK()
        TAc.print("You can not turn off more lights", "green", ["bold"])
        return
    elif answer>matrix:
        TAc.NO()
        TAc.print("You can turn off more lights, check it: ", "red", ["bold"])
        print_pirellone(a)
        return
def count(p):
    """Return the total number of lights that are on in grid p."""
    return sum(sum(row) for row in p)
def off_lista_noprint(pirellone,solu):
    """Silent variant of off_lista: apply the move list ('r<i>'/'c<j>',
    1-based indices up to 99) to the grid and return True iff every
    light ends up off. Mutates `pirellone` in place.
    """
    l=len(solu)
    empty=[[0 for j in range(0,len(pirellone[0]))] for i in range(0,len(pirellone))]
    for i in range(0,l):
        if solu[i][0]=='r':
            if len(solu[i])>2:
                # two-digit index: e.g. 'r12' -> row 11 (0-based)
                switch_row(int(solu[i][1])*10+int(solu[i][2])-1,pirellone)
            else:
                switch_row(int(solu[i][1])-1,pirellone)
        elif solu[i][0]=='c':
            if len(solu[i])>2:
                switch_col(int(solu[i][1])*10+int(solu[i][2])-1,pirellone)
            else:
                switch_col(int(solu[i][1])-1,pirellone)
    if empty==pirellone:
        return True
    else:
        return False
def soluzione(pirellone,m,n):
    """Return a move list ('r<i>'/'c<j>', 1-based) that turns all lights
    off, or None (implicitly) when the grid is unsolvable.

    NOTE: mutates `pirellone` — on success the grid is fully off.
    """
    if is_solvable(pirellone, m, n):
        R=[0]*len(pirellone)
        C=[0]*len(pirellone[0])
        # Clear every lit cell column-by-column (rows scanned in order;
        # after a column switch later rows of that column are clear too).
        for i in range(0,m):
            for j in range(0,n):
                if pirellone[i][j]:
                    C[j] = 1
                    switch_col(j,pirellone)
        # Then clear column 0 via row switches.
        for i in range(0,m):
            if pirellone[i][0]:
                R[i] = 1
                switch_row(i,pirellone)
        lista=[]
        for i in range(m):
            if R[i]:
                lista.append(f"r{i+1}")
        for j in range(n):
            if C[j]:
                lista.append(f"c{j+1}")
        return lista
def soluzione_min(pirellone,m,n):
    """Return the shorter of two canonical solutions (columns-first vs
    rows-first normalization) as a move list; None (implicitly) when the
    grid is unsolvable. Works on deep copies, leaving the input intact.
    """
    pirellone1=copy.deepcopy(pirellone)
    if is_solvable(pirellone, m, n):
        R1=[0]*len(pirellone)
        C1=[0]*len(pirellone[0])
        R2=[0]*len(pirellone)
        C2=[0]*len(pirellone[0])
        # Pass 1: clear row 0 via column switches, then column 0 via rows.
        for j in range(0,n):
            if pirellone1[0][j]:
                C1[j] = 1
                switch_col(j,pirellone1)
        for i in range(0,m):
            if pirellone1[i][0]:
                R1[i] = 1
                switch_row(i,pirellone1)
        pirellone2=copy.deepcopy(pirellone)
        # Pass 2: rows first, then columns.
        for i in range(0,m):
            if pirellone2[i][0]:
                R2[i] = 1
                switch_row(i,pirellone2)
        for j in range(0,n):
            if pirellone2[0][j]:
                C2[j] = 1
                switch_col(j,pirellone2)
        lista=[]
        # Keep whichever pass needs fewer switches (ties favour pass 1).
        if (sum(R1)+sum(C1))<=(sum(R2)+sum(C2)):
            for i in range(m):
                if R1[i]:
                    lista.append(f"r{i+1}")
            for j in range(n):
                if C1[j]:
                    lista.append(f"c{j+1}")
        else:
            for i in range(m):
                if R2[i]:
                    lista.append(f"r{i+1}")
            for j in range(n):
                if C2[j]:
                    lista.append(f"c{j+1}")
        return lista
def soluzione_min_step(pirellone, m, n):
    """Perform AT MOST ONE solving step on *pirellone* and print the result.

    Scans the first row for a lit cell (column switch), then the first
    column (row switch).  As soon as one switch is applied, the board is
    printed and the single-move list is printed via stampa_lista — the
    function returns immediately after the FIRST switch, so a caller must
    invoke it repeatedly to solve the whole board step by step.
    Returns None (stampa_lista returns None), also when nothing was done
    or the board is unsolvable.
    """
    lista=[]
    if is_solvable(pirellone, m, n):
        R1=[0]*len(pirellone)
        C1=[0]*len(pirellone[0])
        # Column phase: first lit cell in row 0 triggers a column switch.
        for j in range(0,n):
            if pirellone[0][j]:
                C1[j] = 1
                lista.append(f"c{j+1}")
                switch_col(j,pirellone)
                print_pirellone(pirellone)
                # NOTE: early return — only one switch per call (by design,
                # this is the interactive "one step at a time" variant).
                return stampa_lista(lista)
        # Row phase: only reached once row 0 is completely dark.
        for i in range(0,m):
            if pirellone[i][0]:
                R1[i] = 1
                switch_row(i,pirellone)
                lista.append(f"r{i+1}")
                print_pirellone(pirellone)
                return stampa_lista(lista)
    return
def solution_toolong(sol,m,n):
longsol=sol
#stampa_lista(sol)
for i in range(random.randint(0,int(len(sol)/2)-1 )):
num=sol[random.randint(0, len(sol)-1)]
longsol.append(num)
longsol.append(num)
if random.randint(0,1)==1:
num=f"r{random.randint(1,m)}"
if num not in sol:
longsol.append(num)
longsol.append(num)
if random.randint(0,2)==1:
num=f"c{random.randint(1,n)}"
if num not in sol:
longsol.append(num)
longsol.append(num)
random.shuffle(longsol)
return(longsol)
def stampa_lista(lista):
    """Print every move in *lista* on one line, each followed by a space.

    An empty list prints just a newline.  Always returns None.
    """
    line = "".join(f"{move} " for move in lista)
    print(line)
    return
import dash
import kdash
from flask import Flask
# Build the Flask server and let kdash attach its Dash app(s) to it.
server = kdash.Add_Dash(Flask(__name__))
# dash_app = dash.Dash(server=server, url_base_pathname='/dataview/')
# (Removed a stray bare `kdash.Add_Dash` expression statement here — it was a
# no-op attribute access with no effect.)
if __name__ == '__main__':
    # Listen on all interfaces, port 8888; debug=True is for development only.
    server.run('0.0.0.0', 8888, debug=True)
|
from django.contrib import admin
from .models import Category, Keyword
# Register your models here.
# Branding for the Django admin: browser title, page header and index title
# (Chinese UI labels for a pet-platform management system).
admin.AdminSite.site_title = '宠物平台管理系统'
admin.AdminSite.site_header = '旅行者Ⅰ号'
admin.AdminSite.index_title = '平台管理'
class KeywordInline(admin.StackedInline):
    """Stacked inline so Keyword rows can be edited on the Category page."""
    model = Keyword
    extra = 3  # number of blank Keyword forms shown by default
@admin.register(Category)
class Category(admin.ModelAdmin):
    """Admin configuration for the Category model.

    NOTE(review): this admin class shadows the imported ``Category`` model
    name for the rest of the module — consider renaming it to
    ``CategoryAdmin`` (not done here to avoid changing a public name).
    """
    fields = ['petName']
    list_display = ['petName', 'listKeyword']
    inlines = [KeywordInline]
|
# Generated by Django 2.2.6 on 2019-11-16 17:35
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: redefine the choices of BaseOption.color."""
    dependencies = [
        ('base', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='baseoption',
            name='color',
            # NOTE(review): the choice labels look like age ranges rather
            # than colors — verify against the model's intent.
            field=models.CharField(choices=[('LT', 'under 21'), ('MD', '21 ~ 23'), ('DK', 'over 23'), ('NO', 'Other type')], max_length=2),
        ),
    ]
|
# zur Kommunikation ueber die serielle Schnittstelle
import serial
import string
# fuer die Datenbank-Verbindung (drauf achten, dass richtige fuer Python-Version installiert)
import mysql
import mysql.connector
from time import sleep
from time import gmtime, strftime
# Serial-port parameters for the weather station:
# data bits
bytesize = serial.EIGHTBITS
# baud rate
baud = 115200
# parity
parity = serial.PARITY_NONE
# stop bits
stopbit = serial.STOPBITS_ONE
timeout = 5

# Retry until BOTH the serial port and the database connection are open.
while True:
    try:
        ser = serial.Serial('/dev/ttyUSB0', baud, bytesize, parity, stopbit, timeout)
        connection = mysql.connector.connect(host="localhost", user="phpmyadmin", passwd="wetter1", db="iehDaten")
        break  # connected — without this break the loop reconnected forever
    except Exception as e:
        print(e)
        with open("/var/www/html/errorlog.txt", "a") as myfile:
            myfile.write(strftime("%Y-%m-%d %H:%M:%S", gmtime()) + "\t" + str(e) + "\n")
        sleep(10)

# Main polling loop: query the station once per second and store the readings.
while True:
    try:
        # clear the buffer
        ser.flush()
        # command that requests the current readings (see the station manual)
        befehl = bytearray([0x02, ord('m'), ord('m'), 0x03])
        # send the command
        ser.write(befehl)
        # read the reply line
        zeile = ser.readline()
        print("read: " + str(zeile))
        # strip trailing \n\r
        zeile = zeile.rstrip()
        # convert the byte string to text
        zeile = zeile.decode('utf-8')
        # drop placeholder values produced by missing/unconnected sensors
        zeile = zeile.replace('---.- ----.-', '')
        zeile = zeile.replace('---.-', '')
        # split the line into individual readings
        daten = zeile.split()
        # daten[1]..daten[7] are used below, so require at least 8 fields
        # (the old check `< 5` let an IndexError escape to the except block)
        if len(daten) < 8:
            raise Exception('Read Error')
        # write the readings to the database
        cursor = connection.cursor()
        # parameterized query: the driver quotes the values (the previous
        # %-string formatting spliced raw text straight into the SQL)
        cursor.execute(
            "INSERT INTO wetter (windspeed,winddirection,temp,humidity,radiation,pressure,precipitation) VALUES (%s,%s,%s,%s,%s,%s,%s)",
            (daten[1], daten[2], daten[3], daten[4], daten[6], daten[5], daten[7]))
        cursor.close()
        connection.commit()
        # wait one second before the next poll
        sleep(1)
        # print(daten)
    except Exception as e:
        print(e)
        with open("/var/www/html/errorlog.txt", "a") as myfile:
            myfile.write(strftime("%Y-%m-%d %H:%M:%S", gmtime()) + "\t" + str(e) + "\n")
        sleep(10)
|
from datetime import datetime
from flask import request
from flask_restful import Resource, abort
from flask_jsonpify import jsonify
from webargs import fields
from webargs.flaskparser import use_args
from shared import db
from models.timetable import Timetable
class TimetableHandler(Resource):
    """REST resource exposing CRUD operations for Timetable records."""

    get_and_delete_args = {
        'id': fields.Integer(required=True)
    }
    post_args = {
        'name': fields.String(required=True)
    }
    put_args = {
        'id': fields.Integer(required=True),
        'new_name': fields.String(required=True)
    }

    @use_args(get_and_delete_args)
    def get(self, args):
        """Fetch a single timetable by its identifier; 404 if missing."""
        record = Timetable.query.filter_by(identifier=args['id']).first()
        if record is None:
            abort(404, message="Timetable not found")
        return jsonify({
            "meta": {},
            "links": {
                "self": request.url
            },
            "data": {
                "timetable": record.serialize
            }
        })

    @use_args(post_args)
    def post(self, args):
        """Create a new timetable; the name must be unique (422 otherwise)."""
        duplicate = Timetable.query.filter_by(name=args['name']).first()
        if duplicate is not None:
            abort(422, message='The supplied Timetable Name already exists')
        db.session.add(Timetable(name=args['name']))
        db.session.commit()
        return "", 201

    @use_args(put_args)
    def put(self, args):
        """Rename an existing timetable (404 if absent, 422 on name clash)."""
        existing = Timetable.query.filter_by(identifier=args['id']).first()
        if existing is None:
            abort(404, message="Timetable not found")
        clash = Timetable.query.filter_by(name=args['new_name']).first()
        if clash is not None:
            abort(422, message='The new timetable name already exists')
        query = Timetable.query.filter_by(identifier=existing.identifier)
        query.update({'name': args['new_name']})
        query.first().updated_at = datetime.now()
        db.session.commit()
        return "", 202

    @use_args(get_and_delete_args)
    def delete(self, args):
        """Delete a timetable by its identifier; 404 if missing."""
        record = Timetable.query.filter_by(identifier=args['id']).first()
        if record is None:
            abort(404, message="Timetable not found")
        db.session.delete(record)
        db.session.commit()
        return "", 202
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.