index
int64
0
1,000k
blob_id
stringlengths
40
40
code
stringlengths
7
10.4M
18,200
8e48551596af5218ce9310ead61d60808f9b47d2
#ezhil n=int(input()) lis=[0,1,2,3,4,5,6,7,8,9] if n in lis: print("yes") else: print("no")
18,201
5cd171b06424c2e33e5d2d549cba51fcf69e4707
#!/usr/bin/python import re import shutil f1 = open("/etc/puppet/puppet.conf", "r") f2 = open("/tmp/puppet_tmp", "w") pattern= 'main' append = 'server = puppet.forthscale.com' for line in f1.readlines(): definition = re.findall(pattern, line) if definition: f2.write(line +'\n' + append + '\n') else: f2.write(line) f1.close() f2.close() shutil.copy2('/tmp/puppet_tmp', '/etc/puppet/puppet.conf')
18,202
cabe656231bb1815de28a95b1f60c89740afb5f3
import numpy as np import cupy as cp import pandas as pd import time, argparse from sklearn.cluster import KMeans as sk_kmeans from cuml.cluster import KMeans as cuml_kmeans from cuml.dask.cluster import KMeans as dask_kmeans from dask_cuda import LocalCUDACluster from dask.distributed import Client import cudf, dask_cudf, dask args = argparse.ArgumentParser() args.add_argument('--npartitions', type=int, help='number of data partitions') args.add_argument('--single_gpu', type=bool, help='single or multi gpu') args.add_argument('--dataset', type=str, help='type of data loading in') args = args.parse_args() npartitions = args.npartitions single_gpu = args.single_gpu datatype = args.dataset def sklearn_km(train_pca, val_pca, nclusters): print(str(time.ctime()) + ": Implementing KMeans Clustering with Sklearn...") start = time.time() kmeans = sk_kmeans(n_clusters=nclusters, random_state=0) kmeans.fit(train_pca) labels_val = np.array(kmeans.predict(val_pca)) end = time.time() sk_diff = round(end - start, 2) print(str(time.ctime()) + ": Finished KMeans Clustering with Sklearn in: " + str(sk_diff) + " seconds!") return labels_val def rapids_km(train_pca, val_pca, nclusters, single_gpu): kmeans = None name = None if single_gpu: print(str(time.ctime()) + ": Transferring CPU->GPU...") train_pca = cp.array(train_pca) val_pca = cp.array(val_pca) print(str(time.ctime()) + ": Successfully Transferred!") name = "Rapids" kmeans = cuml_kmeans(n_clusters=nclusters, random_state=0) else: print(str(time.ctime()) + ": Transferring CPU->GPUs...") train_pca = dask_cudf.from_dask_dataframe(dask.dataframe.from_pandas(pd.DataFrame(train_pca)), npartitions=npartitions).persist() val_pca = dask_cudf.from_dask_dataframe(dask.dataframe.from_pandas(pd.DataFrame(val_pca)), npartitions=npartitions).persist() print(str(time.ctime()) + ": Successfully Transferred!") cluster = LocalCUDACluster(n_workers=npartitions, threads_per_worker=1) client = Client(cluster) name = "Dask" kmeans = 
dask_kmeans(n_clusters=nclusters, random_state=0) print(str(time.ctime()) + ": Implementing KMeans Clustering with " + name + "...") kmeans.fit(train_pca) runtimes = np.array([]) for i in range(10): start = time.time() labels_val = cp.asnumpy(cp.array(kmeans.predict(val_pca))) end = time.time() r_diff = end - start runtimes = np.append(runtimes, r_diff) avg_runtime = round(sum(runtimes[3:]) / len(runtimes[3:]), 2) print(str(time.ctime()) + ": Finished KMeans Clustering with " + name + " in: " + str(avg_runtime) + " seconds!") return labels_val train_pca = None val_pca = None npzfile = None nc = 0 if datatype == 'SARSMERSCOV2': npzfile = np.load('/gpfs/alpine/gen150/scratch/arjun2612/ORNL_Coding/Code/sars_mers_cov2_dataset/smc2_dataset.npz') training = npzfile['train3D'] validation = npzfile['val3D'] nc = 3 train_pca = np.reshape(training, (training.shape[0], -1)) val_pca = np.reshape(validation, (validation.shape[0], -1)) elif datatype == 'HEA': npzfile = np.load('/gpfs/alpine/gen150/scratch/arjun2612/ORNL_Coding/Code/hea_dataset/hea_dataset.npz') train_pca = npzfile['train'] val_pca = npzfile['val'] nc = 5 label_validation = npzfile['labval'] lv_onehot = npzfile['lvoh'] l = np.array([]).astype(int) for i in range(len(lv_onehot)): l = np.append(l, np.argmax(lv_onehot[i])) sk_labels_val = sklearn_km(train_pca, val_pca, nc) accuracy = (sum(sk_labels_val == l) / len(l)) * 100 print('Accuracy: {}'.format(accuracy)) r_labels_val = rapids_km(train_pca, val_pca, nc, single_gpu) accuracy = (sum(r_labels_val == l) / len(l)) * 100 print('Accuracy: {}'.format(accuracy)) if datatype == 'SARSMERSCOV2': npzfile2 = np.load('/gpfs/alpine/gen150/scratch/arjun2612/ORNL_Coding/Code/pca/smc2_sk_clusterfiles.npz') reduced_val = npzfile2['redval'] np.savez('smc2_sk_plotting.npz', reslab=sk_labels_val, lv=label_validation, redval=reduced_val) np.savez('smc2_r_plotting.npz', reslab=r_labels_val, redval=reduced_val, lv=label_validation) elif datatype == 'HEA': npzfile2 = 
np.load('/gpfs/alpine/gen150/scratch/arjun2612/ORNL_Coding/Code/pca/hea_sk_clusterfiles.npz') reduced_val = npzfile2['redval'] np.savez('hea_sk_plotting.npz', reslab=sk_labels_val, lv=label_validation, redval=reduced_val) np.savez('hea_r_plotting.npz', reslab=r_labels_val, redval=reduced_val, lv=label_validation)
18,203
e00ae040a2bdc16d54aead70b73e33e18f3ec14b
yy = 1995 mm = 12 dd = 13 print("My Birthday is ", str(yy), " / ", str(mm)," / ",str(dd))
18,204
8c93e8b6669b7a8dbb7da1dd4ef23be94f3f9fda
import typing as t from datetime import datetime from sqlalchemy.sql import func from sqlalchemy.orm import Session from app.db.session import SessionLocal from app import crud, models db = SessionLocal() CalcType = t.NewType('Calculation Type', str) def get_claim_bill( _db: Session = db, *, start: datetime = None, end: datetime = None, store_internal_id: int = None, owner_id: int = None, kind: models.ClaimKind = None, calculation: CalcType = "avg" ): """ WE USE THIS FUNCTION TO CREATE A CLAIM QUERY WITH OPTIONAL PARAMETER AND CALCULATE BY SQLALCHEMY.SQL.FUNC """ query = crud.claim.get_query(_db) if start and end: query = query.filter(models.Claim.created_at.between(start, end)) if store_internal_id: query = query.filter(models.Claim.store_internal_id == store_internal_id) if owner_id: query = query.filter(models.Claim.owner_id == owner_id) if kind: query = query.filter(models.Claim.kind == kind) bill = query.with_entities(getattr(func, calculation)(models.Claim.bill).label("_bill")).first() bill_round = round(bill[0], 2) if bill[0] is not None else 0.00 return bill_round def get_clean_discharge( _db: Session = db, *, start: datetime = None, end: datetime = None, store_internal_id: int = None, owner_id: int = None ) -> float: """ TO AVOID MULTIPLE COUNTED DISCHARGES WE ONLY ACCEPT ONE DISCHARGE PER CONTRACT MEANS WE VE TO EXCLUDE ALL OTHER ROWS WITH THE SAME CONTRACT NR """ exclude_contracts: t.List[int] = [] discharges: t.List[float] = [] claims = crud.claim.get_query(_db) if start and end: claims = claims.filter(models.Claim.created_at.between(start, end)) # noqa if store_internal_id: claims = claims.filter(models.Claim.store_internal_id == store_internal_id) # noqa if owner_id: claims = claims.filter(models.Claim.owner_id == owner_id) # noqa # FOR BETTER SQL PERFORMANCE WE ENCLOSE OUR QUERY: EXCLUDE ALL WITH DISCHARGE IS NULL; claims = claims.filter(models.Claim.discharge.isnot(None)) # noqa claims: t.List[models.Claim] = claims.all() # noqa for claim in claims: 
if claim.contract_nr not in exclude_contracts: exclude_contracts.append(claim.contract_nr) discharges.append(claim.discharge) total_discharges: float = sum(discharges) total_discharges_round: float = round(total_discharges, 2) return total_discharges_round
18,205
485bc175ffbca72706f0eeb92bef54ce8edfe39f
from oppgave2 import generateA import numpy as np from scipy.linalg import solve # Gitte bjelkeparametre. length = 2.0 width = 0.3 thickness = 0.03 density = 480.0 # kg/m^3 # Andre konstanter. g = 9.8 # gravity, m/s^2 E = 1.3 * pow(10, 10) # Materialkonstanten N/m^2 I = (width * pow(thickness, 3)) / 12.0 #Arealmomentet. f = - density * width * thickness * g # Losning paa Ay=b med storrelse (n) def solve_3(n): # Deler bjelkens lengde saa alle segmentene er like. h = length / n # Genererer baandmatrise (A matrisen) matrixA = generateA(n) # Genererer b-matrisen. matrixB = np.array([[(pow(h, 4) / (E * I)) * f]] * n) # Loser for Y basert paa A og B. matrixY = solve(matrixA, matrixB) # Finner kondisjonstallet. kondisjonstall = np.linalg.cond(matrixA) # Returnerer losningen og kondisjonstallet. return matrixY, kondisjonstall if __name__ == '__main__': matrixY, kondisjonstall = solve_3(10) print(matrixY)
18,206
7d2da7b63bdf09d56f9328f4f6c00afba4318eaa
''' Created on 2018年11月16日 @author: qiguangqin ''' import numpy as np import matplotlib.pyplot as plt from sklearn.preprocessing import PolynomialFeatures from sklearn.linear_model import LinearRegression from sklearn.pipeline import Pipeline from sklearn.preprocessing import StandardScaler def get_data(): np.random.seed(666) x=np.random.uniform(-3,3,size=100) X=x.reshape(-1,1) y=0.5*x**2+x+2+np.random.normal(0,1,size=100) return X,y def get_data2(): X=np.arange(1,11).reshape(-1,2) def poly_regression_repre(): ''' 在数据预处理时候,使用 poly_进行,类似于StandScaler ''' X,y=get_data() poly=PolynomialFeatures(degree=2) ''' 表示为原始数据集,添加最高几次幂,X2 增加X 的二次项,作为new_feauture,使用linear_regression ''' poly.fit(X) X2=poly.transform(X) lin_reg=LinearRegression() lin_reg.fit(X2,y) print(X2[:2,:3]) ## 在左边增加了一列,1 X^0 y_predict=lin_reg.predict(X2) print("coef=",lin_reg.coef_,"interception=",lin_reg.intercept_) plt.scatter(X[:,0],y,alpha=0.5) plt.plot(np.sort(X[:,0]),y_predict[np.argsort(X[:,0])],color='m') plt.show() def using_pipe_line(): ''' 使用pipe_line,将polynomial feature,standscaler,lineargression ,做出一个管道,因为 sklearn 并没有给出一个 完整 poly 的类 ''' X,y=get_data() poly_reg=Pipeline([("poly",PolynomialFeatures(degree=2)),("stand_scaler",StandardScaler()),("lin_reg",LinearRegression())]) poly_reg.fit(X,y) ## 需要将X,y 全部传入 y_predict=poly_reg.predict(X) plt.scatter(X[:,0],y,alpha=0.5) plt.plot(np.sort(X[:,0]),y_predict[np.argsort(X[:,0])],alpha=0.7,color='m') def main(): poly_regression_repre() #using_pipe_line() if __name__ == '__main__': main()
18,207
8a8549f1ffa2ce97ec8a9c4fb179d909351e37de
# api https://api.github.com/repos/channelcat/sanic # web_page https://github.com/channelcat/sanic import requests import webbrowser import time # api指定了follow的这个人star的所有项目,该用户是kennethreitz api = "https://api.github.com/users/fdlancelee/starred" # 先访问一次api,获取star列表 info = requests.get(api).json() print(info.keys) #定义一个空的list starred = [] # 将star列表中的项目id存到list变量中 for i in info: #append 是在list后面添加一个元素 starred.append(i['id']) ''' while True: # 获取star的项目 info = requests.get(api).json() for i in info: # 如果当前项目id在list变量中不存在,则说明是刚刚star的项目 if not i['id'] in starred: starred.append(i['id']) # 获取项目名称 repo_name = i['name']try: pass except Exception as e: raise e finally: pass # 获取作者名称 owner = i['owner']['login'] # 在浏览器中打开项目 web_page = "https://github.com/" + owner + "/" + repo_name webbrowser.open(web_page) # 每隔600秒(10分钟)检查一次 time.sleep(600) '''
18,208
ae2815a6c46cfbaf6ddfaefa66df805df7c19914
import socketio import time import os import sys import json from zipfile import ZipFile contents = [{'filename': 'main/', 'bytes': ''}, {'filename': 'main/main.sh', 'bytes': 'python3 main.py\n'}, {'filename': 'main/main.py', 'bytes': "def fibonacci(n):\n if n < 1:\n print('err')\n return -1\n if n==1 or n==2:\n return 1\n return fibonacci(n-1)+fibonacci(n-2)\nprint(fibonacci(30))\n"}] #spoof of fib contents def process_zip_task(contents): owd = os.getcwd() zipObj = ZipFile('temp.zip', 'w') for obj in contents: fn = obj['filename'] byts = obj['bytes'] zipObj.writestr(fn,byts) zipObj.extractall(path='temp') os.system('rm temp.zip') os.chdir(owd+'/temp/main') os.system('mkdir output_tmp') os.system('chmod u+x main.sh') os.system('./main.sh > ../../output') os.system('echo $? > ../../status') os.system(f'mv output_tmp {owd}') #hmm what happens if no output folder, need to zip os.chdir(owd) os.system('rm -rf temp') start = time.time() process_zip_task(contents) end = time.time() print(end-start)
18,209
822d48555c07137af7221d19348c36d8f6392dd7
#!/usr/bin/env python # coding: utf-8 # In[4]: import requests from bs4 import BeautifulSoup import RReadWriteCSV # In[5]: url="https://thehill.com" # In[6]: def scraping(array_of_ids): count=0 previous='' page = requests.get(url) soup = BeautifulSoup(page.content, 'html.parser') links=soup.find_all('a', href=True) for a in links: if len(a['href']) > 50 and previous != a['href']: previous=a['href'] try: article=requests.get(url + a['href']) soup = BeautifulSoup(article.content, 'html.parser') news={} news['_id']=previous if news['_id'] in array_of_ids: continue array_of_ids.append(news['_id']) news["source"]=url+previous news["title"]=soup.find(class_="content-wrapper title").find("h1").get_text() date=soup.find(class_="submitted-date").get_text().split(" ") news["date"]=date[0] news["text"]='' p_array=soup.find(class_="field-item even").find_all('p') for p in p_array: news["text"]+=p.get_text() count+=1 RReadWriteCSV.write(news) except: continue print(str(count) + 'scraped articles')
18,210
2c77d35a46947967219b7f48ff3c7a9abf7b9b7d
# ************* Lesson 6 ************** # ************* Tuples(Constant variables or Constant Lists)************** # a tuple is a data structure that has a constant data value (Can't be changed) # [tuple object doesn't support item assignment] # create a tuple PI = (3.1415) # the value of this tuple(variable) can't be changed print(PI) # tuple list coordinates = (4, 5) print(coordinates[0], coordinates[1])
18,211
52537f1bd2af4be3b7e9a6204d31628ea1f0dd15
# -*- coding: utf8 -*- import pypyodbc import sqlite3 def sql_tool_sm(query,var=''): connection = pypyodbc.connect('Driver={SQL Server};Server=10.62.24.161\SQLEXPRESS;Database=tool_SM;uid=aos;pwd=aos159753') cursor = connection.cursor() cursor.execute(query,var) if query.lower().startswith('select') and not query.lower().startswith('select * into'): x = cursor.fetchall() cursor.close() return x else: cursor.commit() cursor.close() ### def sqlite(query,var=''): connection = sqlite3.connect(r'C:\Users\hp43\Desktop\sm_tool.db') cursor = connection.cursor() # connection.text_factory = str cursor.execute(query,var) if query.lower()[:6] == 'select': x = cursor.fetchall() connection.close() return x elif query.lower()[:6] == 'create': connection.close() else: connection.commit() connection.close() bds_biet_thu_field = ['ten_du_an', 'ten_duong', 'ten_tang', 'ma_can', 'dien_tich_dat', 'dien_tich_san_xay_dung', 'tong_gia_tri_xay_tho', 'tong_gia_tri_hoan_thien', 'don_gia_dat', 'don_gia_ctxd'] chung_cu_field = ['ten_du_an', 'ten_toa_duong_day_khu', 'ten_tang_loai_nha', 'ma_can', 'dien_tich', 'loai_dien_tich', 'don_gia'] dac_diem_vi_tri = 3 data_mb = 10 rong_ngo_truc_chinh_yeu_to = 5 hinh_dang_quy_mo_mat_tien = 4 gia_uy_ban = 9 # for r in sql_tool_sm("SELECT * from bds_lien_ke_bt"): # sqlite("INSERT INTO bds_lien_ke_bt VALUES({})".format(",".join(["?"]*len(bds_biet_thu_field))), r) # for r in sql_tool_sm("SELECT * from data_chung_cu"): # sqlite("INSERT INTO data_chung_cu VALUES({})".format(",".join(["?"]*len(chung_cu_field))), r) # for r in sql_tool_sm("SELECT * from dac_diem_vi_tri"): # sqlite("INSERT INTO dac_diem_vi_tri VALUES({})".format(",".join(["?"]*dac_diem_vi_tri)), r) # for r in sql_tool_sm("SELECT * from khoang_cach_den_truc_chinh"): # sqlite("INSERT INTO khoang_cach_den_truc_chinh VALUES({})".format(",".join(["?"]*rong_ngo_truc_chinh_yeu_to)), r) # for r in sql_tool_sm("SELECT * from yeu_to"): # sqlite("INSERT INTO yeu_to 
VALUES({})".format(",".join(["?"]*rong_ngo_truc_chinh_yeu_to)), r) # for r in sql_tool_sm("SELECT * from do_rong_ngo"): # sqlite("INSERT INTO do_rong_ngo VALUES({})".format(",".join(["?"]*rong_ngo_truc_chinh_yeu_to)), r) for r in sql_tool_sm("SELECT * from quy_mo"): sqlite("INSERT INTO quy_mo VALUES({})".format(",".join(["?"]*hinh_dang_quy_mo_mat_tien)), r) for r in sql_tool_sm("SELECT * from hinh_dang"): sqlite("INSERT INTO hinh_dang VALUES({})".format(",".join(["?"]*hinh_dang_quy_mo_mat_tien)), r) for r in sql_tool_sm("SELECT * from mat_tien"): sqlite("INSERT INTO mat_tien VALUES({})".format(",".join(["?"]*hinh_dang_quy_mo_mat_tien)), r) for r in sql_tool_sm("SELECT * from khung_gia_uy_ban"): sqlite("INSERT INTO khung_gia_uy_ban VALUES({})".format(",".join(["?"]*gia_uy_ban)), r) # for r in sql_tool_sm("SELECT * from data_mb"): # sqlite("INSERT INTO data_mb VALUES({})".format(",".join(["?"]*data_mb)), r)
18,212
61c9545e14519ee2a593d443ef39a2487a7b300d
# Face Recognition by Maker Asia Co., Ltd. # Distributed by AiIoTShop.com # Author by Comdet import sensor, image, time, lcd, os import KPU as kpu import ulab as np from Corgi85 import corgi85 from Dude import dude #=== setup camera ===# sensor.reset() sensor.set_pixformat(sensor.RGB565) sensor.set_framesize(sensor.QVGA) sensor.set_vflip(1) sensor.run(1) #=== setup LCD screen ===# lcd.init() lcd.rotation(0) #=== Helper Function ===# face_dataset = [] last_face_id = 0; def clear_dataset(file_name,dataset): os.remove(file_name) dataset = [] def match(tester,dataset): codex = np.array(dataset) t = np.array(tester) for i in range(codex.shape()[0]): codex[i,:] = codex[i,:] - t data = np.sum(codex * codex, axis=1) data = np.sqrt(data) if codex.shape()[0] == 1: return data,1 ind = np.argmin(data) return data[ind][0],ind + 1 def read_dataset(file_name): data = [] if file_name not in os.listdir(): open(file_name, 'x').close() f = open(file_name,"r") for line in f: line=line.rstrip('\n').rstrip('\r') rows = [float(row) for row in line.split(',')] data.append(rows) f.close() return data def append_dataset(file_name,data): face_dataset.append(data) f = open(file_name,"a") str_list_target = ["{:0.4f}".format(x) for x in data] str_target = ','.join(str_list_target) f.write(str_target) f.close() print("save to dataset success") def send_sheet(face_id): return #=== AI Models ===# task_face_detect = kpu.load(0x200000) task_face_encode = kpu.load(0x300000) kpu.set_outputs(task_face_encode,0,1,1,128) anchor = (1.889, 2.5245, 2.9465, 3.94056, 3.99987, 5.3658, 5.155437, 6.92275, 6.718375, 9.01025) kpu.init_yolo2(task_face_detect, 0.5, 0.3, 5, anchor) #====== config ======# face_threshold = 15 dataset_filename = "faces.csv" #====================# #=== SETUP ===# #clear_dataset(dataset_filename,face_dataset) face_dataset = read_dataset(dataset_filename); corgi85.IFTTT_init("corgi_detect","0hI55mQkUiimG6RIjpWhp") #=== wait wifi connect ===# while corgi85.wifi_check() == 0: print("WIFI 
Connecting ...") time.sleep(1) while(True): img = sensor.snapshot() #--- face detect ---# faces = kpu.run_yolo2(task_face_detect, img) if faces: #--- check face size ---# x1 = faces[0].x() - 10 y1 = faces[0].y() - 10 w = faces[0].w() + 20 h = faces[0].h() + 10 if w > 80 and h > 80: #--- crop target face ---# face = img.cut(x1,y1,w,h) face = face.resize(112,112) a = img.draw_rectangle(x1,y1,w,h,color = (255,0,0), thickness=2) a = face.pix_to_ai() #--- encode face ---# fmap = kpu.forward(task_face_encode,face) encoded = fmap[:] #--- save new face ---# if dude.IsBootPressed() : # User pressed BOOT button time.sleep_ms(500) lcd.clear((0,255,0)) time.sleep_ms(2000) append_dataset(dataset_filename,encoded) print("saved") if face_dataset: #--- find match ---# score, pid = match(encoded,face_dataset) print(score) print("Match ID %d score = %.3f" % (pid,score)) if score < face_threshold : # recognized a = img.draw_rectangle(x1,y1,w,h,color = (0,255,0), thickness=2) a = img.draw_string(x1+5,y1+10,"ID:%d" % pid,color=(0,255,0),scale=3) # ... DO SOMETHING HERE ... if last_face_id != pid: last_face_id = pid print("======= send data =======") corgi85.IFTTT_setParam(1,str(pid)) corgi85.IFTTT_fire() img = img.resize(240,240) lcd.display(img)
18,213
8aeddc17cc1a26f32170374e3b2adbeda6a48467
from socket import * from time import sleep class send: def __init__(self, ip): self.ss = socket(AF_INET, SOCK_STREAM) self.ss.connect((ip), 13333) def send(self, code): print('ready to send') self.ss.send(code.encode()) print('sent')
18,214
5ebb93a7cf8dfac7637f76f860549da05a491c4f
from flask import Flask, render_template, flash, redirect, url_for, session, request, logging from flaskext.mysql import MySQL from wtforms import Form, StringField, TextAreaField, PasswordField, validators from passlib.hash import sha256_crypt from functools import wraps from datetime import datetime app = Flask(__name__,static_url_path='/static') #mysql mysql = MySQL() mysql.init_app(app) # Config MySQL app.config['MYSQL_DATABASE_HOST'] = 'localhost' app.config['MYSQL_DATABASE_USER'] = 'root' app.config['MYSQL_DATABASE_PASSWORD'] = 'password' app.config['MYSQL_DATABASE_DB'] = 'flask' app.config['MYSQL_CURSORCLASS'] = 'DictCursor' TEMPLATES_AUTO_RELOAD=True @app.route("/") def index(): return render_template('index.html') @app.route('/login', methods=['GET', 'POST']) def login(): if request.method == 'POST': username = request.form['username'] password_candidate = request.form['password'] conn = mysql.connect() cursor = conn.cursor() result = cursor.execute("SELECT * FROM users WHERE username = %s", [username]) if result > 0: data = cursor.fetchone() password = data[2] if sha256_crypt.verify(password_candidate, password): session['logged_in'] = True session['username'] = username return redirect(url_for('dashboard')) else: return render_template('wrong_credentials.html') cursor.close() else: return render_template('wrong_credentials.html') return render_template('login.html') @app.route('/dashboard') def dashboard(): if session['logged_in'] != True: return render_template('index.html') else: conn = mysql.connect() cursor = conn.cursor() result = cursor.execute("SELECT * FROM threads") threads = cursor.fetchall() if result > 0: return render_template('dashboard.html',threads=threads) else: return render_template('dashboardempty.html') # Close connection #cur.close() class ThreadForm(Form): title = StringField('Title', [validators.Length(min=1, max=200)]) body = TextAreaField('Body', [validators.Length(min=30)]) @app.route('/create_thread', methods=['GET', 'POST']) 
def add_thread(): if session['logged_in'] == True: form = ThreadForm(request.form) if request.method == 'POST' and form.validate(): title = form.title.data body = form.body.data conn = mysql.connect() cursor = conn.cursor() # Execute cursor.execute("INSERT INTO threads(title, body, author) VALUES(%s, %s, %s)",(title, body, session['username'])) # Commit to DB conn.commit() #Close connection conn.close() return render_template('thread_success.html') return render_template('create_thread.html', form=form) else: return render_template('login.html') @app.route('/edit_thread/<string:id>', methods=['GET', 'POST']) def edit_thread(id): if session['logged_in'] != True: return render_template('index.html') else: # Create cursor conn = mysql.connect() cursor = conn.cursor() # Get article by id result = cursor.execute("SELECT * FROM threads WHERE id = %s", [id]) thread = cursor.fetchone() cursor.close() # Get form form = ThreadForm(request.form) # Populate article form fields form.title.data = thread[1] form.body.data = thread[2] if request.method == 'POST' and form.validate(): title = request.form['title'] body = request.form['body'] # Create Cursor conn = mysql.connect() cursor = conn.cursor() app.logger.info(title) # Execute cursor.execute ("UPDATE threads SET title=%s, body=%s WHERE id=%s",(title, body, id)) # Commit to DB conn.commit() #Close connection cursor.close() return render_template('threadupdated.html') return render_template('edit_thread.html', form=form) @app.route('/delete_thread/<string:id>', methods=['POST']) def delete_thread(id): if session['logged_in'] != True: return render_template('index.html') else: # Create cursor conn = mysql.connect() cursor = conn.cursor() # Execute cursor.execute("DELETE FROM threads WHERE id = %s", [id]) conn.commit() #Close connection cursor.close() return render_template('threaddeleted.html') @app.route('/logout') def logout(): session.clear() return redirect(url_for('login')) def is_logged_in(f): @wraps(f) def wrap(*args, 
**kwargs): if 'logged_in' in session: return f(*args, **kwargs) else: return redirect(url_for('login')) return wrap def current_time(): return {'now': datetime.utcnow()} @app.route('/threads') def threads(): conn = mysql.connect() cursor = conn.cursor() result = cursor.execute("SELECT * FROM threads") threads = cursor.fetchall() if result > 0: return render_template('threads.html', threads=threads) else: return render_template('dashboardempty.html') # Close connection cursor.close() @app.route('/thread/<string:id>/') def thread(id): conn = mysql.connect() cursor = conn.cursor() result = cursor.execute("SELECT * FROM threads WHERE id = %s", [id]) threads = cursor.fetchone() return render_template('thread.html', threads=threads) class RegisterForm(Form): username = StringField('Username', [validators.Length(min=4, max=25)]) email = StringField('Email', [validators.Length(min=6, max=50)]) password = PasswordField('Password', [ validators.DataRequired(), validators.EqualTo('confirm', message='Passwords do not match') ]) confirm = PasswordField('Confirm Password') @app.route('/signup', methods=['GET', 'POST']) def signup(): form = RegisterForm(request.form) if request.method == 'POST' and form.validate(): username = form.username.data email = form.email.data password = sha256_crypt.encrypt(str(form.password.data)) # Create cursor conn = mysql.connect() cursor = conn.cursor() # Execute query cursor.execute("INSERT INTO users(username, email, password) VALUES(%s, %s, %s)", (username, email, password)) #commit conn.commit() flash('You are now registered and can log in', 'success') return redirect('/login') return render_template('signup.html', form=form) # set the secret key. keep this really secret: app.secret_key = 'A0Zr98j/3yX R~XHH!jmN]LWX/,?RT' if __name__ == '__main__': app.run(Debug=True)
18,215
483b4c8c7f8f2d3e61d4030398e26f1c38b00a56
#!/usr/bin/env python # -*- coding: utf-8 -*- import json from alipay.aop.api.response.AlipayResponse import AlipayResponse from alipay.aop.api.domain.MemberCardUserTransactionDetail import MemberCardUserTransactionDetail class AntMerchantExpandMembercardTransactionBatchqueryResponse(AlipayResponse): def __init__(self): super(AntMerchantExpandMembercardTransactionBatchqueryResponse, self).__init__() self._partner_id = None self._transaction_detail_list = None @property def partner_id(self): return self._partner_id @partner_id.setter def partner_id(self, value): self._partner_id = value @property def transaction_detail_list(self): return self._transaction_detail_list @transaction_detail_list.setter def transaction_detail_list(self, value): if isinstance(value, list): self._transaction_detail_list = list() for i in value: if isinstance(i, MemberCardUserTransactionDetail): self._transaction_detail_list.append(i) else: self._transaction_detail_list.append(MemberCardUserTransactionDetail.from_alipay_dict(i)) def parse_response_content(self, response_content): response = super(AntMerchantExpandMembercardTransactionBatchqueryResponse, self).parse_response_content(response_content) if 'partner_id' in response: self.partner_id = response['partner_id'] if 'transaction_detail_list' in response: self.transaction_detail_list = response['transaction_detail_list']
18,216
182db5f3e343b4fbaabca087e94e2ab9e4c97839
from pyspark.sql import SQLContext from mozetl.testpilot.containers import transform_testpilot_pings def create_ping_rdd(sc, payload): return sc.parallelize([ {'payload': { 'test': '@testpilot-containers', 'other-ignored-field': 'who cares', 'payload': payload }} ]) def create_row(overrides): keys = ["uuid", "userContextId", "clickedContainerTabCount", "eventSource", "event", "hiddenContainersCount", "shownContainersCount", "totalContainersCount", "totalContainerTabsCount", "totalNonContainerTabsCount", "pageRequestCount", "test"] overrides['test'] = '@testpilot-containers' return {key: overrides.get(key, None) for key in keys} def test_open_container_ping(row_to_dict, spark_context): input_payload = { 'uuid': 'a', 'userContextId': 10, 'clickedContainerTabCount': 20, 'event': 'open-tab', 'eventSource': 'tab-bar' } result_payload = input_payload result_payload['userContextId'] = '10' actual = transform_testpilot_pings( SQLContext(spark_context), create_ping_rdd(spark_context, input_payload) ).take(1)[0] assert row_to_dict(actual) == create_row(result_payload) def test_edit_container_ping(row_to_dict, spark_context): input_payload = { 'uuid': 'b', 'event': 'edit-containers' } actual = transform_testpilot_pings( SQLContext(spark_context), create_ping_rdd(spark_context, input_payload) ).take(1)[0] assert row_to_dict(actual) == create_row(input_payload) def test_hide_container_ping(row_to_dict, spark_context): input_payload = { 'uuid': 'a', 'userContextId': 'firefox-default', 'clickedContainerTabCount': 5, 'event': "hide-tabs", 'hiddenContainersCount': 2, 'shownContainersCount': 3, 'totalContainersCount': 5, } actual = transform_testpilot_pings( SQLContext(spark_context), create_ping_rdd(spark_context, input_payload) ).take(1)[0] assert row_to_dict(actual) == create_row(input_payload) def test_close_container_tab_ping(row_to_dict, spark_context): input_payload = { "uuid": 'a', "userContextId": 'firefox-default', "event": "page-requests-completed-per-tab", 
"pageRequestCount": 2, } actual = transform_testpilot_pings( SQLContext(spark_context), create_ping_rdd(spark_context, input_payload) ).take(1)[0] assert row_to_dict(actual) == create_row(input_payload)
18,217
e291857352fa9457a3c3f31208a3681034c2a53e
import os print("\n\n") print("\t\t\t\t\t\tWelcome to hadoop cluster setup menu") print("\t\t\t\t\t\t------------------------------------") print(''' \t\t\t\t\t\tFor setting hadoop cluster you have to follow certain steps: \t\t\t\t\tStep:1 Give details about namenode. \t\t\t\t\tStep:2 Give details about datanode. \t\t\t\t\tStep:3 give details about client. ''') Namenode_IP = input("\t\t\t\t\t\tGive IP at which you want to configure namenode: ") print("Downloading and Configuring Hadoop and JAVA") # copy down.py file that contains code for downloading and installing hadoop and java rpm for linux os.system("scp down.sh root@{}:/root/".format(Namenode_IP)) os.system("ssh root@{} sudo chmod +x down.sh ".format(Namenode_IP)) os.system("ssh root@{} sudo sh down.sh".format(Namenode_IP)) #copy namenode.py into instance os.system("scp nameNode.py root@{}:/root/".format(Namenode_IP)) #install python3 on the instance os.system("sleep 2") os.system("ssh root@{} sudo yum install python3 -y".format(Namenode_IP)) #setup namenode core-site.xml and hdfs-site.xml os.system("sleep 2") os.system("ssh root@{} sudo python3 nameNode.py".format(Namenode_IP)) #format the namenode os.system("sleep 2") os.system("ssh root@{} sudo hadoop namenode -format".format(Namenode_IP)) #start namenode os.system("sleep 2") os.system("ssh root@{} sudo hadoop-daemon.sh start namenode".format(Namenode_IP)) Datanode_IP = [] count_datanode = int(input("\t\t\t\t\t\tHow many datanode you want to configure: ")) for i in range(0,count_datanode): d_ip = input("\t\t\t\t\t\tGive IP at which you want to configure datanode{}:".format(i+1)) Datanode_IP.append(d_ip) print("Downloading and Configuring Hadoop and JAVA") os.system("scp down.sh root@{}:/root/".format(Datanode_IP[i])) os.system("ssh root@{} sudo chmod +x down.sh ".format(Datanode_IP[i])) os.system("ssh root@{} sudo sh down.sh".format(Datanode_IP[i])) os.system("scp dataNode.py root@{}:/root/".format(Datanode_IP[i])) os.system("sleep 2") os.system("ssh 
root@{} sudo yum install python3 -y".format(Datanode_IP[i])) os.system("sleep 2") #setup datanode core-site.xml and hdfs-site.xml os.system("ssh root@{} sudo python3 dataNode.py".format(Datanode_IP[i])) os.system("sleep 2") #start datanode os.system("ssh root@{} sudo hadoop-daemon.sh start datanode".format(Datanode_IP[i])) Client_IP = [] count_client = int(input("\t\t\t\t\t\tHow many client you want to configure: ")) for i in range(0,count_client): c_ip = input("\t\t\t\t\t\tGive IP at which you want to configure client: ") Client_IP.append(c_ip) print("Downloading and Configuring Hadoop and JAVA") os.system("scp down.sh root@{}:/root/".format(Client_IP[i])) os.system("ssh root@{} sudo chmod +x down.sh ".format(Client_IP[i])) os.system("ssh root@{} sudo sh down.sh".format(Client_IP[i])) os.system("sleep 2") os.system("scp client.py root@{}:/root/".format(Client_IP[i])) os.system("sleep 2") os.system("ssh root@{} sudo yum install python3 -y".format(Client_IP[i])) os.system("sleep 2") #setup datanode core-site.xml and hdfs-site.xml os.system("sleep 2") os.system("ssh root@{} sudo python3 client.py".format(Client_IP[i]))
18,218
f7279a1cde6689822bd01c479f27d959922a03ee
from pizza_parlour import app def test_pizza(): """Test index page routing""" response = app.test_client().get('/') assert response.status_code == 200 assert response.data == b'Welcome to the Pizza Parlour' def test_menu(): """Test full menu GET request""" response = app.test_client().get('/v1/resources/menu/all') assert response.status_code == 200 assert response.content_type == 'application/json' def test_item(): """Test specific item GET request""" # 404 reponses response = app.test_client().get('/v1/resources/menu?') assert response.status_code == 404 response = app.test_client().get('/v1/resources/menu?itype=pizza') assert response.status_code == 404 response = app.test_client().get('/v1/resources/menu?itype=pizza&item=pepperoni') assert response.status_code == 404 response = app.test_client().get('/v1/resources/menu?item=pepperoni') assert response.status_code == 404 # 204 responses response = app.test_client().get('/v1/resources/menu?item=p&itype=pizza&size=small') assert response.status_code == 204 response = app.test_client().get('/v1/resources/menu?item=pepperoni&itype=p&size=small') assert response.status_code == 204 response = app.test_client().get('/v1/resources/menu?item=pepperoni&itype=pizza&size=s') assert response.status_code == 204 response = app.test_client().get('/v1/resources/menu?itype=topping&item=b') assert response.status_code == 204 # 200 responses response = app.test_client().get('/v1/resources/menu?itype=topping&item=beef') assert response.status_code == 200 assert response.content_type == 'application/json' assert isinstance((response.json)['price'], int) response = app.test_client().get('/v1/resources/menu?itype=pizza&item=pepperoni&size=small') assert response.status_code == 200 assert response.content_type == 'application/json' assert isinstance((response.json)['price'], int) def test_crud(): ## Create order ## # 200 response order = { "pizzas": [ {"type": "pepperoni", "size": "small", "toppings": "mushrooms"} ], "drinks": [ {"type": 
"coke", "size": "medium"} ] } response = app.test_client().post('/v1/orders', json=order) assert response.status_code == 200 assert isinstance((response.json)['order_number'], int) # 400 responses order = { "pizzas": [ {"size": "small", "toppings": "mushrooms"} ], "drinks": [ {"type": "coke", "size": "medium"} ] } response = app.test_client().post('/v1/orders', json=order) assert response.status_code == 400 response = app.test_client().post('/v1/orders') assert response.status_code == 400 # 204 responses order = { "pizzas": [ {"type": "pepperon", "size": "small", "toppings": "mushrooms"} ], "drinks": [ {"type": "coke", "size": "medium"} ] } response = app.test_client().post('/v1/orders', json=order) assert response.status_code == 204 ## Read order ## # 200 response response = app.test_client().get('/v1/orders/1') assert response.status_code == 200 ## Update order ## # 200 response order = { "pizzas": [ {"type": "pepperoni", "size": "medium", "toppings": "mushrooms"} ], "drinks": [] } response = app.test_client().put('/v1/orders/1', json=order) assert response.status_code == 200 # 204 response order = { "pizzas": [ {"type": "pepperon", "size": "medium", "toppings": "mushrooms"} ], "drinks": [] } response = app.test_client().put('/v1/orders/1', json=order) assert response.status_code == 204 ## Delivery ## # 200 response delivery_info = { "order_number": 1, "address": "1001 bay", "details": "no green onions", "delivery_number": "inhouse2" } response = app.test_client().post('/v1/orders/inhouse', json=delivery_info) assert response.status_code == 200 delivery_info = { "order_number": 1, "address": "1001 bay", "details": "no green onions", "delivery_number": "ubereats14" } response = app.test_client().post('/v1/orders/ubereats', json=delivery_info) assert response.status_code == 200 delivery_info = "order_number,address,details,delivery_number\n1,1001 bay,no green onions,foodora13" response = app.test_client().post('/v1/orders/foodora', data=delivery_info) assert 
response.status_code == 200 # 204 responses delivery_info = { "order_number": 2, "address": "1001 bay", "details": "no green onions", "delivery_number": "ubereats14" } response = app.test_client().post('/v1/orders/ubereats', json=delivery_info) assert response.status_code == 204 response = app.test_client().post('/v1/orders/inhouse', json=delivery_info) assert response.status_code == 204 delivery_info = "order_number,address,details,delivery_number\n2,1001 bay,no green onions,foodora13" response = app.test_client().post('/v1/orders/foodora', data=delivery_info) assert response.status_code == 204 # 400 responses delivery_info = { "order_numbe": 2, "address": "1001 bay", "details": "no green onions", "delivery_number": "ubereats14" } response = app.test_client().post('/v1/orders/ubereats', json=delivery_info) assert response.status_code == 400 response = app.test_client().post('/v1/orders/inhouse', json=delivery_info) assert response.status_code == 400 ## Delete order ## response = app.test_client().delete('/v1/orders/1') assert response.status_code == 200 response = app.test_client().delete('/v1/orders/1') assert response.status_code == 204
18,219
06c356faab33c38eb680d44cfa13b57dc0eed90c
from enum import Enum


class ActivationLiterals(Enum):
    """Closed set of supported activation-function names.

    The enum value is the lowercase string identifier used to select the
    activation elsewhere (e.g. when building a model from a config string).
    """

    RELU = "relu"
    SOFTMAX = "softmax"
18,220
8d950c02eae7a319210a8ed48b4f4b93da83886d
import hashlib
import sys
import glob
import argparse
import os


def read_txt(fname):
    """Read a text file, echo its lines to stdout, and return them.

    Uses a context manager so the handle is always closed (the original
    leaked it).
    """
    with open(fname, 'r') as f:
        content = f.readlines()
    print(content)
    return content


def sha1(fname):
    """Return the hex SHA-1 digest of the raw bytes of file *fname*.

    Reads in binary chunks for memory efficiency.  The previous text-mode
    implementation (``errors='ignore'`` + re-encode) silently dropped any
    undecodable bytes and translated platform newlines, so the digest could
    differ from the actual file content on disk.
    """
    sha1hash = hashlib.sha1()
    with open(fname, 'rb') as handle:
        # Fixed-size chunks keep memory bounded for arbitrarily large files.
        for chunk in iter(lambda: handle.read(65536), b''):
            sha1hash.update(chunk)
    return sha1hash.hexdigest()


def look_files(path):
    """Return the directory entries directly under *path*.

    ``os.path.join(path, '*')`` replaces the original hard-coded ``"\\*"``,
    which only worked with Windows path separators.
    """
    return glob.glob(os.path.join(path, '*'))
18,221
6ac9c3132c2129080359d19d3adb5f4a9cc87c43
def insertionsort(A):
    """Sort list *A* in place (ascending) using insertion sort."""
    for pos in range(1, len(A)):
        current = A[pos]
        insert_at = pos
        # Shift larger elements one slot right until the hole for
        # `current` is in sorted position.
        while insert_at > 0 and A[insert_at - 1] > current:
            A[insert_at] = A[insert_at - 1]
            insert_at -= 1
        A[insert_at] = current


# Demo: sort a sample list and print one element per line.
A = [23, 24, 52, 69, 1, 193, 34, 95, 3, 910]
insertionsort(A)
for value in A:
    print(value)
18,222
ecfe6c15f81eb4cdffc061d91359e75aac8b7672
#!/usr/bin/env python #-*- coding: utf-8 -*- import cv2 import sys import gc from face_keras import Model import roslib import rospy from std_msgs.msg import String from std_msgs.msg import Int8 from sensor_msgs.msg import Image, CameraInfo from cv_bridge import CvBridge, CvBridgeError import numpy as np SENSOR_TOPIC = '/camera/rgb/image_raw' #人脸识别分类器本地存储路径 cascade_path = "/home/kamerider/catkin_ws/src/machine_vision/haarcascade_frontalface_alt.xml" #框住人脸的矩形边框颜色 color = (0, 255, 0) test = cv2.imread('/home/kamerider/catkin_ws/src/machine_vision/DataBase/HOST/1.jpg') #加载模型 model = Model() model.load_model() y = model.face_predict(test) #加载cv_bridge bridge=CvBridge() def predict(frame): #循环检测识别人脸 while True: #图像灰化,降低计算复杂度 frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) #使用人脸识别分类器,读入分类器 cascade = cv2.CascadeClassifier(cascade_path) #利用分类器识别出哪个区域为人脸 faceRects = cascade.detectMultiScale(frame_gray, scaleFactor = 1.1, minNeighbors = 3, minSize = (32, 32)) print type(frame) count1=0 if len(faceRects) > 0: for faceRect in faceRects: x, y, w, h = faceRect #截取脸部图像提交给模型识别这是谁 image = frame[y - 20: y + h + 20, x - 20: x + w + 20] cv2.imshow('predict',image) faceID = model.face_predict(image) #如果是“我” if faceID == 0: cv2.rectangle(frame, (x - 10, y - 10), (x + w + 10, y + h + 10), color, thickness = 2) print faceID #文字提示是谁 cv2.putText(frame,'TOM', (x + 30, y + 30), #坐标 cv2.FONT_HERSHEY_SIMPLEX, #字体 1, #字号 (255,0,255), #颜色 2) #字的线宽 if count1 == 0: count1=1 cv2.imwrite('/home/kamerider/catkin_ws/test.jpg',frame) if faceID == 1: cv2.rectangle(frame, (x - 10, y - 10), (x + w + 10, y + h + 10), color, thickness = 2) cv2.putText(frame,'HOST', (x + 30, y + 30), #坐标 cv2.FONT_HERSHEY_SIMPLEX, #字体 1, #字号 (255,0,255), #颜色 2) #字的线宽 else: pass cv2.imshow("test", frame) #等待10毫秒看是否有按键输入 k = cv2.waitKey(10) #如果输入q则退出循环 if k & 0xFF == ord('q'): break #释放摄像头并销毁所有窗口 cap.release() cv2.destroyAllWindows() def imageCallback(msg): try: frame = bridge.imgmsg_to_cv2(msg, "bgr8") except 
CvBridgeError, e: print e # Convert the image to a Numpy array since most cv2 functions # require Numpy arrays. frame = np.array(frame, dtype=np.uint8) predict(frame) if __name__ == '__main__': rospy.init_node('test') print '----------INIT----------' img_sub = rospy.Subscriber("/camera/rgb/image_raw",Image,imageCallback) #捕获指定摄像头的实时视频流 #cap = cv2.VideoCapture(int(sys.argv[1])) rospy.spin()
18,223
89633181aae851be7ca96f5b358187f240a122ea
#!/usr/bin/env python3 from collections import defaultdict Killer = {'R':'P', 'P':'S', 'S':'R'} def robot(C): P = [] while len(P)<500: D = defaultdict(list) for s in C: D[s[len(P)%len(s)]].append(s) if len(D)==3: break elif len(D)==1: U, = iter(D) P.append(Killer[U]) return ''.join(P) else: U,V = iter(D.keys()) if Killer[U]==V: U,V = V,U P.append(U) C = D[U] return 'IMPOSSIBLE' if __name__=='__main__': T = int(input()) for t in range(1,T+1): A = int(input()) C = [input() for _ in range(A)] print('Case #%d: %s' % (t, robot(C)))
18,224
172b32a15187a61884a3b842475c2acea8547a45
# Legacy Django URLconf (django.conf.urls.defaults is the pre-1.6 API).
from django.conf.urls.defaults import patterns, url

from basics.pages.views import PageDetailView

urlpatterns = patterns('',
    # Match any page path followed by ",<pk>" (e.g. "about/team,42") and
    # dispatch to PageDetailView; named 'page_detail' for reverse() lookups.
    url(r'^(?P<path>.*),(?P<pk>\d+)$', PageDetailView.as_view(), {}, 'page_detail'),
)
18,225
d5baf2594e0d39845d5c37e7598b3d38cb31349d
''' replace = [1.0] * 3 sum = [] ''' ''' with open('data/test.txt', 'r') as f: for temp in f.readlines(): temp1 = temp.split('\t') temp1.extend(replace) temp1 = list(map(float, temp1)) sum.append(temp1) print(sum) ''' ''' pin def train_batch_x(f, num): sum = [] for i in range(num): temp = f.readline() temp1 = temp.split('\t') temp1 = list(map(float, temp1)) sum.append(temp1) pin = fd.tell() return sum with open('data/test.txt', 'r') as f: text = [] for i in range(3): batch_size = 1 x = train_batch_x(f, batch_size) text.append(x) print(text) ''' text = [] def test_read(): file = 'data/test.txt' with open(file, 'r') as fd: # 获得一个句柄 fd.seek(label, 0) for i in range(2): # 读取三行数据 text.append(fd.readline()) label = fd.tell() print(label) print(text) ''' # 再次阅读文件 with open(file, 'r') as f: # 获得一个句柄 f.seek(label, 0) # 把文件读取指针移动到之前记录的位置 text2 = f.readline() print(text2) # 接着上次的位置继续向下读取 '''
18,226
548d06bbfe6d07a6b36c5e8f7790c097e2c51f0e
"""Setup.""" # -*- coding: utf-8 -*- # @Author: AnthonyKenny98 # @Date: 2019-11-09 17:45:01 # @Last Modified by: AnthonyKenny98 # @Last Modified time: 2019-11-16 14:57:07 from src.groupme.groupme import setup_bot import random options = { "0": "GroupMe" } # Get Heroku App Name appName = input("Please enter your Heroku App Name: ") # Get Config Filename configFile = input( "Please enter your configuration filename (without '.config' suffix): ") print("\n\nApplications\n==========") for key, val in options.items(): print(key + ' : ' + val) print("\n") service = None while (service not in options): service = input("Please input your Application option number: ") service = options[service] # Construct CallBack URL callback_url = 'https://{}.herokuapp.com/{}/{}/{}'.format( appName, service.lower(), configFile, str(random.randint(0, 1000000))) if service == 'GroupMe': setup_bot(callback_url)
18,227
489cb8f54ced229899c3566742bde129cdb95494
# coding: utf-8 # # Hand Shape Classification # ## Imports # In[17]: #get_ipython().run_line_magic('matplotlib', 'inline') def classify(keyword2int): import numpy as np import matplotlib.pyplot as plt import scipy.misc from PIL import Image import pprint pp = pprint.PrettyPrinter(indent = 4) # In[18]: import sklearn from sklearn import datasets import skimage.io # ## Datasets preparation # In[19]: def png2vec(filename): img = Image.open(filename).convert('L') arr = np.array(img) return arr # In[20]: filesetNames = ["%01d" %x for x in range(0,2)] # In[21]: import os images = [] tgt = [] count = 0 img_test = Image.open("./img/0/1.jpg") for curFileset in filesetNames: curPath = "./img/"+ curFileset + "/" for file in os.listdir(curPath): curImageVector = png2vec(curPath + file) images.append(curImageVector) tgt.append(curFileset) count += 1 #end print (len(images)) # In[22]: ''' import random index = random.randint(0,count - 1) random_raw_image = images[index] random_im = Image.fromarray(random_raw_image) title = "No."+ "%04d"%index + " Tag:" + tgt[index] plt.title(title) plt.imshow(random_im) # In[23]: random_raw_image.flatten() ''' # In[24]: from sklearn.model_selection import train_test_split from sklearn import model_selection, metrics images_np = np.array(images) img = images_np.reshape(images_np.shape[0],-1) xM, xT, yM, yT = train_test_split(img, tgt, test_size = 0.01) print(yT) xT = list(xT) #map(list,xT) xT.extend(xM[0:20]) yT.extend(yM[0:20]) print(yT) xT = np.array(xT) print(yT) # # Naive Bayes # In[25]: # 要传参!!! from search keyword2int # 要传参!!! 
from search keyword2int #keyword2int = {'dog':0,'cat':1} from sklearn import metrics from sklearn import naive_bayes cls = naive_bayes.GaussianNB() cls.fit(xM, yM) res = cls.predict(xT) print(type(xT[0:1])) print(len(xT),len(xT[0]),'result',type(xT)) print(res) print(metrics.confusion_matrix(yT, res),metrics.accuracy_score(yT, res),"GaussianNB") #plt.title('这个图片是'+str(list(keyword2int.keys())[0])+'预测结果是'+ str(list(keyword2int.keys())[int(res_test[0])])) #plt.imshow(img_test) #plt.show() # In[26]: cls = naive_bayes.BernoulliNB(binarize = 0.9) cls.fit(xM, yM) res = cls.predict(xT) print(metrics.confusion_matrix(yT, res),metrics.accuracy_score(yT, res),"BernoulliNB") # In[27]: cls = naive_bayes.MultinomialNB(alpha=0.1, fit_prior=True, class_prior=None) cls.fit(xM, yM) res = cls.predict(xT) print(metrics.confusion_matrix(yT, res),metrics.accuracy_score(yT, res),"naive_bayes.MultinomialNB") # # KNN # In[28]: from sklearn.neighbors import KNeighborsClassifier cls = KNeighborsClassifier(n_neighbors=7) cls.fit(xM, yM) res = cls.predict(xT) print(metrics.confusion_matrix(yT, res),metrics.accuracy_score(yT, res),' knn') ''' # ## KNN Parameters # In[30]: scoreListUniform = [] nListUniform = [] stepCount = 10 start = 1 end = 10 for curN in range(start,end): cls = KNeighborsClassifier(n_neighbors=curN,weights = 'uniform') cls.fit(xM, yM) res = cls.predict(xT) curScore = metrics.accuracy_score(yT, res) nListUniform.append(curN) scoreListUniform.append(curScore) scoreListUniform # In[75]: scoreListDistance = [] nListDistance = [] for curN in range(start,end): cls = KNeighborsClassifier(n_neighbors = curN,weights = 'distance') cls.fit(xM, yM) res = cls.predict(xT) curScore = metrics.accuracy_score(yT, res) nListDistance.append(curN) scoreListDistance.append(curScore) scoreListDistance # ## Plot # In[ ]: x = range(start,end) plt.figure(figsize = (8,4)) plt.plot(x,scoreListUniform,"r-",label = "Uniform Weights",linewidth = 1) plt.plot(x,scoreListDistance,"b-",label = "Weights by 
Distance",linewidth = 1) plt.xlabel("Neighbours") plt.ylabel("Score") plt.title("KNN Parameters Scores") plt.legend() plt.savefig("KnnParameters.png") plt.show() # ## KNN Parameters: weights # In[ ]: scoreList = [] nList = [] stepCount = 10 start = 1 end = 10 cls = KNeighborsClassifier(weights = 'uniform') cls.fit(xM, yM) res = cls.predict(xT) curScore = metrics.accuracy_score(yT, res) nList.append('uniform') scoreList.append(curScore) cls = KNeighborsClassifier(weights = 'distance') cls.fit(xM, yM) res = cls.predict(xT) curScore = metrics.accuracy_score(yT, res) nList.append('distance') scoreList.append(curScore) ''' # # Decision Tree # In[33]: from sklearn import tree cls = tree.DecisionTreeClassifier() cls = cls.fit(xM,yM) res = cls.predict(xT) print(metrics.confusion_matrix(yT,res), metrics.accuracy_score(yT,res),"Decision Tree") import random index = random.randint(0, 10) x_test = xT[index] y_tag = yT[index] DT_res = cls.predict([x_test]) x_test_image_list = list(x_test) x_temp = [] x_image = [] for j in range(100): for i in range(100): x_temp.append(x_test[i+j*100]) x_image.append(x_temp) x_temp = [] np.array(x_image) plt.title('the image is '+str(list(keyword2int.keys())[int(y_tag)])+' res is '+ str(list(keyword2int.keys())[int(DT_res[0])])) print(DT_res) print(list(keyword2int.keys())[int(y_tag)],list(keyword2int.keys())[int(DT_res[0])]) plt.imshow(x_image) plt.show() import sys sys.exit() # In[40]: from sklearn.tree import DecisionTreeClassifier tree = DecisionTreeClassifier(random_state=0, min_samples_split=2) tree.fit(xT,yT) print(tree.score(xT, yT),tree.score(xM, yM),"DecisionTreeClassifier") # In[37]: DecisionTreeClassifier() # # random forest # In[56]: from sklearn.ensemble import RandomForestClassifier forest = RandomForestClassifier(n_estimators=150, random_state=0) forest.fit(xT, yT) forest.score(xT, yT),forest.score(xM, yM) # In[44]: forest # In[74]: scoreListUniform = [] nListUniform = [] stepCount = 10 stepDist = 200 start = 200 end = 2001 for 
curCount in range(start,end,stepDist): forest = RandomForestClassifier(n_estimators = curCount, random_state=0) forest.fit(xT, yT) nListUniform.append(curCount) scoreListUniform.append(forest.score(xM, yM)) scoreListUniform # In[94]: # ### Gini Random # ### Entropy Best # ### Entropy Random # ## SVM # In[76]: ''' ''' from sklearn import svm cls = svm.SVC() cls = cls.fit(xM,yM) res = cls.predict(xT) print(metrics.confusion_matrix(yT,res), metrics.accuracy_score(yT,res), " svm") # In[77]: ''' cls = svm.LinearSVC (multi_class="crammer_singer") cls = cls.fit(xM,yM) res = cls.predict(xT) print(metrics.confusion_matrix(yT,res), metrics.accuracy_score(yT,res),' svc') # In[92]: clsList = [] stepCount = 10 start = 0.0 end = 1.0 # ### Parameter C # In[93]: stepLength = (end - start)/stepCount for i in range(1, stepCount): cur = start + (i * stepLength) cls = svm.SVC(C = cur) cls = cls.fit(xM,yM) res = cls.predict(xT) clsList.append(metrics.accuracy_score(yT,res)) clsList # In[95]: plt.plot(x,clsList,"r-",label = "Parameter C",linewidth = 1) plt.xlabel("Parameter C") plt.ylabel("percentage") plt.title("SVM ParametersC Scores") plt.legend() plt.savefig("SVMParameters.png") plt.show() ''' keyword2int = {'pine tree': 0, 'maple leaves': 1, 'sakura blossom': 2} classify(keyword2int)
18,228
d29c848f6d4b7e4ef9b8d03548d79c82794370e2
# -*- coding: utf-8 -*- from django.conf.urls import patterns, url from django.views.decorators.csrf import csrf_exempt from .views import (VideoTranscodeWebhookView, S3SignatureEndpoint, VideoFeedItemForm,) urlpatterns = patterns('', url(r'^webhook/(?P<pk>[\d-]+)/$', csrf_exempt(VideoTranscodeWebhookView.as_view()), name='webhook_heywatch'), url(r'^sign/s3/$', S3SignatureEndpoint.as_view(), name='s3signature'), url(r'^(?P<project_slug>[\w-]+)/video/((?P<slug>[\w-]+)/)?$', VideoFeedItemForm.as_view(), name='create_video_feeditem'), )
18,229
c58d3d5b7a956ce667fed460a846b893a5fe0f37
import os
from PIL import Image
import numpy
from math import trunc


def splitimage(src, rowrate, dstpath):
    """Split image *src* into horizontal strips at fractional heights.

    *rowrate* is an increasing list of fractions in [0, 1]; strip i spans
    rows [rowrate[i]*h, rowrate[i+1]*h).  Strips are saved next to *src*
    (or into *dstpath* if given) as <basename>_<n>.<ext>.
    """
    img = Image.open(src)
    w, h = img.size
    print('Original image info: %sx%s, %s, %s' % (w, h, img.format, img.mode))
    src_dir, src_name = os.path.split(src)
    if dstpath == '':
        dstpath = src_dir
    parts = src_name.split('.')
    basename = parts[0]
    ext = parts[-1]
    for num in range(len(rowrate) - 1):
        # crop() expects integer pixel bounds; rowrate[i]*h is a float.
        box = (0, int(rowrate[num] * h), w, int(rowrate[num + 1] * h))
        out = os.path.join(dstpath, basename + '_' + str(num) + '.' + ext)
        # Let Pillow infer the format from the file extension: passing the
        # raw lowercase extension as an explicit format (as before) fails
        # for extensions like 'jpg' that are not registered format names.
        img.crop(box).save(out)


def analyzeSheetMusic(src):
    """Return cut fractions for *src*: fully-white rows mark stave gaps.

    A row counts as a gap only if EVERY pixel is 255.  The previous
    implementation accumulated the white-pixel count across rows without
    resetting it, so pixels from several different rows could add up to a
    spurious "full white row".  Gap rows in the top 10% of the page, or
    within 10% of the previous cut, are skipped.  The list starts at 0 and
    ends at 1 so it feeds splitimage() directly.
    """
    img = Image.open(src)
    gray = numpy.asarray(img.convert('L'))
    h, w = gray.shape
    ratio = [0]
    for i in range(h):
        # Per-row check (resets implicitly every row — this is the bug fix).
        if not (gray[i] == 255).all():
            continue
        found = (i + 0.0) / h
        if found < 0.1:
            continue
        if found < ratio[-1] + 0.1:
            continue
        ratio.append(found)
    ratio.append(1)
    return ratio


src = '0.png'
if os.path.isfile(src):
    dstpath = ''
    if (dstpath == '') or os.path.exists(dstpath):
        rowlist = analyzeSheetMusic(src)
        splitimage(src, rowlist, dstpath)
else:
    print('notexist')
18,230
f04ec2e392790e559d4b6849a6cc0077806690d0
from django.shortcuts import HttpResponse from django.shortcuts import render from django.core.cache import cache import os import json from config.models import Pattern, Plat, RunCompany, AddVersion # http://127.0.0.1:8000 # 模式配置 from server_list.models import InsType def config_pattern(request): # 查询模式,数据类型为列表 pattern = [x[0] for x in Pattern.objects.values_list('pattern')] title = ['pattern_name'] # 发送给前端表格的数据 fina = [] # 模式名称 for p in pattern: pattern_server = [p] temp = dict(zip(title, pattern_server)) fina.append(temp) return render(request, 'config_pattern.html', {'data': json.dumps(fina)}) def get_pattern_name(request): pattern_name = request.POST['pattern_name'] cache.set('pattern_name', pattern_name) return HttpResponse(json.dumps('bingo')) # 编辑页面下确认后更新数据库 def confirm_edit(request): try: pattern_name = cache.get('pattern_name') select_ins_type = request.POST['select_ins_type'] info = Pattern.objects.get(pattern=pattern_name) # 如果编辑没有改变原来的值,则默认使用原来的值 pattern = request.POST['pattern'] if pattern == '': pattern = info.pattern player = request.POST['player'] if player == '': player = info.player_num cpu = request.POST['cpu'] if cpu == '': cpu = info.cpu_num memory = request.POST['memory'] if memory == '': memory = info.memory_num disk = request.POST['disk'] if disk == '': disk = info.disk_num flow = request.POST['flow'] if flow == '': flow = info.flow_num pay_type = request.POST['pay_type'] # 更新数据库 Pattern.objects.filter(pattern=pattern_name).update(ins_type=select_ins_type, pattern=pattern, player_num=player, cpu_num=cpu, memory_num=memory, disk_num=disk, flow_num=flow, pay_type=pay_type) return HttpResponse('修改成功') except Exception as e: print(e) return HttpResponse('请检查模式名称是否已经存在') # 模式编辑 def config_pattern_edit(request): pattern_name = cache.get('pattern_name') # 该模式的信息 info = Pattern.objects.get(pattern=pattern_name) # 所有实例类型 all_ins_type = [x[0] for x in InsType.objects.values_list('ins_type').distinct()] return render(request, 
'config_pattern_edit.html', {'all_ins_type': all_ins_type, 'ins_type': info.ins_type, 'pay_type': info.pay_type, 'pattern_name': info.pattern, 'player_num': info.player_num, 'cpu_num': info.cpu_num, 'memory_num': info.memory_num, 'disk_num': info.disk_num, 'flow_num': info.flow_num}) # 删除模式 def pattern_delete(request): name = request.POST['pattern_name'] Pattern.objects.filter(pattern=name).delete() return HttpResponse(json.dumps("bingo")) # 模式添加 def config_add_pattern(request): # 获取现有实例所有的实例类型。在定时更新的zero_ins_type表中获取 ins_type = [x[0] for x in InsType.objects.values_list('ins_type').distinct()] # 返回给前端页面以供添加模式时选择。 return render(request, 'config_add_pattern.html', {'ins_type': ins_type}) # 模式添加存库 def config_add_pattern_confirm(request): try: instype = request.POST['ins_type'] paytype = request.POST['pay_type'] pattern_name = request.POST['pattern_name'] player = request.POST['player'] cpu = request.POST['cpu'] memory = request.POST['memory'] disk = request.POST['disk'] flow = request.POST['flow'] patt = Pattern(ins_type=instype, pay_type=paytype, pattern=pattern_name, player_num=player, cpu_num=cpu, memory_num=memory, disk_num=disk, flow_num=flow) # 强制插入模式,防止添加模式的时候,模式名称重复 patt.save(force_insert=True) return HttpResponse('bingo') except Exception as e: print(e) return HttpResponse('error') # 版本编辑页面下确认后更新数据库 def version_confirm_edit(request): version_name = cache.get('version_name') info = AddVersion.objects.get(version=version_name) # 如果编辑没有改变原来的值,则默认使用原来的值 version = request.POST['version'] if version == '': version = info.version # 一个版本可以多个平台 plat = request.POST['plat'].replace('[', '').replace(']', '').replace('"', '') try: # 编辑后更新数据库,这里会修改主键,所以只能用update方法,不能拿出主键的值在保存,因为取出之后表中就没有该数据了,只能create AddVersion.objects.filter(version=version_name).update(version=version, plat=plat) return HttpResponse('修改成功') except Exception as e: print(e) return HttpResponse('请检查版本名称是否已经存在') # 版本配置 def config_version(request): version = AddVersion.objects.values_list('version') title = 
['version_name'] # 发送给前端表格的数据 fina = [] # 模式名称 for v in version: version_server = [v[0]] temp = dict(zip(title, version_server)) fina.append(temp) return render(request, 'config_version.html', {'data': json.dumps(fina)}) def get_version_name(request): version_name = request.POST['version_name'] cache.set('version_name', version_name) return HttpResponse(json.dumps('bingo')) # 版本编辑 def config_version_edit(request): version_name = cache.get('version_name') # 该版本的信息 info = AddVersion.objects.get(version=version_name).version # 可以选择的平台,数据类型为列表 plat = [x[0] for x in Plat.objects.values_list('plat')] return render(request, 'config_version_edit.html', {'version': info, 'plat': plat}) # 版本添加 def config_add_version(request): plat = [x[0] for x in Plat.objects.values_list('plat')] return render(request, 'config_add_version.html', {'plat': plat}) # 上传文件 def upload(request): file_obj = request.FILES.get('file_obj') filename = '/home/server/' + file_obj.name with open(filename, 'wb') as f: for chunk in file_obj.chunks(): f.write(chunk) return HttpResponse('传输完成') # 版本添加库 def config_add_version_confirm(request): version = request.POST['version'] plat = request.POST['plat'] file_obj = request.FILES.get('file_obj') filename = file_obj.name # 文件名有括号时, filename = filename.replace('(', '\(').replace(')', '\)') try: # 解压文件 cmd = "cd /home/server; 7z x %s" % filename # print(cmd) if os.system(cmd) != 0: return HttpResponse('服务器文件未能解压,请检查文件名') # 删除压缩文件 rmcmd = "rm -rf /home/server/%s" % filename if os.system(rmcmd) != 0: return HttpResponse('服务器压缩文件未能删除,请检查文件名') # 获得解压之后的文件名 filename = filename.replace('.7z', '') # 文件名更改为版本名 change_name = "mv /home/server/%s /home/server/%s" % (filename, version) if os.system(change_name) != 0: return HttpResponse('文件名未更改成功,请检查版本文件名是否重复') # 将启动服务器命令脚本拷贝至文件内 cmd_start = "cp /home/server/start.sh /home/server/%s" % version if os.system(cmd_start) != 0: return HttpResponse('启动服务器脚本未能拷贝至服务器文件内') # 更改文件名,和版本名一致 # rename_cmd = "mv /home/server/%s 
/home/server/%s" % (filename, version) # if os.system(rename_cmd) != 0: # return HttpResponse('文件名未能更改成版本名,垃圾!') # 添加版本 add_version = AddVersion(version=version, plat=plat) add_version.save(force_insert=True) return HttpResponse('服务器文件解压完毕,可以进行后续操作') except Exception as e: print(e) return HttpResponse('请检查服务器文件名是否重复') # 删除版本 def version_delete(request): name = request.POST['version_name'] AddVersion.objects.filter(version=name).delete() return HttpResponse(json.dumps("bingo")) # 运营商配置 def config_run_company(request): run_company = [x[0] for x in RunCompany.objects.all().values_list('run_company_name')] title = ['run_company_name'] # 发送给前端表格的数据 fina = [] # 模式名称 for run in run_company: run_company_server = [run] temp = dict(zip(title, run_company_server)) fina.append(temp) return render(request, 'config_run_company.html', {'data': json.dumps(fina)}) def get_run_company_name(request): run_company_name = request.POST['run_company_name'] cache.set('run_company_name', run_company_name) return HttpResponse(json.dumps('bingo')) # 运营商编辑 def config_run_company_edit(request): run_company_name = cache.get('run_company_name') # 该模式的信息 info = RunCompany.objects.get(run_company_name=run_company_name).run_company_name return render(request, 'config_run_company_edit.html', {'run_company': info}) # 运营商编辑页面下确认后更新数据库 def run_company_confirm_edit(request): run_company_name = cache.get('run_company_name') info = RunCompany.objects.get(run_company_name=run_company_name).run_company_name # 如果编辑没有改变原来的值,则默认使用原来的值 run_company = request.POST['run_company'] if run_company == '': run_company = info try: RunCompany.objects.filter(run_company_name=run_company_name).update(run_company_name=run_company) return HttpResponse('修改成功') except Exception as e: print(e) return HttpResponse('请检查运营商名称是否已经存在') # 删除运营商 def run_company_delete(request): name = request.POST['run_company_name'] RunCompany.objects.filter(run_company_name=name).delete() return HttpResponse(json.dumps("bingo")) # 运营商添加 def 
config_add_run_company(request): return render(request, 'config_add_run_company.html') # 运营商添加入库 def config_add_run_company_confirm(request): name = request.POST['name'] try: rc = RunCompany(run_company_name=name) rc.save(force_insert=True) return HttpResponse('添加成功') except Exception as e: print(e) return HttpResponse('运营商已经存在') # 平台配置 def config_plat(request): # 查询平台,结果类型为列表 plat = [x[0] for x in Plat.objects.all().values_list('plat')] title = ['plat_name'] # 发送给前端表格的数据 fina = [] # 模式名称 for p in plat: plat_server = [p] temp = dict(zip(title, plat_server)) fina.append(temp) return render(request, 'config_plat.html', {'data': json.dumps(fina)}) def get_plat_name(request): plat_name = request.POST['plat_name'] cache.set('plat_name', plat_name) return HttpResponse(json.dumps('bingo')) # 平台编辑 def config_plat_edit(request): plat_name = cache.get('plat_name') # 该模式的信息 info = Plat.objects.get(plat=plat_name).plat return render(request, 'config_plat_edit.html', {'plat': info}) # 平台编辑页面下确认后更新数据库 def plat_confirm_edit(request): plat_name = cache.get('plat_name') info = Plat.objects.get(plat=plat_name).plat # 如果编辑没有改变原来的值,则默认使用原来的值 plat = request.POST['plat'] if plat == '': plat = info try: Plat.objects.filter(plat=plat_name).update(plat=plat) return HttpResponse('修改成功') except Exception as e: print(e) return HttpResponse('请检查平台名称是否已经存在') # 删除运营商 def plat_delete(request): name = request.POST['plat_name'] Plat.objects.filter(plat=name).delete() return HttpResponse(json.dumps("bingo")) # 平台添加 def config_add_plat(request): return render(request, 'config_add_plat.html') # 平台添加入库 def config_add_plat_confirm(request): name = request.POST['name'] try: rc = Plat(plat=name) rc.save(force_insert=True) return HttpResponse('添加成功') except Exception as e: print(e) return HttpResponse('请检查平台名称是否已经存在')
18,231
f2a10b205470f120fd52f9221bd46329bbc8c938
import os
import sys

# Make the parent directory importable so project-local modules resolve.
sys.path.append('../')

# Absolute location of the fixtures file that lives next to this module,
# independent of the current working directory.
dirname = os.path.dirname(__file__)
filename = os.path.join(dirname, 'fixtures.json')
18,232
f7ae319e064dd8e9a5112935394dfe5f83615be6
# CMSSW PAT-tuple production configuration (PAT defaults + AK5 PF jets,
# TC/PF MET, flavor history and pruned gen particles).
from PhysicsTools.PatAlgos.patTemplate_cfg import *
from PhysicsTools.PatAlgos.tools.coreTools import *

# Toggle between collision data and MC; controls JEC levels and global tag.
useData=False

###############################
####### Parameters ############
###############################
from FWCore.ParameterSet.VarParsing import VarParsing
options = VarParsing ('python')

import sys

# get the 7 TeV jet corrections
from PhysicsTools.PatAlgos.tools.jetTools import *

if useData == False :
    # Make sure to NOT apply L2L3Residual to MC
    corrections = ['L2Relative', 'L3Absolute']
    # global tag for 384 MC
    process.GlobalTag.globaltag = cms.string('START311_V2::All')
else :
    # Make sure to apply L2L3Residual to data
    corrections = ['L2Relative', 'L3Absolute', 'L2L3Residual']
    # global tag for 386 data
    process.GlobalTag.globaltag = cms.string('GR_R_311_V2::All')

# add the flavor history
process.load("PhysicsTools.HepMCCandAlgos.flavorHistoryPaths_cfi")

# PF from RECO and not using PF2PAT
addJetCollection(process,cms.InputTag('ak5PFJets'),
                 'AK5', 'PF',
                 doJTA = True,
                 doBTagging = True,
                 jetCorrLabel = ('AK5PF',corrections),
                 doType1MET = False,
                 doL1Cleaning = False,
                 doL1Counters = False,
                 genJetCollection = cms.InputTag("ak5GenJets"),
                 doJetID = False,
                 jetIdLabel = "ak5"
                 )

# Add track-corrected and particle-flow MET collections alongside the default.
from PhysicsTools.PatAlgos.tools.metTools import *
addTcMET(process, 'TC')
addPfMET(process, 'PF')

# Kinematic selection for the stored jet collections.
process.selectedPatJetsAK5PF.cut = cms.string('pt > 20 & abs(eta) < 2.4')
process.selectedPatJets.cut = cms.string('pt > 20 & abs(eta) < 2.4')
# Keep secondary-vertex tag infos so the secvtxMass user function below works.
process.patJets.tagInfoSources = cms.VInputTag( cms.InputTag("secondaryVertexTagInfosAOD") )
process.patJetsAK5PF.tagInfoSources = cms.VInputTag( cms.InputTag("secondaryVertexTagInfosAK5PF") )
process.selectedPatMuons.cut = cms.string("pt > 10")
process.patMuons.usePV = False
process.patMuons.embedTrack = True
process.selectedPatElectrons.cut = cms.string("pt > 10")
process.patElectrons.usePV = False
process.patElectrons.embedTrack = True
# Store the secondary-vertex mass (0 when no SV) as a per-jet user float.
process.patJets.userData.userFunctions = cms.vstring( "? hasTagInfo('secondaryVertex') && tagInfoSecondaryVertex('secondaryVertex').nVertices() > 0 ? "
                                                      "tagInfoSecondaryVertex('secondaryVertex').secondaryVertex(0).p4().mass() : 0")
process.patJets.userData.userFunctionLabels = cms.vstring('secvtxMass')
process.patJetsAK5PF.userData.userFunctions = cms.vstring( "? hasTagInfo('secondaryVertex') && tagInfoSecondaryVertex('secondaryVertex').nVertices() > 0 ? "
                                                           "tagInfoSecondaryVertex('secondaryVertex').secondaryVertex(0).p4().mass() : 0")
process.patJetsAK5PF.userData.userFunctionLabels = cms.vstring('secvtxMass')
process.patJets.embedPFCandidates = True
process.patJets.embedCaloTowers = True
process.patJetsAK5PF.embedCaloTowers = True
process.patJetsAK5PF.embedPFCandidates = True

# prune gen particles
process.load("SimGeneral.HepPDTESSource.pythiapdt_cfi")
process.prunedGenParticles = cms.EDProducer("GenParticlePruner",
    src = cms.InputTag("genParticles"),
    select = cms.vstring(
        "drop *",
        "keep status = 3", #keeps all particles from the hard matrix element
        "+keep (abs(pdgId) = 11 | abs(pdgId) = 13) & status = 1" #keeps all stable muons and electrons and their (direct) mothers.
        )
    )

process.patseq = cms.Sequence(
    # process.scrapingVeto*
    # process.primaryVertexFilter*
    # process.HBHENoiseFilter*
    process.patDefaultSequence*
    process.flavorHistorySeq *
    process.prunedGenParticles
    )

process.p1 = cms.Path(
    process.patseq
    )

# Only write events that passed the p1 path.
process.out.SelectEvents.SelectEvents = cms.vstring('p1')

process.maxEvents.input = -1
process.options.wantSummary = True
process.out.dropMetaData = cms.untracked.string("DROPPED")

from PhysicsTools.PatAlgos.patEventContent_cff import patEventContentNoCleaning
from PhysicsTools.PatAlgos.patEventContent_cff import patExtraAodEventContent
from PhysicsTools.PatAlgos.patEventContent_cff import patTriggerEventContent
# Explicit output event content (commented entries kept for reference).
process.out.outputCommands = [
    'keep GenRunInfoProduct_generator_*_*',
    'keep GenEventInfoProduct_generator_*_*',
    'keep *_flavorHistoryFilter_*_*',
    'keep *_prunedGenParticles_*_*',
    # 'keep *_decaySubset_*_*',
    # 'keep *_initSubset_*_*',
    'drop *_cleanPat*_*_*',
    'keep *_selectedPat*_*_*',
    'keep *_patMETs*_*_*',
    # 'keep recoPFCandidates_particleFlow_*_*',
    'keep *_offlineBeamSpot_*_*',
    'keep *_offlinePrimaryVertices_*_*',
    'keep recoTracks_generalTracks_*_*',
    'drop patPFParticles_*_*_*',
    # 'keep patTriggerObjects_patTrigger_*_*',
    # 'keep patTriggerFilters_patTrigger_*_*',
    'keep patTriggerPaths_patTrigger_*_*',
    'keep patTriggerEvent_patTriggerEvent_*_*',
    # 'keep *_cleanPatPhotonsTriggerMatch_*_*',
    # 'keep *_cleanPatElectronsTriggerMatch_*_*',
    # 'keep *_cleanPatMuonsTriggerMatch_*_*',
    # 'keep *_cleanPatTausTriggerMatch_*_*',
    # 'keep *_patMETsTriggerMatch_*_*',
    'drop *_MEtoEDMConverter_*_*'
    ]
18,233
b2738d1fc0ea799b1ba79970e239da7708f194eb
{"name":"main-win.py","path":"main-win.py","sha":"481f4e71153a7443c50ee90d9aaa9dc41c204f3a","size":940,"url":"https://api.github.com/repos/aryoume/Project1/contents/main-win.py?ref=fb1913815eff502c0a0f313687e3391babf9d9f8","html_url":"https://github.com/aryoume/Project1/blob/fb1913815eff502c0a0f313687e3391babf9d9f8/main-win.py","git_url":"https://api.github.com/repos/aryoume/Project1/git/blobs/481f4e71153a7443c50ee90d9aaa9dc41c204f3a","download_url":"https://raw.githubusercontent.com/aryoume/Project1/fb1913815eff502c0a0f313687e3391babf9d9f8/main-win.py","type":"file","content":"aW1wb3J0IHVybGxpYi5yZXF1ZXN0CmltcG9ydCB1cmxsaWIucGFyc2UKaW1w\nb3J0IG9zCmltcG9ydCB0aW1lCmltcG9ydCBzc2wKc3NsLl9jcmVhdGVfZGVm\nYXVsdF9odHRwc19jb250ZXh0ID0gc3NsLl9jcmVhdGVfdW52ZXJpZmllZF9j\nb250ZXh0Cgpvcy5zeXN0ZW0oImNscyAmJiBlY2hvIEFSWW91TWUiKQpvcy5z\neXN0ZW0oImVjaG8gQ29tbWVudCIpCgptc2cgPSBpbnB1dCgiTWVzc2FnZTog\nIikKdG9rZW4gPSBpbnB1dCgiVG9rZW46ICIpCmlkID0gaW5wdXQoIklkIFBv\nc3Q6ICIpCgpkZWYgbWFpbigpOgogICAgb3Muc3lzdGVtKCJjbHMgJiYgZWNo\nbyBDb21tZW50IikKICAgIHByaW50KCJbMV0gMTAgQ29tbWVudFxuIiArICJb\nMl0gMTAwIENvbW1lbnRcbiIgKyAiWzNdIDUwMCBDb21tZW50IikKICAgIG1h\naW4gPSBpbnB1dCgiRW50ZXIgTnVtYmVyOiAiKQogICAgb3Muc3lzdGVtKCJj\nbHMgJiYgZWNobyBTdGFydCIpCiAgICBuID0gMTAgaWYgbWFpbiA9PSAiMSIg\nZWxzZSAxMDAgaWYgbWFpbiA9PSAiMiIgZWxzZSA1MDAgaWYgbWFpbiA9PSAi\nMyIgZWxzZSAwCiAgICBpZiBuID09IDA6CiAgICAgICAgb3Muc3lzdGVtKCJj\nbHMgJiYgZWNobyBXcm9uZyEiKQogICAgICAgIHRpbWUuc2xlZXAoMC41KQog\nICAgICAgIG9zLnN5c3RlbSgicHl0aG9uIG1haW4ucHkiKSAKICAgIGZsb29k\nKG4pCgpkZWYgZmxvb2Qobik6CiAgICBmb3IgaSBpbiByYW5nZShuKToKICAg\nICAgICB1cmxsaWIucmVxdWVzdC51cmxvcGVuKCIgaHR0cHM6Ly9ncmFwaC5m\nYWNlYm9vay5jb20vIiArIGlkICsgIi9jb21tZW50cz9tZXNzYWdlPSIgKyB1\ncmxsaWIucGFyc2UucXVvdGVfcGx1cyhtc2cpICsiJm1ldGhvZD1wb3N0JmFj\nY2Vzc190b2tlbj0iICsgdG9rZW4pCiAgICBwcmludCgiRG9uZSIpCgppZiBf\nX25hbWVfXyA9PSAiX19tYWluX18iOgogICAgbWFpbigpCgogICAgCg==\n","encoding":"base64","_links":{"self":"https://api.github.com/repos/aryoume/Project1/contents/main-win.py?ref=fb1913
815eff502c0a0f313687e3391babf9d9f8","git":"https://api.github.com/repos/aryoume/Project1/git/blobs/481f4e71153a7443c50ee90d9aaa9dc41c204f3a","html":"https://github.com/aryoume/Project1/blob/fb1913815eff502c0a0f313687e3391babf9d9f8/main-win.py"}}
18,234
2cc094365bf875c716ee72b765abe2b621f0e204
# CMSSW AlCaReco skim for ECAL preshower (ES) alignment: HLT selection
# followed by a track-based event skim.
import FWCore.ParameterSet.Config as cms

import HLTrigger.HLTfilters.hltHighLevel_cfi
ALCARECOEcalESAlignHLT = HLTrigger.HLTfilters.hltHighLevel_cfi.hltHighLevel.clone(
    andOr = True, # choose logical OR between Triggerbits
    eventSetupPathsKey = 'EcalESAlign',
    throw = False # tolerate triggers stated above, but not available
    )

# this imports the module that produces a reduced collections for ES alignment
#from Calibration.EcalAlCaRecoProducers.EcalAlCaESAlignTrackReducer_cfi import *

# this imports the filter that skims the events requiring a min number of selected tracks
# Select forward (|eta| > 1.65, i.e. pointing at the ES), well-measured tracks.
esSelectedTracks = cms.EDFilter("TrackSelector",
    src = cms.InputTag('generalTracks'),
    cut = cms.string("abs(eta)>1.65 && pt>1 && numberOfValidHits>=10")
    )

import Alignment.CommonAlignmentProducer.AlignmentTrackSelector_cfi
# Thin the selected tracks for alignment use; basic cuts are disabled because
# the TrackSelector above already applied them.
ecalAlCaESAlignTrackReducer = Alignment.CommonAlignmentProducer.AlignmentTrackSelector_cfi.AlignmentTrackSelector.clone(
    src = cms.InputTag('esSelectedTracks'),
    filter = True, ##do not store empty events
    applyBasicCuts = False,
    ptMin = 1.0, ##GeV
    etaMin = -3.5,
    etaMax = 3.5,
    nHitMin = 0
    )

# Keep only events with at least 10 reduced tracks.
esMinTrackNumberFilter = cms.EDFilter("TrackCountFilter",
    src = cms.InputTag('ecalAlCaESAlignTrackReducer'),
    minNumber = cms.uint32(10)
    )

EcalESAlignTracksSkimSeq = cms.Sequence( esSelectedTracks * ecalAlCaESAlignTrackReducer * esMinTrackNumberFilter)
seqEcalESAlign = cms.Sequence(ALCARECOEcalESAlignHLT * EcalESAlignTracksSkimSeq)
18,235
567316e1be3cce2fa1743d9a1b93d4f69424b2d1
def lcs(a, b):
    """Print the LCS dynamic-programming table for *a* and *b* and return the LCS length.

    table[i][j] holds the length of the longest common subsequence of the
    prefixes a[:i] and b[:j]; row/column 0 are the empty-prefix base case.
    The table and the final length are printed exactly as before; the length
    is additionally returned so callers can use the result programmatically
    (the original implementation returned None).
    """
    # Renamed from `lcs` — the original local list shadowed the function itself.
    table = [[None] * (len(b) + 1) for _ in range(len(a) + 1)]
    for i in range(0, len(a) + 1):
        for j in range(0, len(b) + 1):
            if i == 0 or j == 0:
                table[i][j] = 0
            elif a[i - 1] == b[j - 1]:
                # Matching tail elements extend the LCS of both prefixes by one.
                table[i][j] = 1 + table[i - 1][j - 1]
            else:
                # Otherwise drop one tail element from either sequence.
                table[i][j] = max(table[i - 1][j], table[i][j - 1])
    # Diagnostic dump of the DP table (entries printed unseparated, as before).
    for i in range(0, len(a) + 1):
        for j in range(0, len(b) + 1):
            print(table[i][j], end="")
        print()
    print("The length of the LCS", table[len(a)][len(b)])
    return table[len(a)][len(b)]


if __name__ == "__main__":
    # Demo input; guarded so importing this module has no print side effects.
    a = [5, 3, 2, 7, 8, 1, 6]
    b = [7, 1, 6, 8, 2, 5, 9]
    lcs(a, b)
18,236
c800efc442d70932f6e625d4f3eac58789f85554
# -*- coding: utf-8 -*-

# Form implementation generated from reading ui file 'mainwindow.ui'
#
# Created: Sun Nov 8 22:41:27 2015
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
# NOTE(review): auto-generated by pyuic4 — edit mainwindow.ui and regenerate
# rather than hand-editing this module.

from PyQt4 import QtCore, QtGui

# Compatibility shims for PyQt4 builds without QString.fromUtf8 / UnicodeUTF8.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        return s

try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)


class Ui_MainWindow(object):
    """Generated UI for the 'Wifi Sniffer' tool.

    Three stacked frames: viewOne (floor/wing/room selection), viewTwo
    (scanning a room), viewThree (gathered-data table and file export).
    viewOne/viewTwo are created at y=600, i.e. off the 600px-high window,
    presumably so application code slides them into view — TODO confirm.
    """

    def setupUi(self, MainWindow):
        """Create all widgets, layouts and static properties on *MainWindow*."""
        MainWindow.setObjectName(_fromUtf8("MainWindow"))
        MainWindow.setEnabled(True)
        MainWindow.resize(600, 600)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Maximum, QtGui.QSizePolicy.Maximum)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(MainWindow.sizePolicy().hasHeightForWidth())
        MainWindow.setSizePolicy(sizePolicy)
        # Fixed 600x600 window (min == max).
        MainWindow.setMinimumSize(QtCore.QSize(600, 600))
        MainWindow.setMaximumSize(QtCore.QSize(600, 600))
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap(_fromUtf8("assets/logo.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        MainWindow.setWindowIcon(icon)
        # --- viewOne: floor / wing / room selection -------------------------
        self.viewOne = QtGui.QFrame(MainWindow)
        self.viewOne.setEnabled(True)
        self.viewOne.setGeometry(QtCore.QRect(10, 600, 580, 580))
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Maximum, QtGui.QSizePolicy.Minimum)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.viewOne.sizePolicy().hasHeightForWidth())
        self.viewOne.setSizePolicy(sizePolicy)
        self.viewOne.setMinimumSize(QtCore.QSize(580, 580))
        self.viewOne.setMaximumSize(QtCore.QSize(580, 580))
        self.viewOne.setLayoutDirection(QtCore.Qt.LeftToRight)
        self.viewOne.setStyleSheet(_fromUtf8(""))
        self.viewOne.setFrameShape(QtGui.QFrame.StyledPanel)
        self.viewOne.setFrameShadow(QtGui.QFrame.Raised)
        self.viewOne.setObjectName(_fromUtf8("viewOne"))
        self.gridLayout = QtGui.QGridLayout(self.viewOne)
        self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
        # "Next" button, disabled until a valid selection is made.
        self.floorNext = QtGui.QPushButton(self.viewOne)
        self.floorNext.setEnabled(False)
        self.floorNext.setMinimumSize(QtCore.QSize(150, 0))
        self.floorNext.setObjectName(_fromUtf8("floorNext"))
        self.gridLayout.addWidget(self.floorNext, 5, 2, 1, 1)
        # Logo label.
        self.label_2 = QtGui.QLabel(self.viewOne)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(150)
        sizePolicy.setVerticalStretch(150)
        sizePolicy.setHeightForWidth(self.label_2.sizePolicy().hasHeightForWidth())
        self.label_2.setSizePolicy(sizePolicy)
        self.label_2.setMaximumSize(QtCore.QSize(150, 150))
        self.label_2.setText(_fromUtf8(""))
        self.label_2.setPixmap(QtGui.QPixmap(_fromUtf8("assets/logo.png")))
        self.label_2.setScaledContents(True)
        self.label_2.setObjectName(_fromUtf8("label_2"))
        self.gridLayout.addWidget(self.label_2, 0, 2, 1, 1)
        # Floor-plan image; default picture is the ground floor, U wing.
        self.floorImage = QtGui.QLabel(self.viewOne)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Maximum, QtGui.QSizePolicy.Maximum)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.floorImage.sizePolicy().hasHeightForWidth())
        self.floorImage.setSizePolicy(sizePolicy)
        self.floorImage.setMinimumSize(QtCore.QSize(400, 0))
        self.floorImage.setMaximumSize(QtCore.QSize(400, 16777215))
        font = QtGui.QFont()
        font.setBold(False)
        font.setWeight(50)
        self.floorImage.setFont(font)
        self.floorImage.setText(_fromUtf8(""))
        self.floorImage.setPixmap(QtGui.QPixmap(_fromUtf8("assets/0U.jpg")))
        self.floorImage.setScaledContents(True)
        self.floorImage.setAlignment(QtCore.Qt.AlignCenter)
        self.floorImage.setObjectName(_fromUtf8("floorImage"))
        self.gridLayout.addWidget(self.floorImage, 0, 0, 6, 1)
        # Floor selection radio group.
        self.selectFloor = QtGui.QGroupBox(self.viewOne)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(150)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.selectFloor.sizePolicy().hasHeightForWidth())
        self.selectFloor.setSizePolicy(sizePolicy)
        self.selectFloor.setMinimumSize(QtCore.QSize(150, 0))
        self.selectFloor.setMaximumSize(QtCore.QSize(150, 16777215))
        font = QtGui.QFont()
        font.setBold(False)
        font.setWeight(50)
        self.selectFloor.setFont(font)
        self.selectFloor.setAlignment(QtCore.Qt.AlignCenter)
        self.selectFloor.setObjectName(_fromUtf8("selectFloor"))
        self.gridLayout_6 = QtGui.QGridLayout(self.selectFloor)
        self.gridLayout_6.setObjectName(_fromUtf8("gridLayout_6"))
        self.gridLayout_5 = QtGui.QGridLayout()
        self.gridLayout_5.setObjectName(_fromUtf8("gridLayout_5"))
        self.secondFloor = QtGui.QRadioButton(self.selectFloor)
        self.secondFloor.setLayoutDirection(QtCore.Qt.LeftToRight)
        self.secondFloor.setObjectName(_fromUtf8("secondFloor"))
        self.gridLayout_5.addWidget(self.secondFloor, 2, 0, 1, 1)
        self.firstFloor = QtGui.QRadioButton(self.selectFloor)
        self.firstFloor.setLayoutDirection(QtCore.Qt.LeftToRight)
        self.firstFloor.setObjectName(_fromUtf8("firstFloor"))
        self.gridLayout_5.addWidget(self.firstFloor, 1, 0, 1, 1)
        self.groundFloor = QtGui.QRadioButton(self.selectFloor)
        self.groundFloor.setLayoutDirection(QtCore.Qt.LeftToRight)
        self.groundFloor.setChecked(True)
        self.groundFloor.setObjectName(_fromUtf8("groundFloor"))
        self.gridLayout_5.addWidget(self.groundFloor, 0, 0, 1, 1)
        self.gridLayout_6.addLayout(self.gridLayout_5, 0, 0, 1, 1)
        self.gridLayout.addWidget(self.selectFloor, 1, 2, 1, 1)
        # Room selection combo box.
        self.groupBox = QtGui.QGroupBox(self.viewOne)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Maximum)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.groupBox.sizePolicy().hasHeightForWidth())
        self.groupBox.setSizePolicy(sizePolicy)
        self.groupBox.setAlignment(QtCore.Qt.AlignCenter)
        self.groupBox.setObjectName(_fromUtf8("groupBox"))
        self.gridLayout_8 = QtGui.QGridLayout(self.groupBox)
        self.gridLayout_8.setObjectName(_fromUtf8("gridLayout_8"))
        self.selectRoom = QtGui.QComboBox(self.groupBox)
        self.selectRoom.setObjectName(_fromUtf8("selectRoom"))
        self.gridLayout_8.addWidget(self.selectRoom, 0, 0, 1, 1)
        self.gridLayout.addWidget(self.groupBox, 3, 2, 1, 1)
        # Wing selection radio group (U..Z); U checked by default.
        self.selectWing = QtGui.QGroupBox(self.viewOne)
        self.selectWing.setEnabled(True)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.selectWing.sizePolicy().hasHeightForWidth())
        self.selectWing.setSizePolicy(sizePolicy)
        self.selectWing.setAlignment(QtCore.Qt.AlignCenter)
        self.selectWing.setObjectName(_fromUtf8("selectWing"))
        self.gridLayout_7 = QtGui.QGridLayout(self.selectWing)
        self.gridLayout_7.setObjectName(_fromUtf8("gridLayout_7"))
        self.u = QtGui.QRadioButton(self.selectWing)
        self.u.setChecked(True)
        self.u.setObjectName(_fromUtf8("u"))
        self.gridLayout_7.addWidget(self.u, 0, 0, 1, 1)
        self.v = QtGui.QRadioButton(self.selectWing)
        self.v.setObjectName(_fromUtf8("v"))
        self.gridLayout_7.addWidget(self.v, 0, 1, 1, 1)
        self.w = QtGui.QRadioButton(self.selectWing)
        self.w.setObjectName(_fromUtf8("w"))
        self.gridLayout_7.addWidget(self.w, 0, 2, 1, 1)
        self.z = QtGui.QRadioButton(self.selectWing)
        self.z.setObjectName(_fromUtf8("z"))
        self.gridLayout_7.addWidget(self.z, 1, 2, 1, 1)
        self.y = QtGui.QRadioButton(self.selectWing)
        self.y.setObjectName(_fromUtf8("y"))
        self.gridLayout_7.addWidget(self.y, 1, 1, 1, 1)
        self.x = QtGui.QRadioButton(self.selectWing)
        self.x.setObjectName(_fromUtf8("x"))
        self.gridLayout_7.addWidget(self.x, 1, 0, 1, 1)
        self.gridLayout.addWidget(self.selectWing, 2, 2, 1, 1)
        # --- viewTwo: room scan -------------------------------------------
        self.viewTwo = QtGui.QFrame(MainWindow)
        self.viewTwo.setGeometry(QtCore.QRect(10, 600, 578, 572))
        self.viewTwo.setStyleSheet(_fromUtf8(""))
        self.viewTwo.setFrameShape(QtGui.QFrame.StyledPanel)
        self.viewTwo.setFrameShadow(QtGui.QFrame.Raised)
        self.viewTwo.setObjectName(_fromUtf8("viewTwo"))
        self.gridLayout_2 = QtGui.QGridLayout(self.viewTwo)
        self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
        self.roomNext = QtGui.QPushButton(self.viewTwo)
        self.roomNext.setEnabled(False)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.roomNext.sizePolicy().hasHeightForWidth())
        self.roomNext.setSizePolicy(sizePolicy)
        self.roomNext.setMinimumSize(QtCore.QSize(150, 0))
        self.roomNext.setMaximumSize(QtCore.QSize(75, 16777215))
        self.roomNext.setObjectName(_fromUtf8("roomNext"))
        self.gridLayout_2.addWidget(self.roomNext, 2, 4, 1, 1)
        # Instructions + scan button.
        self.groupBox_2 = QtGui.QGroupBox(self.viewTwo)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.groupBox_2.sizePolicy().hasHeightForWidth())
        self.groupBox_2.setSizePolicy(sizePolicy)
        self.groupBox_2.setMinimumSize(QtCore.QSize(150, 0))
        self.groupBox_2.setMaximumSize(QtCore.QSize(150, 16777215))
        self.groupBox_2.setAlignment(QtCore.Qt.AlignCenter)
        self.groupBox_2.setObjectName(_fromUtf8("groupBox_2"))
        self.verticalLayout = QtGui.QVBoxLayout(self.groupBox_2)
        self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
        self.scanInfo = QtGui.QPlainTextEdit(self.groupBox_2)
        self.scanInfo.setFrameShape(QtGui.QFrame.Box)
        self.scanInfo.setFrameShadow(QtGui.QFrame.Plain)
        self.scanInfo.setReadOnly(True)
        self.scanInfo.setBackgroundVisible(False)
        self.scanInfo.setCenterOnScroll(False)
        self.scanInfo.setObjectName(_fromUtf8("scanInfo"))
        self.verticalLayout.addWidget(self.scanInfo)
        self.scanButton = QtGui.QPushButton(self.groupBox_2)
        self.scanButton.setObjectName(_fromUtf8("scanButton"))
        self.verticalLayout.addWidget(self.scanButton)
        self.gridLayout_2.addWidget(self.groupBox_2, 1, 4, 1, 1)
        # Logo label (viewTwo copy).
        self.label_4 = QtGui.QLabel(self.viewTwo)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(150)
        sizePolicy.setVerticalStretch(150)
        sizePolicy.setHeightForWidth(self.label_4.sizePolicy().hasHeightForWidth())
        self.label_4.setSizePolicy(sizePolicy)
        self.label_4.setMaximumSize(QtCore.QSize(150, 150))
        self.label_4.setText(_fromUtf8(""))
        self.label_4.setPixmap(QtGui.QPixmap(_fromUtf8("assets/logo.png")))
        self.label_4.setScaledContents(True)
        self.label_4.setObjectName(_fromUtf8("label_4"))
        self.gridLayout_2.addWidget(self.label_4, 0, 4, 1, 1)
        # Picture of the room being scanned.
        self.roomImage = QtGui.QLabel(self.viewTwo)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Maximum, QtGui.QSizePolicy.Maximum)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.roomImage.sizePolicy().hasHeightForWidth())
        self.roomImage.setSizePolicy(sizePolicy)
        self.roomImage.setMinimumSize(QtCore.QSize(400, 0))
        self.roomImage.setMaximumSize(QtCore.QSize(400, 16777215))
        self.roomImage.setLineWidth(0)
        self.roomImage.setText(_fromUtf8(""))
        self.roomImage.setPixmap(QtGui.QPixmap(_fromUtf8("assets/rooms/U103.jpg")))
        self.roomImage.setScaledContents(True)
        self.roomImage.setObjectName(_fromUtf8("roomImage"))
        self.gridLayout_2.addWidget(self.roomImage, 0, 0, 3, 1)
        # --- viewThree: results + export (visible at startup) --------------
        self.viewThree = QtGui.QFrame(MainWindow)
        self.viewThree.setGeometry(QtCore.QRect(10, 10, 581, 581))
        self.viewThree.setStyleSheet(_fromUtf8(""))
        self.viewThree.setFrameShape(QtGui.QFrame.StyledPanel)
        self.viewThree.setFrameShadow(QtGui.QFrame.Raised)
        self.viewThree.setObjectName(_fromUtf8("viewThree"))
        self.gridLayout_3 = QtGui.QGridLayout(self.viewThree)
        self.gridLayout_3.setObjectName(_fromUtf8("gridLayout_3"))
        # File-export controls.
        self.saveFileBox = QtGui.QGroupBox(self.viewThree)
        self.saveFileBox.setAlignment(QtCore.Qt.AlignCenter)
        self.saveFileBox.setObjectName(_fromUtf8("saveFileBox"))
        self.gridLayout_9 = QtGui.QGridLayout(self.saveFileBox)
        self.gridLayout_9.setObjectName(_fromUtf8("gridLayout_9"))
        self.gridLayout_10 = QtGui.QGridLayout()
        self.gridLayout_10.setObjectName(_fromUtf8("gridLayout_10"))
        self.saveInfo = QtGui.QLabel(self.saveFileBox)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.saveInfo.setFont(font)
        self.saveInfo.setObjectName(_fromUtf8("saveInfo"))
        self.gridLayout_10.addWidget(self.saveInfo, 0, 1, 1, 1)
        self.saveData2 = QtGui.QLabel(self.saveFileBox)
        self.saveData2.setObjectName(_fromUtf8("saveData2"))
        self.gridLayout_10.addWidget(self.saveData2, 2, 1, 1, 1)
        self.saveFileBttn = QtGui.QPushButton(self.saveFileBox)
        self.saveFileBttn.setEnabled(False)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.saveFileBttn.sizePolicy().hasHeightForWidth())
        self.saveFileBttn.setSizePolicy(sizePolicy)
        self.saveFileBttn.setObjectName(_fromUtf8("saveFileBttn"))
        self.gridLayout_10.addWidget(self.saveFileBttn, 4, 3, 1, 1, QtCore.Qt.AlignHCenter)
        self.saveFileLoc = QtGui.QLabel(self.saveFileBox)
        self.saveFileLoc.setObjectName(_fromUtf8("saveFileLoc"))
        self.gridLayout_10.addWidget(self.saveFileLoc, 4, 1, 1, 1)
        self.logFile = QtGui.QRadioButton(self.saveFileBox)
        self.logFile.setObjectName(_fromUtf8("logFile"))
        self.gridLayout_10.addWidget(self.logFile, 2, 3, 1, 1)
        self.saveData1 = QtGui.QLabel(self.saveFileBox)
        self.saveData1.setObjectName(_fromUtf8("saveData1"))
        self.gridLayout_10.addWidget(self.saveData1, 1, 1, 1, 1)
        self.csv = QtGui.QRadioButton(self.saveFileBox)
        self.csv.setObjectName(_fromUtf8("csv"))
        self.gridLayout_10.addWidget(self.csv, 0, 3, 1, 1)
        self.saveFileIcon = QtGui.QLabel(self.saveFileBox)
        self.saveFileIcon.setMaximumSize(QtCore.QSize(110, 115))
        self.saveFileIcon.setText(_fromUtf8(""))
        self.saveFileIcon.setPixmap(QtGui.QPixmap(_fromUtf8("assets/saveIcon.png")))
        self.saveFileIcon.setScaledContents(True)
        self.saveFileIcon.setMargin(5)
        self.saveFileIcon.setObjectName(_fromUtf8("saveFileIcon"))
        self.gridLayout_10.addWidget(self.saveFileIcon, 0, 0, 5, 1)
        self.line = QtGui.QFrame(self.saveFileBox)
        self.line.setFrameShape(QtGui.QFrame.VLine)
        self.line.setFrameShadow(QtGui.QFrame.Sunken)
        self.line.setObjectName(_fromUtf8("line"))
        self.gridLayout_10.addWidget(self.line, 0, 2, 5, 1)
        self.database = QtGui.QRadioButton(self.saveFileBox)
        self.database.setObjectName(_fromUtf8("database"))
        self.gridLayout_10.addWidget(self.database, 1, 3, 1, 1)
        self.allFiles = QtGui.QRadioButton(self.saveFileBox)
        self.allFiles.setObjectName(_fromUtf8("allFiles"))
        self.gridLayout_10.addWidget(self.allFiles, 3, 3, 1, 1)
        self.gridLayout_9.addLayout(self.gridLayout_10, 0, 0, 1, 1)
        self.gridLayout_3.addWidget(self.saveFileBox, 1, 0, 1, 3)
        self.bannerImage = QtGui.QLabel(self.viewThree)
        self.bannerImage.setText(_fromUtf8(""))
        self.bannerImage.setPixmap(QtGui.QPixmap(_fromUtf8("assets/teamAwesome.png")))
        self.bannerImage.setAlignment(QtCore.Qt.AlignCenter)
        self.bannerImage.setObjectName(_fromUtf8("bannerImage"))
        self.gridLayout_3.addWidget(self.bannerImage, 2, 0, 1, 3)
        spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.gridLayout_3.addItem(spacerItem, 3, 0, 1, 1)
        self.restartButton = QtGui.QPushButton(self.viewThree)
        self.restartButton.setObjectName(_fromUtf8("restartButton"))
        self.gridLayout_3.addWidget(self.restartButton, 3, 1, 1, 1)
        # Results table: rows N/S/E/W/C (scan points), seven metric columns.
        self.groupBox_3 = QtGui.QGroupBox(self.viewThree)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Maximum)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.groupBox_3.sizePolicy().hasHeightForWidth())
        self.groupBox_3.setSizePolicy(sizePolicy)
        self.groupBox_3.setAlignment(QtCore.Qt.AlignCenter)
        self.groupBox_3.setObjectName(_fromUtf8("groupBox_3"))
        self.gridLayout_4 = QtGui.QGridLayout(self.groupBox_3)
        self.gridLayout_4.setObjectName(_fromUtf8("gridLayout_4"))
        self.dataTable = QtGui.QTableWidget(self.groupBox_3)
        self.dataTable.setMaximumSize(QtCore.QSize(16777215, 175))
        self.dataTable.setFrameShape(QtGui.QFrame.Box)
        self.dataTable.setFrameShadow(QtGui.QFrame.Plain)
        self.dataTable.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
        self.dataTable.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
        self.dataTable.setEditTriggers(QtGui.QAbstractItemView.NoEditTriggers)
        self.dataTable.setAlternatingRowColors(True)
        self.dataTable.setSelectionMode(QtGui.QAbstractItemView.NoSelection)
        self.dataTable.setShowGrid(True)
        self.dataTable.setObjectName(_fromUtf8("dataTable"))
        self.dataTable.setColumnCount(7)
        self.dataTable.setRowCount(5)
        item = QtGui.QTableWidgetItem()
        self.dataTable.setVerticalHeaderItem(0, item)
        item = QtGui.QTableWidgetItem()
        self.dataTable.setVerticalHeaderItem(1, item)
        item = QtGui.QTableWidgetItem()
        self.dataTable.setVerticalHeaderItem(2, item)
        item = QtGui.QTableWidgetItem()
        self.dataTable.setVerticalHeaderItem(3, item)
        item = QtGui.QTableWidgetItem()
        self.dataTable.setVerticalHeaderItem(4, item)
        item = QtGui.QTableWidgetItem()
        self.dataTable.setHorizontalHeaderItem(0, item)
        item = QtGui.QTableWidgetItem()
        self.dataTable.setHorizontalHeaderItem(1, item)
        item = QtGui.QTableWidgetItem()
        self.dataTable.setHorizontalHeaderItem(2, item)
        item = QtGui.QTableWidgetItem()
        self.dataTable.setHorizontalHeaderItem(3, item)
        item = QtGui.QTableWidgetItem()
        self.dataTable.setHorizontalHeaderItem(4, item)
        item = QtGui.QTableWidgetItem()
        self.dataTable.setHorizontalHeaderItem(5, item)
        item = QtGui.QTableWidgetItem()
        self.dataTable.setHorizontalHeaderItem(6, item)
        item = QtGui.QTableWidgetItem()
        self.dataTable.setItem(0, 0, item)
        item = QtGui.QTableWidgetItem()
        self.dataTable.setItem(0, 1, item)
        item = QtGui.QTableWidgetItem()
        self.dataTable.setItem(0, 2, item)
        item = QtGui.QTableWidgetItem()
        self.dataTable.setItem(0, 3, item)
        item = QtGui.QTableWidgetItem()
        self.dataTable.setItem(0, 4, item)
        item = QtGui.QTableWidgetItem()
        self.dataTable.setItem(0, 5, item)
        item = QtGui.QTableWidgetItem()
        self.dataTable.setItem(0, 6, item)
        self.dataTable.horizontalHeader().setDefaultSectionSize(70)
        self.dataTable.horizontalHeader().setStretchLastSection(True)
        self.dataTable.verticalHeader().setStretchLastSection(True)
        self.gridLayout_4.addWidget(self.dataTable, 0, 0, 1, 1)
        self.gridLayout_3.addWidget(self.groupBox_3, 0, 0, 1, 3)
        self.exitButton = QtGui.QPushButton(self.viewThree)
        self.exitButton.setObjectName(_fromUtf8("exitButton"))
        self.gridLayout_3.addWidget(self.exitButton, 3, 2, 1, 1)

        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        """Set all user-visible strings (kept separate for Qt translation)."""
        MainWindow.setWindowTitle(_translate("MainWindow", "Wifi Sniffer", None))
        self.floorNext.setText(_translate("MainWindow", "Next", None))
        self.selectFloor.setTitle(_translate("MainWindow", " Please Select a Floor ", None))
        self.secondFloor.setText(_translate("MainWindow", "Second Floor", None))
        self.firstFloor.setText(_translate("MainWindow", "First Floor", None))
        self.groundFloor.setText(_translate("MainWindow", "Ground Floor", None))
        self.groupBox.setTitle(_translate("MainWindow", " Select Room ", None))
        self.selectWing.setTitle(_translate("MainWindow", " Select Wing ", None))
        self.u.setText(_translate("MainWindow", "U", None))
        self.v.setText(_translate("MainWindow", "V", None))
        self.w.setText(_translate("MainWindow", "W", None))
        self.z.setText(_translate("MainWindow", "Z", None))
        self.y.setText(_translate("MainWindow", "Y", None))
        self.x.setText(_translate("MainWindow", "X", None))
        self.roomNext.setText(_translate("MainWindow", "Next", None))
        self.groupBox_2.setTitle(_translate("MainWindow", "Room Information", None))
        self.scanInfo.setPlainText(_translate("MainWindow", "Stand at the North most point of the room and click the scan button, Repeat these steps for each point as requested.\n"
"\n"
"Once the scan is complete the results will be displayed.", None))
        self.scanButton.setText(_translate("MainWindow", "Scan North", None))
        self.saveFileBox.setTitle(_translate("MainWindow", "Save File", None))
        self.saveInfo.setText(_translate("MainWindow", "Save Information:", None))
        self.saveData2.setText(_translate("MainWindow", "File will be saved in the programs directory.", None))
        self.saveFileBttn.setText(_translate("MainWindow", "Save File", None))
        self.saveFileLoc.setText(_translate("MainWindow", "", None))
        self.logFile.setText(_translate("MainWindow", ".log - Log file", None))
        self.saveData1.setText(_translate("MainWindow", "Export the data as a file of your choice.", None))
        self.csv.setText(_translate("MainWindow", ".csv - Comma Seperated Value", None))
        self.database.setText(_translate("MainWindow", ".db - Database", None))
        self.allFiles.setText(_translate("MainWindow", "Export all file types", None))
        self.restartButton.setText(_translate("MainWindow", "Restart", None))
        self.groupBox_3.setTitle(_translate("MainWindow", "Gathered Data", None))
        item = self.dataTable.verticalHeaderItem(0)
        item.setText(_translate("MainWindow", "N", None))
        item = self.dataTable.verticalHeaderItem(1)
        item.setText(_translate("MainWindow", "S", None))
        item = self.dataTable.verticalHeaderItem(2)
        item.setText(_translate("MainWindow", "E", None))
        item = self.dataTable.verticalHeaderItem(3)
        item.setText(_translate("MainWindow", "W", None))
        item = self.dataTable.verticalHeaderItem(4)
        item.setText(_translate("MainWindow", "C", None))
        item = self.dataTable.horizontalHeaderItem(0)
        item.setText(_translate("MainWindow", "Gateway", None))
        item = self.dataTable.horizontalHeaderItem(1)
        item.setText(_translate("MainWindow", "Access Point", None))
        item = self.dataTable.horizontalHeaderItem(2)
        item.setText(_translate("MainWindow", "Loss (%)", None))
        item = self.dataTable.horizontalHeaderItem(3)
        item.setText(_translate("MainWindow", "Delay (ms)", None))
        item = self.dataTable.horizontalHeaderItem(4)
        item.setText(_translate("MainWindow", "Quality", None))
        item = self.dataTable.horizontalHeaderItem(5)
        item.setText(_translate("MainWindow", "Signal (dbm)", None))
        item = self.dataTable.horizontalHeaderItem(6)
        item.setText(_translate("MainWindow", "Speed (Mb)", None))
        # Sorting is disabled while seeding placeholder cells, then restored.
        __sortingEnabled = self.dataTable.isSortingEnabled()
        self.dataTable.setSortingEnabled(False)
        item = self.dataTable.item(0, 0)
        item.setText(_translate("MainWindow", " ", None))
        item = self.dataTable.item(0, 1)
        item.setText(_translate("MainWindow", " ", None))
        item = self.dataTable.item(0, 2)
        item.setText(_translate("MainWindow", " ", None))
        item = self.dataTable.item(0, 3)
        item.setText(_translate("MainWindow", " ", None))
        item = self.dataTable.item(0, 4)
        item.setText(_translate("MainWindow", " ", None))
        item = self.dataTable.item(0, 5)
        item.setText(_translate("MainWindow", " ", None))
        item = self.dataTable.item(0, 6)
        item.setText(_translate("MainWindow", " ", None))
        self.dataTable.setSortingEnabled(__sortingEnabled)
        self.exitButton.setText(_translate("MainWindow", "Exit", None))


if __name__ == "__main__":
    import sys
    app = QtGui.QApplication(sys.argv)
    MainWindow = QtGui.QWidget()
    ui = Ui_MainWindow()
    ui.setupUi(MainWindow)
    MainWindow.show()
    sys.exit(app.exec_())
18,237
8c879a435abb1f28876c690b9cfaec34b2841838
import sys
sys.path.append('.')
sys.path.append('..')
import json
import os
from azure.storage.blob import BlobServiceClient
from commons.DatabaseConnection import DatabaseConnection

# Storage and database credentials are injected through the environment.
STORAGE_CONNECTION_STRING = os.getenv('storage_connection_string')
blob_service_client = BlobServiceClient.from_connection_string(STORAGE_CONNECTION_STRING)

DATABASE_URL = os.environ['database_url']
DATABASE_NAME = os.environ['database_name']
DATABASE_COLLECTION = os.environ['database_collection']
DATABASE_KEY = os.environ['database_key']
db_conn = DatabaseConnection(DATABASE_URL, DATABASE_NAME, DATABASE_COLLECTION, DATABASE_KEY)


async def clean_samples() -> list:
    """Delete orphaned audio blobs from the 'sample' container.

    A blob is an orphan when the sample id encoded in its name is not
    referenced by any Beat attribute and has no Sample vertex of its own.

    Returns:
        list: names of the blobs that were deleted.

    Raises:
        Exception: re-raised storage errors from blob deletion.
    """
    beats = db_conn.execute_query(db_conn.query_map['get_all_beats'])
    samples = db_conn.execute_query(db_conn.query_map['get_all_samples'])
    samples_used = set()

    # Collect every sample id referenced from a Beat's serialized attributes.
    for vertex in beats:
        attribute_samples = json.loads(vertex['properties']['attributes'][0]['value'])
        for sample in attribute_samples:
            samples_used.add(sample['sampleID'])

    # Also keep any blob backing an existing Sample vertex.
    for vertex in samples:
        samples_used.add(vertex['id'])

    # Walk the Samples container and delete anything unreferenced.
    container_client = blob_service_client.get_container_client('sample')
    deleted_samples = list()
    for blob in container_client.list_blobs():
        file_name = blob.name
        # Blob names start with "<sampleID>_..."; the prefix identifies the
        # sample -- TODO confirm this naming convention against the uploader.
        sample_id = file_name.split('_')[0]
        if sample_id not in samples_used:
            try:
                blob_client = container_client.get_blob_client(file_name)
                blob_client.delete_blob()
                deleted_samples.append(file_name)
            except Exception:
                # BUG FIX: original read "except e as Exception:", which is
                # backwards and would raise NameError instead of catching.
                # Propagate the storage failure to the caller.
                raise
    return deleted_samples
18,238
bfd6f410d01657b2e2fc5c306fed40172cfb94c5
# -*- coding: utf-8 -*-
"""
Spyder Editor

Threshold-trading backtest on SBUX daily prices: for a grid of % -change
thresholds n, buy on drops <= -n, sell on rises >= n, and keep the n with
the largest final portfolio value.
"""

# Import libraries
import pandas_datareader as pdr
import pandas as pd
import matplotlib.pyplot as plt
from xgboost import XGBClassifier, XGBRegressor
from sklearn import metrics
import numpy as np
from operator import itemgetter

# Get data
df = pdr.get_data_yahoo('SBUX', '2010-01-01', '2021-02-03').rename({'Adj Close': 'price'}, axis = 1)

# Calculate % change
df['pct_change'] = df['price'].pct_change()
df.dropna(inplace = True)


# Function to get trades
def optimal_trading(x, n):
    """Map a daily % change x to a signal: -1 buy, 1 sell, 0 hold."""
    # Buy
    if x <= -n:
        return -1
    # Sell
    elif x >= n:
        return 1
    # Hold
    else:
        return 0


# Iterate to find optimal magnitude
df.reset_index(inplace = True)
scores = []
for n in np.arange(.05, .5, .05):
    df['trades'] = df['pct_change'].apply(lambda x: optimal_trading(x, n))
    # Force a buy on day one and a sell on the last day so each run starts
    # and ends with activity. Use .loc to avoid chained-assignment writes
    # that pandas does not guarantee to stick.
    df.loc[df.index[0], 'trades'] = -1
    df.loc[df.index[-1], 'trades'] = 1
    ledger = pd.DataFrame()

    # Initialize ledger
    trades = 0
    amount = 10000
    quant = 0

    # Calculate trade activity
    for idx, row in df.iterrows():
        # units = whole shares the current cash could buy at today's price
        date, price, units = row['Date'], row['price'], amount // row['price']
        # Buy
        if row['trades'] == -1:
            amount -= (units * price)
            quant += units
            trades += 1
        # Hold
        elif row['trades'] == 0:
            pass
        # Sell
        # BUG FIX: this branch originally re-tested == -1 (a duplicate of
        # the buy branch), so sell signals were never executed.
        # optimal_trading() returns 1 for a sell.
        # NOTE(review): selling `units` (cash-derived) rather than the held
        # `quant` can go short -- confirm this is the intended model.
        elif row['trades'] == 1:
            amount += (units * price)
            quant -= units
            trades += 1
        ledger = ledger.append({'dt': date, 'price': price, 'trades': row['trades'],
                                'shares': quant, 'num_trades': trades,
                                'total': amount + (quant * price)}, ignore_index = True)

    # Score this threshold by the final mark-to-market portfolio value.
    scores.append((n, ledger['total'].iloc[-1]))

best = max(scores, key = itemgetter(1))
18,239
614cc8d76a07bd551be611b58d62f853d1bfec26
import numpy as np
import matplotlib.pyplot as plt
import pyneb as pn

# Tell PyNeb to use parallelisation
pn.config.use_multiprocs()

### General settings
# Setting verbosity level. Enter pn.my_logging? for details
pn.log_.level = 2 # set this to 3 to have more details

# Adopt an extinction law
extinction_law = 'CCM89'

# Define the data file
obs_data = 'test.dat'

# Define plot title
title = 'SMC 24'

### Read and deredden observational data
# define an Observation object and assign it to name 'obs'
obs = pn.Observation()

# fill obs with data read from file obs_data, with lines varying across rows
# and a default percent error on line intensities
obs.readData(obs_data, fileFormat='lines_in_rows_err_cols', errIsRelative=False)#, err_default=0.05)

# Add a number of "fake" observations around the right one, to make
# Monte Carlo statistics
obs.addMonteCarloObs(N = 500)

obs.def_EBV(label1="H1r_6563A", label2="H1r_4861A", r_theo=2.85)

# deredden data with Cardelli's law
obs.extinction.law = extinction_law
obs.correctData()
print("PyNeb: cH(b) = {:.2f} +/- {:.2f}".format(np.median(obs.extinction.cHbeta),np.std(obs.extinction.cHbeta)))

### Include the diagnostics of interest
# instantiate the Diagnostics class
diags = pn.Diagnostics()
# include in diags the relevant line ratios
diags.addDiag([
    '[NII] 5755/6584',
    '[OII] 3726/3729',
    '[OIII] 4363/5007',
    '[SII] 6731/6716',
    '[SII] 4072+/6720+',
    '[SIII] 6312/18.7m',
    '[NeIII] 3930+/15.6m',
    ])
diags.addClabel('[SII] 6731/6716', '[SII]a')
diags.addClabel('[SII] 4072+/6720+', '[SII]b')

# The observed ratio can be automatically extracted from an Observation
# object named obs:
# BUG FIX: the [NII] label passed here originally read '[NII] 5755/6548',
# which does not match the '[NII] 5755/6584' diagnostic registered above;
# use the registered label so getCrossTemDen can find the ratio.
Te, Ne = diags.getCrossTemDen('[NII] 5755/6584', '[SII] 6731/6716', obs=obs)
print('Te = {:.0f} +/- {:.0f}'.format(np.mean(Te), np.std(Te)))
print('Ne = {:.0f} +/- {:.0f}'.format(np.mean(Ne), np.std(Ne)))
18,240
03d598c18f41a49dd8aec5c34809bb0bd083b4da
import pandas as pd
from io import StringIO
import numpy as np
# BUG FIX: `from sklearn.preprocessing import Imputer` removed -- the class
# was deleted in scikit-learn 0.22 and the import crashes; its replacement
# SimpleImputer is imported below (and was already imported here).
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import LabelEncoder

# Split the dataset into a training set and a test set
df_wine = pd.read_csv('./wine.data', header=None)  # read the Wine dataset
print(df_wine)
df_wine.columns = ['Class label', 'Alcohol', 'Malic acid', 'Ash', 'Alcalinity of ash', 'Magnesium',
                   'Total phenols', 'Flavanoids', 'Nonflavanoid phenols', 'Proanthocyanins',
                   'Color intensity', 'Hue', 'OD280/OD315 of diluted wines', 'Proline']
print('Class labels', np.unique(df_wine['Class label']))  # three wine classes
print(df_wine.head())

from sklearn.model_selection import train_test_split

X, y = df_wine.iloc[:, 1:].values, df_wine.iloc[:, 0].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0, stratify=y)

# Keep features on the same scale: normalization vs standardization
# Normalization usually means rescaling features into the [0, 1] interval
ex = np.array([0, 1, 2, 3, 4, 5])
print('normalized:', (ex - ex.min()) / (ex.max() - ex.min()))

from sklearn.preprocessing import MinMaxScaler

mms = MinMaxScaler()
X_train_norm = mms.fit_transform(X_train)
print(X_train_norm)
X_test_norm = mms.transform(X_test)
# BUG FIX: originally printed the raw X_test right after computing
# X_test_norm; print the normalized array that was just produced.
print(X_test_norm)

# Standardization is essential for many linear models
print('standardized:', (ex - ex.mean()) / ex.std())

from sklearn.preprocessing import StandardScaler

stdsc = StandardScaler()
X_train_std = stdsc.fit_transform(X_train)
X_test_std = stdsc.transform(X_test)

# Selecting meaningful features
# Regularization
print('normalized:', (ex - ex.min()) / (ex.max() - ex.min()))

from sklearn.linear_model import LogisticRegression

# BUG FIX: L1 regularization requires a solver that supports it; the modern
# default ('lbfgs') raises on penalty='l1', so request 'liblinear'
# explicitly (the historical default this code was written against).
lr = LogisticRegression(penalty='l1', C=1.0, solver='liblinear')
lr.fit(X_train_std, y_train)
print('Training accuracy:', lr.score(X_train_std, y_train))
print('Test accuracy:', lr.score(X_test_std, y_test))
print(lr.intercept_)
print(lr.coef_)
18,241
d16b7b229df9fb15dd6daa0faf82d929d0bdda40
#estatistica
#arquivo feito por guilherme del rio
from math import *


class dados_continuos():
    """Descriptive-statistics helpers for continuous data.

    The mean helpers are plain static functions taking the data sequence,
    matching how the original (self-less) methods had to be called:
    ``dados_continuos.media_aritimetica([...])``.
    """

    # Class-level default: empty data tuple.
    dados = ()

    def __init__(self, *args):
        # BUG FIX: parameter was misspelled "agrs" and the body only bound a
        # local name; store the samples on the instance.
        self.dados = args

    @staticmethod
    def add_data(data_range, data):
        # Original body was a bare (undefined) name; kept as an explicit stub.
        raise NotImplementedError("add_data is not implemented yet")

    @staticmethod
    def media_aritimetica(dados):
        """Arithmetic mean of ``dados``."""
        soma = 0  # BUG FIX: accumulator was never initialized
        for x in dados:
            soma += x
        return soma / len(dados)

    @staticmethod
    def media_ponderada(dados):
        """Weighted mean of ``dados``, a sequence of (value, weight) pairs."""
        soma = 0  # BUG FIX: accumulators were never initialized
        p = 0
        for x in dados:
            soma += (x[0] * x[1])
            p += x[1]
        return soma / p

    @staticmethod
    def media_geometrica(dados):
        """Geometric mean: n-th root of the product of ``dados``.

        BUG FIX: the original summed the values and called an undefined
        ``root()``; the geometric mean multiplies and takes the len-th root.
        """
        produto = 1
        for x in dados:
            produto *= x
        return produto ** (1.0 / len(dados))

    @staticmethod
    def media_harmonica(dados):
        """Harmonic mean: n divided by the sum of reciprocals."""
        soma = 0  # BUG FIX: accumulator was never initialized
        for x in dados:
            soma += x ** -1
        return len(dados) / soma
18,242
23c1326a07ad95e705259affedb460470d28969e
class Solution(object):
    """Shortest word distance (LeetCode 243)."""

    def shortestDistance(self, wordsDict, word1, word2):
        """Return the smallest index gap between word1 and word2 in wordsDict.

        Single pass: remember the latest position of each word and tighten
        the best gap whenever both have been seen.
        """
        best = float("inf")
        pos1 = pos2 = None
        for idx, word in enumerate(wordsDict):
            if word == word1:
                pos1 = idx
            if word == word2:
                pos2 = idx
            if pos1 is not None and pos2 is not None:
                gap = abs(pos1 - pos2)
                if gap < best:
                    best = gap
        return best
18,243
ecd3c4b493bcaac8226dc129ea649200d509b3c4
""" .. module:: utils :platform: Unix :synopsis: Miscellaneous utils .. moduleauthor:: Daniele Zanotelli <dazano@gmail.com> """ import logging logger = logging.getLogger(__name__) def log_call(func, *args, **kw): """Produce a info message which logs the *func* call. """ # concatenate args and kw args transforming string values # from 'value' to '"value"' in order to pretty display em func_args = [] # concatenate positional args args = list(args) if args: for i, arg in enumerate(args): if type(arg) is str: args[i] = '"{}"'.format(arg) func_args.extend([str(arg) for arg in args]) # concatenate non positional args if kw: for key, value in kw.items(): if type(value) is str: kw[key] = '"{}"'.format(value) func_args.extend(["{k}={v}".format(k=k, v=v) for k, v in kw.items()]) # print the log message msg = "[DRYRUN] call to '{func}({args})'" msg = msg.format(func=func.__name__, args=", ".join(func_args)) logger.info(msg)
18,244
08ce064ab0ce7e890125b8f4174cc62971f4c7b0
#!/usr/bin/env python
"""
Collatz photosynthesis model.

That's all folks.
"""
__author__ = "Martin De Kauwe"
__version__ = "1.0 (11.01.2020)"
__email__ = "mdekauwe@gmail.com"

import sys
import numpy as np
import os
import math

import constants as cnt


class CollatzC3(object):
    """
    Collatz photosynthesis model.

    Reference
    =========
    * Collatz, G. J., Ball, J. T., Grivet, C., and Berry, J. A. (1991)
      Physiological and environmental regulation of stomatal conductance,
      photosynthesis and transpiration: amodel that includes alaminar
      boundary layer, Agr. Forest Meteorol., 54, 107-136.
    * Clark DB, Mercado LM, Sitch S, Jones CD, Gedney N, Best MJ, Pryor M,
      Rooney GG, Essery RLH, Blyth E, et al. 2011. The Joint UK Land
      Environment Simulator (JULES), Model description - Part 2: Carbon
      fluxes and vegetation. Geoscientific Model Development Discussions 4:
      641-688.
    """

    # Note in Clark et al. Ko25=30.0*1E4, using 1E3 to match Eller, check if
    # that is a mistake
    def __init__(self, Oa=21000.0, gamstar25=42.75, Kc25=30.0, Ko25=30.0*1E3,
                 Q10_Kc=2.1, Q10_Ko=1.2, Q10_Vcmax=2.0, Tlower=10.0,
                 Tupper=50.0, gamma25=2600.0, Q10_gamma=0.57, alpha=0.08,
                 omega=0.15, beta1=0.83, beta2=0.93):

        self.gamma25 = gamma25 # coefficents for CO2 compensation point (Pa)
        self.Kc25 = Kc25 # MM coefficents for carboxylation by Rubisco (Pa)
        self.Ko25 = Ko25 # MM coefficents for oxygenation by Rubisco (Pa)
        self.Q10_Ko = Q10_Ko # Q10 value for MM constants for O2
        self.Q10_Kc = Q10_Kc # Q10 value for MM constants for CO2
        self.Q10_Vcmax = Q10_Vcmax # Q10 value for carboxylation of Rubisco
        self.Q10_gamma = Q10_gamma # Q10 value for Rubisco specificity for CO2
                                   # relative to O2
        self.Tlower = Tlower # Lower temperature for carboxylation
        self.Tupper = Tupper # Upper temperature for carboxylation
        self.Oa = Oa # the partial pressure of atmospheric oxygen (Pa)
        self.alpha = alpha # quantum efficiency of
                           # photosynthesis (mol CO2 mol-1 PAR)
        self.omega = omega # leaf scattering coefficent for PAR (unitless)
        self.beta1 = beta1 # smoothing co-limitation coefficient
        self.beta2 = beta2 # smoothing co-limitation coefficient

    def calc_photosynthesis(self, Ci, Tleaf, PAR, Vcmax25):
        """
        Net photosynthesis for a given intercellular CO2.

        Parameters
        ----------
        Ci : float
            leaf intercellular CO2 partial pressure (Pa)
        Tleaf : float
            leaf temp (deg C)
        PAR : float
            photosynthetically active radiation (mol m-2 s-1)
        Vcmax25 : float
            Maximum rate of rubisco activity 25C (mol m-2 s-1)
        """
        Tk = Tleaf + cnt.DEG_2_KELVIN

        # CO2 compensation point in the absence of mitochondrial resp (Pa)
        gamma = self.calc_CO2_compensation_point(Tleaf)

        # calculate temp depend of Michaelis Menten constants for CO2, O2
        Km = self.calc_michaelis_menten_constants(Tleaf)

        # Max rate of rubisco activity (mol m-2 s-1)
        Vcmax = self.correct_vcmax_for_temperature(Vcmax25, Tleaf)

        # Leaf day respiration (mol m-2 s-1)
        Rd = Vcmax * 0.01

        # Leaf-level photosynthesis: Rubisco-limited rate (Pa)
        Ac = Vcmax * ((Ci - gamma) / (Ci + Km))

        # Leaf-level photosynthesis: Light-limited rate (Pa)
        Al = self.alpha * (1.0 - self.omega) * PAR \
                * ((Ci - gamma) / (Ci + 2.0 * gamma))

        # Leaf-level photosynthesis: rate of transport of photosynthetic
        # products
        Ae = 0.5 * Vcmax

        # The rate of gross photosynthesis (W) is calculated as the smoothed
        # minimum of three potentially-limiting rates
        A = self.beta1
        B = -(Ac + Al)
        C = Ac * Al
        A_gross1 = self.quadratic(a=A, b=B, c=C, large=False)

        A = self.beta2
        B = -(A_gross1 + Ae)
        C = A_gross1 * Ae
        A_gross2 = self.quadratic(a=A, b=B, c=C, large=False)

        # Rate of gross photosynthesis (mol CO2 m-2 s-1)
        Ag = A_gross2

        # Rate of net photosynthesis (mol CO2 m-2 s-1)
        An = Ag - Rd

        return An

    def calc_photosynthesis_given_gc(self, Cs, Tleaf, PAR, Vcmax25, gc,
                                     press):
        """
        Net photosynthesis when stomatal conductance is prescribed; each
        limiting rate is solved jointly with the diffusion constraint.

        Parameters
        ----------
        Cs : float
            leaf CO2 partial pressure (Pa)
        Tleaf : float
            leaf temp (deg C)
        PAR : float
            photosynthetically active radiation (mol m-2 s-1)
        Vcmax25 : float
            Maximum rate of rubisco activity 25C (mol m-2 s-1)
        gc : float
            stomatal conductance to CO2
        press : float
            atmospheric pressure (Pa)
        """
        Tk = Tleaf + cnt.DEG_2_KELVIN

        # CO2 compensation point in the absence of mitochondrial resp (Pa)
        gamma = self.calc_CO2_compensation_point(Tleaf)

        # calculate temp depend of Michaelis Menten constants for CO2, O2
        Km, Ko, Kc = self.calc_michaelis_menten_constants(Tleaf,
                                                          ret_cnts=True)

        # Max rate of rubisco activity (mol m-2 s-1)
        Vcmax = self.correct_vcmax_for_temperature(Vcmax25, Tleaf)

        # Leaf day respiration (mol m-2 s-1)
        Rd = Vcmax * 0.01

        # Leaf-level photosynthesis: Rubisco-limited rate (Pa)
        a = Vcmax
        b = Kc * (1.0 + self.Oa / Ko)
        c = (Rd - a) - (gc / press) * (Cs + b)
        d = (gc / press) * (a * Cs - a * gamma - Rd * Cs - b * Rd)
        Ac = (-(c / 2.0) - math.sqrt(((c / 2.0)**2) - d)) + Rd

        # Leaf-level photosynthesis: Light-limited rate (Pa)
        a = self.alpha * (1.0 - self.omega) * PAR
        b = 2.0 * gamma
        c = (Rd - a) - (gc / press) * (Cs + b)
        d = (gc / press) * (a * Cs - a * gamma - Rd * Cs - b * Rd)
        Al = (-(c / 2.0) - math.sqrt(((c / 2.0)**2.0) - d)) + Rd

        # Leaf-level photosynthesis: rate of transport of photosynthetic
        # products
        Ae = 0.5 * Vcmax

        # Co-limitation
        # The rate of gross photosynthesis (W) is calculated as the smoothed
        # minimum of three potentially-limiting rates
        A = self.beta1
        B = -(Ac + Al)
        C = Ac * Al
        A_gross1 = self.quadratic(a=A, b=B, c=C, large=False)

        A = self.beta2
        B = -(A_gross1 + Ae)
        C = A_gross1 * Ae
        A_gross2 = self.quadratic(a=A, b=B, c=C, large=False)

        # Rate of gross photosynthesis (mol CO2 m-2 s-1)
        Ag = A_gross2

        # Rate of net photosynthesis (mol CO2 m-2 s-1)
        An = Ag - Rd

        return An

    def calc_ci_at_colimitation_point(self, Ci, Tleaf, PAR, Vcmax25):
        """
        Intercellular CO2 at which the light- and export-limited rates
        co-limit the Rubisco-limited rate.

        Parameters
        ----------
        Ci : float
            leaf intercellular CO2 partial pressure (Pa)
        Tleaf : float
            leaf temp (deg C)
        PAR : float
            photosynthetically active radiation (mol m-2 s-1)
        Vcmax25 : float
            Maximum rate of rubisco activity 25C (mol m-2 s-1)
        """
        Tk = Tleaf + cnt.DEG_2_KELVIN

        # CO2 compensation point in the absence of mitochondrial resp (Pa)
        gamma = self.calc_CO2_compensation_point(Tleaf)

        # calculate temp depend of Michaelis Menten constants for CO2, O2
        Km, Ko, Kc = self.calc_michaelis_menten_constants(Tleaf,
                                                          ret_cnts=True)

        # Max rate of rubisco activity (mol m-2 s-1)
        Vcmax = self.correct_vcmax_for_temperature(Vcmax25, Tleaf)

        # Leaf day respiration (mol m-2 s-1)
        Rd = Vcmax * 0.01

        # Leaf-level photosynthesis: Light-limited rate (Pa)
        Al = self.alpha * (1.0 - self.omega) * PAR

        # Leaf-level photosynthesis: rate of transport of photosynthetic
        # products
        Ae = 0.5 * Vcmax

        # Co-limitated A
        A = self.beta2
        B = -(Al + Ae)
        C = Al * Ae
        A_colimit = self.quadratic(a=A, b=B, c=C, large=False)

        # Ci at the colimitation point: invert the Rubisco-limited rate
        # Ac = (a + b*Ci) / (d + e*Ci) for Ci.
        a_bnd = -Vcmax * gamma
        b_bnd = Vcmax
        d_bnd = Kc * (1.0 + self.Oa / Ko)
        e_bnd = 1.0
        Ci_col = (a_bnd - d_bnd * A_colimit) / (e_bnd * A_colimit - b_bnd)

        return Ci_col

    def calc_michaelis_menten_constants(self, Tleaf, ret_cnts=False):
        """ Michaelis-Menten constant for O2/CO2, Arrhenius temp dependancy

        Parameters:
        ----------
        Tleaf : float
            leaf temperature [deg K]

        Returns:
        ----------
        Km : float
            Michaelis-Menten constant
        """
        # Michaelis Menten constants for CO2 (Pa)
        Kc = self.Q10_func(self.Kc25, self.Q10_Kc, Tleaf)

        # Michaelis Menten constants for O2 (Pa)
        Ko = self.Q10_func(self.Ko25, self.Q10_Ko, Tleaf)

        Km = Kc * (1.0 + self.Oa / Ko)

        if ret_cnts:
            return Km, Ko, Kc
        else:
            return Km

    def calc_CO2_compensation_point(self, Tleaf):
        """ Photorespiration compensation point (Pa) """
        # Rubisco specificity for CO2 relative to O2
        tau = self.Q10_func(self.gamma25, self.Q10_gamma, Tleaf)
        gamma = self.Oa / (2.0 * tau)

        return gamma

    def Q10_func(self, k25, Q10, Tleaf):
        """ Q10 function to calculate parameter change with temperature """
        return k25 * (Q10**((Tleaf - 25.0) / 10.0))

    def correct_vcmax_for_temperature(self, Vcmax25, Tleaf):
        """
        Correct Vcmax based on defined by PFT-specific upper and lower
        temperature params, see Clark et al. (mol CO2 m-2 s-1)
        """
        num = self.Q10_func(Vcmax25, self.Q10_Vcmax, Tleaf)
        den = (1.0 + math.exp(0.3 * (Tleaf - self.Tupper))) * \
                (1.0 + math.exp(0.3 * (self.Tlower - Tleaf)))

        return num / den

    def quadratic(self, a=None, b=None, c=None, large=False):
        """ minimilist quadratic solution as root for J solution should be
        positive, so I have excluded other quadratic solution steps. I am
        only returning the smallest of the two roots

        Parameters:
        ----------
        a : float
            co-efficient
        b : float
            co-efficient
        c : float
            co-efficient

        Returns:
        -------
        val : float
            positive root
        """
        d = b**2.0 - 4.0 * a * c # discriminant
        if d < 0.0:
            raise ValueError('imaginary root found')

        if large:
            if math.isclose(a, 0.0) and b > 0.0:
                # Degenerate (linear) case: a*x^2 drops out.
                root = -c / b
            elif math.isclose(a, 0.0) and math.isclose(b, 0.0):
                root = 0.0
                if c != 0.0:
                    raise ValueError('Cant solve quadratic')
            else:
                root = (-b + np.sqrt(d)) / (2.0 * a)
        else:
            if math.isclose(a, 0.0) and b > 0.0:
                # Degenerate (linear) case: a*x^2 drops out.
                root = -c / b
            elif math.isclose(a, 0.0) and math.isclose(b, 0.0):
                # BUG FIX: this line was "root == 0.0" (a no-op comparison),
                # leaving `root` unbound and raising UnboundLocalError at
                # the return below.
                root = 0.0
                if c != 0.0:
                    raise ValueError('Cant solve quadratic')
            else:
                root = (-b - np.sqrt(d)) / (2.0 * a)

        return root


if __name__ == "__main__":

    Vcmax25 = 0.0001 # Maximum rate of rubisco activity 25C (mol m-2 s-1)
    Tleaf = 35.0 # Leaf temp (deg C)
    Ci = 40. * 0.7 # leaf interceullular partial pressure (Pa)
    PAR = 0.002 # photosynthetically active radiation (mol m-2 s-1)

    C = CollatzC3()
    An = C.calc_photosynthesis(Ci, Tleaf, PAR, Vcmax25)
    print(An)
18,245
b408604f0b4b53278dceffe3004e9ebd8026eeea
from turtle import Turtle

FONT = "Courier"
FONTSIZE = 16
FONTTYPE = "normal"
FONTCOLOR = "White"
ALIGNMENT = "center"


class ScoreBoard(Turtle):
    """Pong scoreboard: an invisible turtle that writes both players'
    scores at the top of the screen and a GAME OVER banner at the end."""

    def __init__(self):
        super().__init__()
        self.hideturtle()
        self.penup()  # lift the pen before moving so no line is drawn
        self.color(FONTCOLOR)
        self.goto(0, 280)
        self.score_p1 = 0
        self.score_p2 = 0
        self.update_scoreboard()

    def update_scoreboard(self):
        """Render the current score line at the turtle's position."""
        text = f" PlayerA {self.score_p1} : {self.score_p2} PlayerB"
        self.write(text, move=False, align=ALIGNMENT,
                   font=(FONT, FONTSIZE, FONTTYPE))

    def increase_score(self, player):
        """Add a point for player 1 or 2, then redraw the scoreboard."""
        if player == 1:
            self.score_p1 = self.score_p1 + 1
        if player == 2:
            self.score_p2 = self.score_p2 + 1
        self.clear()
        self.update_scoreboard()

    def game_over(self):
        """Write the end-of-game banner in the centre of the screen."""
        self.goto(0, 0)
        self.write("GAME OVER", move=False, align=ALIGNMENT,
                   font=(FONT, FONTSIZE, FONTTYPE))
18,246
09e93c507989b8b4e461152b848d2fa76878efde
#coding=utf8
# Python 2 script: scrapes tianyancha.com company search results and writes
# a match status back into the t_customer table.
import torndb
import time
from pyquery import PyQuery as pq
import requests
# Two MySQL connections; `db` is opened but only `db1` is used below.
db = torndb.Connection("192.168.2.168", "db_income2","root","deng")
db1 = torndb.Connection("192.168.2.168", "db_customer","root","deng")
from random import randint


def addNew(name, company, company_id):
    """Search tianyancha for `name` and record whether the first hit equals
    `company`: is_get=5 on exact match, 6 on mismatch; on an empty result the
    site is presumably rate-limiting/captcha-ing, so sleep and skip."""
    # Hard-coded browser headers including a captured session cookie --
    # NOTE(review): the auth cookie/token will expire and must be refreshed.
    hd = {
        "Connection": "keep-alive",
        "Pragma": "no-cache",
        "Cache-Control": "no-cache",
        "Upgrade-Insecure-Requests": "1",
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
        "Referer": "https://www.tianyancha.com/",
        "Accept-Encoding": "gzip, deflate, br",
        "Accept-Language": "zh-CN,zh;q=0.9",
        "Cookie": "TYCID=a7511930f38511e8ac10d31168af962d; undefined=a7511930f38511e8ac10d31168af962d; ssuid=4016186264; _ga=GA1.2.1585521434.1543461602; _gid=GA1.2.365761508.1545014447; __insp_wid=677961980; __insp_nv=true; __insp_targlpu=aHR0cHM6Ly93d3cudGlhbnlhbmNoYS5jb20vc2VhcmNoP2tleT0lRTUlQjklQkYlRTUlQjclOUUlRTYlOTglOUYlRTUlQTUlODclRTclQTclOTElRTYlOEElODAlRTYlOUMlODklRTklOTklOTAlRTUlODUlQUMlRTUlOEYlQjgmcm5kPSZybmQ9JnJuZD0mcm5kPSZybmQ9JnJuZD0mcm5kPSZybmQ9JnJuZD0mcm5kPSZybmQ9JnJuZD0mcm5kPSZybmQ9JnJuZD0mcm5kPSZybmQ9JnJuZD0mcm5kPQ%3D%3D; __insp_targlpt=5bm%2F5bee5pif5aWH56eR5oqA5pyJ6ZmQ5YWs5Y_4X_ebuOWFs_aQnOe0oue7k_aenC3lpKnnnLzmn6U%3D; __insp_norec_sess=true; aliyungf_tc=AQAAAC35bjpr0A0A5RmMPYm9WOe4Asnq; csrfToken=-w8hA9i4CJSXiQbygq8QN4V6; Hm_lvt_e92c8d65d92d534b0fc290df538b4758=1543461602,1545014446,1545109876; RTYCID=85f7164c9b6742548df2ea761e5cb3e5; CT_TYCID=3dbeede0a633496cbbb817286edcdfda; cloud_token=4e228ffdf3f348b8a7485512bf993248; cloud_utm=1f0e59ac73274334ba6d8850f453457e; _gat_gtag_UA_123487620_1=1; tyc-user-info=%257B%2522myQuestionCount%2522%253A%25220%2522%252C%2522integrity%2522%253A%25220%2525%2522%252C%2522state%2522%253A%25220%2522%252C%2522vipManager%2522%253A%25220%2522%252C%2522onum%2522%253A%25220%2522%252C%2522monitorUnreadCount%2522%253A%25220%2522%252C%2522discussCommendCount%2522%253A%25220%2522%252C%2522token%2522%253A%2522eyJhbGciOiJIUzUxMiJ9.eyJzdWIiOiIxODAyNzMxODY2OCIsImlhdCI6MTU0NTExMDIzOSwiZXhwIjoxNTYwNjYyMjM5fQ.LbHiFyZi0nHAI-c23DGTrqGSUV_oDhW9SwFSmSklkMDXQxO2Seygup2C_v9kQVAEM_ZgIq7Y8PYni3Lf7Porhw%2522%252C%2522redPoint%2522%253A%25220%2522%252C%2522pleaseAnswerCount%2522%253A%25220%2522%252C%2522vnum%2522%253A%25220%2522%252C%2522bizCardUnread%2522%253A%25220%2522%252C%2522mobile%2522%253A%252218027318668%2522%257D; auth_token=eyJhbGciOiJIUzUxMiJ9.eyJzdWIiOiIxODAyNzMxODY2OCIsImlhdCI6MTU0NTExMDIzOSwiZXhwIjoxNTYwNjYyMjM5fQ.LbHiFyZi0nHAI-c23DGTrqGSUV_oDhW9SwFSmSklkMDXQxO2Seygup2C_v9kQVAEM_ZgIq7Y8PYni3Lf7Porhw; __insp_slim=1545110258939; Hm_lpvt_e92c8d65d92d534b0fc290df538b4758=1545110259"
    }
    doc = pq('https://www.tianyancha.com/search?key='+name,headers=hd)
    print 'https://www.tianyancha.com/search?key='+name
    # First search result's company name, with internal spaces stripped.
    link_company = doc(".name").eq(0).text().replace(" ", "")
    # print "tyc_company_name", link_company,
    if link_company:
        is_good =6
        if link_company == company:
            is_good= 5
            print link_company,"对比中:::",company, link_company == company
        # Record the comparison outcome and the name found on tianyancha.
        db1.execute( "update t_customer set is_get=%s,is_get_at=now(),tyc_name=%s where id=%s", is_good, link_company, company_id)
    else:
        # Empty result: presumably blocked by the site's captcha -- back off.
        print "天眼查抓取失败,需打码" ,link_company
        time.sleep(15)


def dowork():
    """Walk pending t_customer rows (is_get in 1/3/6), mark each as
    in-progress (is_get=7) and verify it via addNew()."""
    num =1
    for item in db1.query( "select id,company_reguid_new,company from t_customer where company_reguid_new is not null and (is_get =1 or is_get=3 or is_get=6) order by created_at "):
        num = num+1
        print "开始 ", item.company_reguid_new, num,item.id, item.company
        if item.company and item.company_reguid_new != "-":
            # Flag as being processed before the (slow) scrape.
            db1.execute( "update t_customer set is_get=7,is_get_at=now() where id=%s", item.id)
            addNew(item.company_reguid_new,item.company, item.id)
            # Throttle requests to avoid being blocked.
            time.sleep(1)


dowork()
print "done"
18,247
da8d0d3de2df6d5e332040ad93aa3f4edae37abc
from django.shortcuts import render
from django.http import HttpResponse
from extra_hands_app.models import Teacher, Client, Available_Time, Event, Email_List, Click
from forms import EventForm, UserForm, TeacherForm, ClientForm, AvailableTimeForm
from django.http import HttpResponseRedirect, HttpResponseNotAllowed, Http404, HttpResponseForbidden
from django.core.urlresolvers import reverse
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.contrib import messages
import datetime
from decimal import Decimal
from dateutil.parser import parse as parse_date
import pytz
# from templated_email import send_templated_mail
from mail_templated import send_mail

# Create your views here.
def index(request):
    """Landing page: renders the base template with no context."""
    context_dict={}
    response = render(request, 'base.html', context_dict)
    return response

#Allows a logged-in user to view a specific teacher profile.
@login_required
def get_teacher(request, teacher_slug):
    """Render a single teacher's profile; unknown slugs render an empty page."""
    context_dict ={}
    try:
        teacher = Teacher.objects.get(slug=teacher_slug)
        context_dict['teacher'] = teacher
    except Teacher.DoesNotExist:
        # Missing teacher: fall through and render with an empty context.
        pass
    return render(request, 'teacher.html', context_dict)

#View a list of teachers with links to their individual profiles.
@login_required
def get_all_teachers(request):
    """List every teacher."""
    teachers = Teacher.objects.all()
    context_dict ={'teachers':teachers}
    response = render(request, 'teachers.html', context_dict)
    return response

#Allows a logged in user to view details about a client. This will likely be decommissioned in the final app
@login_required
def get_client(request, client_slug):
    """Render one client's profile plus that client's events."""
    context_dict = {}
    try:
        client = Client.objects.get(client_slug=client_slug)
        context_dict['client'] = client
    except Client.DoesNotExist:
        # NOTE(review): if the client does not exist, the filter below still
        # references `client` -- presumably unreachable in practice; confirm.
        pass
    client_events = Event.objects.filter(client = client)
    context_dict['events'] = client_events
    return render(request, 'client.html', context_dict)

#List of all clients with links to their individual profiles. Will likely be removed.
@login_required
def get_all_clients(request):
    """List every client."""
    clients = Client.objects.all()
    context_dict = {'clients': clients}
    return render(request, 'clients.html', context_dict)

#First step for creating an event. Shows the Event form and posts the initial event information.
@login_required
def add_event(request, client_slug):
    """Create an event for the logged-in client, then redirect to teacher
    selection; non-owners get a permission-denied page."""
    try:
        client = Client.objects.get(client_slug=client_slug)
    except Client.DoesNotExist:
        client = None
    # Only the client who owns this profile may create events on it.
    if request.user == client.user:
        if request.method == 'POST':
            form = EventForm(request.POST)
            if form.is_valid():
                if client:
                    event = form.save(commit=False)
                    event.client = client
                    event.save()
                    url = 'event/' + str(event.pk) +'/select-teacher/'
                    messages.success(request, "Initial creation of the event was successful. Please select teachers to email below.")
                    # Pre-populate the event with every matching teacher
                    # availability that has not already been emailed.
                    times_available = get_all_times_available_for_event(event)
                    for time in times_available:
                        if time.pk not in event.times_available and time.pk not in event.times_emailed:
                            event.times_available.extend([time.pk])
                    event.save()
                    return HttpResponseRedirect(url)
            else:
                print form.errors
        else:
            form = EventForm()
        context_dict = {'form': form, 'client': client}
        return render(request, 'add_event.html', context_dict)
    else:
        dict ={'class_event': "alert-danger", 'message': "You don't have permission to perform this action.", 'url': 'myaccount', 'button_text': "My Account"}
        return render(request, "generic_message.html", dict)

#Creates available time for a specific teacher.
@login_required
def add_time(request, teacher_slug):
    """Let a teacher add an availability window, rejecting overlaps."""
    try:
        teacher = Teacher.objects.get(slug = teacher_slug)
    except Teacher.DoesNotExist:
        teacher = None
        return Http404("Teacher does not exist!")
    # Only the teacher who owns this profile may add availability.
    if request.user == teacher.user:
        if request.method == 'POST':
            #Check for double bookings
            if check_double_booked_time(teacher, request.POST.get('start_time'), request.POST.get('end_time')):
                messages.error(request, "This time is already booked or your times overlap! Please correct your entry and try again.")
                return HttpResponseRedirect("/teacher/" + teacher.slug + "/add_time/")
            #Create the form, validate, and save
            form = AvailableTimeForm(request.POST)
            if form.is_valid():
                if teacher:
                    time = form.save(commit=False)
                    time.teacher = teacher
                    time.save()
                    messages.success(request, "Your availability was added successfully.")
                    return HttpResponseRedirect("/myaccount/")
            else:
                print form.errors
        else:
            form = AvailableTimeForm()
        context_dict = {'form': form, 'teacher': teacher}
        return render(request, 'add_time.html', context_dict)
    else:
        dict ={'class_event': "alert-danger", 'message': "You don't have permission to perform this action.", 'url': 'myaccount', 'button_text': "My Account"}
        return render(request, "generic_message.html", dict)

#Allows a teacher to edit time they previously submitted
@login_required
def edit_time(request, time_pk):
    """Let a teacher update the start/end of one of their time slots."""
    try:
        time = Available_Time.objects.get(pk=time_pk)
    except Available_Time.DoesNotExist:
        time = None
        dict ={'class_event': "alert-danger", 'message': "This scheduled time does not exist!", 'url': 'myaccount', 'button_text': "My Account"}
        return render(request, "generic_message.html", dict)
    # Only the owning teacher may edit the slot.
    if request.user == time.teacher.user:
        if request.method == 'POST':
            form = AvailableTimeForm(request.POST)
            if form.is_valid():
                time.start_time = form.cleaned_data['start_time']
                time.end_time = form.cleaned_data['end_time']
                time.save()
                messages.success(request, "Your availability was updated successfully.")
                return HttpResponseRedirect("/myaccount/")
            else:
                print form.errors
        else:
            form = AvailableTimeForm(instance=time)
        context_dict = {'form': form, 'time':time}
        return render(request, 'edit_time.html', context_dict)
    else:
        dict ={'class_event': "alert-danger", 'message': "You don't have permission to perform this action.", 'url': 'myaccount', 'button_text': "My Account"}
        return render(request, "generic_message.html", dict)

#Allows a teacher or user to delete their own scheduled time objects
@login_required
def 
delete_time(request, time_pk): try: time = Available_Time.objects.get(pk=time_pk) except Available_Time.DoesNotExist: time = None dict ={'class_event': "alert-danger", 'message': "This scheduled time does not exist!", 'url': 'myaccount', 'button_text': "My Account"} return render(request, "generic_message.html", dict) if request.user == time.teacher.user: time.delete() messages.success(request, "You have successfully deleted this time") return HttpResponseRedirect("/myaccount/") else: dict ={'class_event': "alert-danger", 'message': "You don't have permission to perform this action.", 'url': 'myaccount', 'button_text': "My Account"} return render(request, "generic_message.html", dict) #Allows a client to edit an event they have created @login_required def edit_event(request, event_pk): event = Event.objects.get(pk=event_pk) if request.user == event.client.user: if request.method == 'POST': form = EventForm(request.POST) if form.is_valid(): event.start_time= form.cleaned_data['start_time'] event.end_time = form.cleaned_data['end_time'] event.comments = form.cleaned_data['comments'] event.is_on_call = form.cleaned_data['is_on_call'] # other editable fields go here event.save() messages.success(request, "Event was successfully edited.") return HttpResponseRedirect("/myaccount/") else: print form.errors else: form =EventForm(instance=event) context_dict ={'form': form, 'event': event} return render(request, 'edit_event.html', context_dict) else: dict ={'class_event': "alert-danger", 'message': "You don't have permission to perform this action.", 'url': 'myaccount', 'button_text': "My Account"} return render(request, "generic_message.html", dict) @login_required def delete_event(request, event_pk): try: event = Event.objects.get(pk= event_pk) except Event.DoesNotExist: event = None dict ={'class_event': "alert-danger", 'message': "This event does not exist!", 'url': 'myaccount', 'button_text': "My Account"} return render(request, "generic_message.html", dict) if 
request.user == event.client.user: if event.in_progress or event.teacher is not None: messages.error(request, "This event is in progress and cannot be modified!") return HttpResponseRedirect("/myaccount/") else: event.delete() messages.success(request, "You have successfully deleted this event.") return HttpResponseRedirect("/myaccount/") else: dict ={'class_event': "alert-danger", 'message': "You don't have permission to perform this action.", 'url': 'myaccount', 'button_text': "My Account"} return render(request, "generic_message.html", dict) #Renders the Registration form for a teacher if a GET and saves the teacher is a POST def register_teacher(request): registered = False if request.method == 'POST': user_form = UserForm(data=request.POST) teacher_form = TeacherForm(data=request.POST) if user_form.is_valid() and teacher_form.is_valid(): #Don't commit here because if the teacher info is bad it will #create a user but not a teacher user= user_form.save(commit=False) #hash the password user.set_password(user.password) user.save() teacher = teacher_form.save(commit=False) #associate the correct user profile with the teacher information teacher.user = user #save the teacher teacher.save() user.save() registered = True messages.success(request, "Thank you for registering! 
Welcome to your portal.") return HttpResponseRedirect("/myaccount/") else: print user_form.errors, teacher_form.errors #GET Request else: user_form = UserForm() teacher_form = TeacherForm() context_dict = {'user_form': user_form, 'teacher_form': teacher_form, 'registered': registered} return render(request, 'register_teacher.html', context_dict) #Renders the Client registration form for a GET and saves the client with a POST request def register_client(request): registered = False if request.method =='POST': user_form = UserForm(data=request.POST) client_form = ClientForm(data=request.POST) if user_form.is_valid() and client_form.is_valid(): user = user_form.save(commit=False) #hash the password user.set_password(user.password) user.save() client = client_form.save(commit=False) client.user = user client.save() #now commit the user object that the client is saved user.save() registered = True messages.success(request, "Thank you for registering! Welcome to your portal.") else: print user_form.errors, client_form.errors #GET request else: user_form = UserForm() client_form = ClientForm() context_dict = {'user_form': user_form, 'client_form': client_form, 'registered': registered} return render(request, 'register_client.html', context_dict) #Login page for the user def user_login(request): if request.method =='POST': username = request.POST['username'] password = request.POST['password'] user= authenticate(username=username, password=password) if user: if user.is_active: login(request, user) messages.success(request, "You are logged in.") return HttpResponseRedirect("/myaccount/") else: dict ={'class_event': "alert-danger", 'message': "Your account is inactive. Contact the site administrator.", 'url': 'index', 'button_text': "Home" } return render(request, "generic_message.html", dict) else: messages.error(request, "You supplied incorrect login details. 
Please try again.") return HttpResponseRedirect("/login/") #GET Request, show the form else: return render(request, 'login.html', {}) #Log out the user and redirect them to the index @login_required def user_logout(request): # Since we know the user is logged in, we can now just log them out. logout(request) # Take the user back to the homepage. messages.success(request, "You are now logged out.") return HttpResponseRedirect('/') #The main method for displaying the users information. The request is routed based on the type of user. #For clients, they see a page with their upcoming and pending events. #For teachers, they see a page with their confirmed events and available time calender #The superuser sees all the events, clients, and teacher. Can be customized to be a dashboard for things that Django Admin can't do. @login_required() def my_account(request): user = request.user context_dict={'user':user} is_teacher = False is_client = False is_superuser = False if Teacher.objects.filter(user=user).exists(): is_teacher = True teacher = Teacher.objects.filter(user=user) available_time = Available_Time.objects.filter(teacher=teacher).filter(start_time__gte=datetime.datetime.today() - datetime.timedelta(days=1)).filter(active=True).order_by('start_time') events = Event.objects.filter(teacher=teacher).filter(start_time__gte=datetime.datetime.today() - datetime.timedelta(days=1)).order_by('start_time') context_dict['times'] = available_time context_dict['events'] = events if Client.objects.filter(user=user).exists(): is_client=True client = Client.objects.filter(user=user) client_events_unconfirmed = Event.objects.filter(client=client).filter(teacher=None).filter(start_time__gte=datetime.datetime.today() - datetime.timedelta(days=1)).order_by('start_time') current_client_events = Event.objects.filter(start_time__gte=datetime.datetime.today() - datetime.timedelta(days=1)).exclude(teacher__isnull=True) for event in client_events_unconfirmed: times_available = 
get_all_times_available_for_event(event) for time in times_available: if time.pk not in event.times_available and time.pk not in event.times_emailed: event.times_available.extend([time.pk]) event.save() sum = 0 for event in client_events_unconfirmed: sum += len(event.times_available) context_dict['notification_sum'] = sum context_dict['unconfirmed_events'] = client_events_unconfirmed context_dict['current_events'] = current_client_events if user.is_superuser: is_superuser = True context_dict['teachers'] = Teacher.objects.all() context_dict['clients'] = Client.objects.all() context_dict['events'] = Event.objects.all().order_by('start_time') context_dict['is_teacher'] = is_teacher context_dict['is_superuser'] = is_superuser context_dict['is_client'] = is_client context_dict['message'] = "You have successfully logged in." return render(request, "myaccount.html", context_dict) #POST method for teachers to toggle their on-call status. #The lack of the teacher URL param prevents this method from being toggled by anyone else and is harmless, redirecting the user's account @login_required def go_on_call(request): if request.method == 'POST': if Teacher.objects.filter(user=request.user).exists(): teacher = Teacher.objects.get(user = request.user) #reverse whatever it is teacher.on_call = not teacher.on_call teacher.save() if teacher.on_call: messages.success(request, "You are now on call!") else: messages.warning(request, "You are not on call anymore!") return HttpResponseRedirect("/myaccount/") else: return HttpResponse("I have no idea how you got here") else: return HttpResponseRedirect("/myaccount/") @login_required() def change_time(request, teacher_pk): teacher = Teacher.objects.get(pk=teacher_pk) if request.user == teacher.user: if request.method =='POST': try: hours = Decimal(request.POST.get("hours")) except ValueError: messages.error(request, "Please enter a decimal value for hours between 0.5 and 3.") return HttpResponseRedirect("/myaccount/") if hours < 0.5 or 
hours > 3: messages.error(request, "Please enter a decimal value for hours between 0.5 and 3.") return HttpResponseRedirect("/myaccount/") else: teacher.time_between_events = hours teacher.save() messages.success(request, "Your time between events was successfully updated to %s hours" % hours) return HttpResponseRedirect('/myaccount/') else: dict ={'class_event': "alert-danger", 'message': "You don't have permission to perform this action.", 'url': 'myaccount', 'button_text': "My Account"} return render(request, "generic_message.html", dict) #Shows the available teachers for a particular event booking. Displays the teachers who have available times then and their profiles. def show_available_teachers(request, event_pk): user = request.user context_dict={'user':user} event = Event.objects.get(pk=event_pk) context_dict['event'] = event if request.user == event.client.user: available_times =[] emailed_times =[] for time_pk in event.times_available: time = Available_Time.objects.get(pk=time_pk) available_times.append(time) for time_pk in event.times_emailed: time = Available_Time.objects.get(pk=time_pk) emailed_times.append(time) context_dict['emailed_times'] = emailed_times context_dict['times'] = available_times return render(request, 'select_teacher.html', context_dict) else: dict ={'class_event': "alert-danger", 'message': "You don't have permission to perform this action.", 'url': 'myaccount', 'button_text': "My Account"} return render(request, "generic_message.html", dict) #Returns a list of available time given an event. def get_all_times_available_for_event(event): times = Available_Time.objects.filter(active=True) available_times = [] #This is the logic to only grab the times that match the criteria for starting at or before the event time and ending at or after the event. 
#The reason for grabbing the times and not the teachers is that each specific time has a teacher attached to it, but each teacher just has a list #of all the total times they have entered (which could be a lot and have no relevance) for time in times: if time.start_time <= event.start_time and time.end_time >= event.end_time: available_times.append(time) return available_times #Given an event and a teacher, this method analyzes what to do with the available time that was assigned to the event. #If there is more time left, the method creates new AvailableTime objects and assigns them to the teacher. def get_times_to_deactivate(event, teacher): #all this code needs serious testing times = Available_Time.objects.filter(teacher=teacher) for time in times: #exact match if time.start_time == event.start_time and time.end_time == event.end_time: #set it to false and call it good time.active = False time.save() #available time start before event time and end time is the same if time.start_time < event.start_time and time.end_time == event.end_time: #check for the time delta and create new available time event delta = event.start_time - time.start_time time.active = False time.save() if(delta.seconds > 3600*teacher.time_between_events): new_time = Available_Time() new_time.start_time = time.start_time new_time.end_time = event.start_time - datetime.timedelta(hours=float(teacher.time_between_events)) new_time.teacher = teacher new_time.save() #available time start is the same as event start time and end is after if time.start_time == event.start_time and time.end_time > event.end_time: #check for time delta delta = time.end_time - event.end_time time.active = False time.save() if(delta.seconds > 3600*teacher.time_between_events): new_time = Available_Time() new_time.start_time = event.end_time + datetime.timedelta(hours=float(teacher.time_between_events)) new_time.end_time = time.end_time new_time.teacher = teacher new_time.save() #available time starts before event and ends 
after event if time.start_time < event.start_time and time.end_time > event.end_time: #check for time delta time.active= False time.save() delta_start = event.start_time - time.start_time delta_end = time.end_time - event.end_time if(delta_start.seconds > 3600*teacher.time_between_events): new_time = Available_Time() new_time.start_time = time.start_time new_time.end_time = event.start_time - datetime.timedelta(hours=float(teacher.time_between_events)) new_time.teacher = teacher new_time.save() if(delta_end.seconds > 3600*teacher.time_between_events): new_time = Available_Time() new_time.start_time = event.end_time + datetime.timedelta(hours=float(teacher.time_between_events)) new_time.end_time = time.end_time new_time.teacher = teacher new_time.save() #Sends emails to the teachers that were selected by the client in the second part of the event creating process. @login_required def send_emails_to_teachers(request, event_pk): event = Event.objects.get(pk=event_pk) subject = "{0} has just sent you an email about an event!".format(event.client.organization) return_address = "noreply@gmail.com" if request.user == event.client.user: if request.method == 'POST': #mark the event as in_progress and no longer available to be deleted event.in_progress = True #Loop through all available teachers and send out mail time_pks = request.POST.getlist('times') for pk in time_pks: time = Available_Time.objects.get(pk = pk) teacher = time.teacher send_email_teacher(event, teacher) #add this time to the emailed list so it cannot be emailed again. 
event.times_available.remove(int(pk)) event.times_emailed.extend([int(pk)]) event.save() print "This teacher's name is {0}, the primary key is {1}, and their email is {2}".format(teacher.user.get_full_name(), teacher.pk, teacher.user.email) messages.success(request, "Your email(s) was sent successfully.") return HttpResponseRedirect("/myaccount/") else: dict ={'class_event': "alert-danger", 'message': "Something went wrong because you're not using this like you're supposed to.", 'url': 'myaccount', 'button_text': "My Account"} return render(request, "generic_message.html", dict) else: dict ={'class_event': "alert-danger", 'message': "You don't have permission to perform this action.", 'url': 'myaccount', 'button_text': "My Account"} return render(request, "generic_message.html", dict) #Once a teacher clicks on their personal link, they are taken to this page to confirm that they want to commit to being in the event @login_required def confirm_teacher_part1(request, event_pk, teacher_pk): event = Event.objects.get(pk = event_pk) teacher = Teacher.objects.get(pk = teacher_pk) if request.user == teacher.user: #if the event already has a teacher if event.teacher is not None: #django won't do composite primary keys without fussing so I need to make my own and make sure duplicates don't exist if Click.objects.filter(teacher = teacher.pk).filter(event=event.pk).exists(): dict={'class_event': "alert-warning", 'message': "You have already clicked on this event!", 'url': "myaccount", "button_text": "My Account"} return render(request, "generic_message.html", dict) else: #make a new click object click = Click() click.event = event.pk click.teacher = teacher.pk click.save() dict ={'class_event': "alert-warning", 'message': "Sorry, but this event has already been taken!", 'url': 'myaccount', 'button_text': "My Account"} return render(request, "generic_message.html", dict) context_dict = {'teacher': teacher, 'event': event} return render(request, 'teacher_event_confirm.html', 
context_dict) else: dict ={'class_event': "alert-danger", 'message': "You don't have permission to perform this action.", 'url': 'myaccount', 'button_text': "My Account"} return render(request, "generic_message.html", dict) #Posts the final request and commit the teacher to the event @login_required def confirm_teacher_post(request, event_pk, teacher_pk): event = Event.objects.get(pk = event_pk) teacher = Teacher.objects.get(pk = teacher_pk) if request.user == teacher.user: if request.method == 'POST': #assign the event teacher to the teacher who clicked it. event.teacher=teacher event.save() #find all the times that the teacher has that will be marked inactive and mark them as inactive #run the method for splitting the times into new times and setting to false get_times_to_deactivate(event,teacher) #give the teacher a click click = Click() click.teacher = teacher.pk click.event = event.pk click.save() # send a confirmation email to the client send_email_client(event) messages.success(request, "You successfully signed up for this event! 
Please make sure your availability was appropriately updated.") return HttpResponseRedirect("/myaccount/") else: dict ={'class_event': "alert-danger", 'message': "Something went wrong because you're not using this like you're supposed to.", 'url': 'myaccount', 'button_text': "My Account"} return render(request, "generic_message.html", dict) else: dict ={'class_event': "alert-danger", 'message': "You don't have permission to perform this action.", 'url': 'myaccount', 'button_text': "My Account"} return render(request, "generic_message.html", dict) def generic_message(request): return render(request, "generic_message.html", {}) def check_double_booked_time(teacher, start_time, end_time): timezone = pytz.timezone('America/Denver') start_time_parse = timezone.localize(parse_date(start_time)) end_time_parse = timezone.localize(parse_date(end_time)) double_booked = False teacher_times = Available_Time.objects.filter(teacher = teacher) for time in teacher_times: if time.start_time <= start_time_parse <= time.end_time or time.start_time <= end_time_parse <= time.end_time: double_booked = True break return double_booked def send_email_teacher(event, teacher): send_mail( template_name='templated_email/email_teacher.html', from_email='eugene.baibourin@gmail.com', recipient_list=[teacher.user.email], context={ 'event': event, 'teacher': teacher }, ) def send_email_client(event): send_mail( template_name='templated_email/email_client.html', from_email='eugene.baibourin@gmail.com', recipient_list=[event.client.user.email], context={ 'event': event, }, )
18,248
18217349ac9c681d47b54f43d7163e555016e02f
#!/usr/bin/env python """ Start the SimpleDriving action server """ import rospy from motion_planning import vfh_move_server if __name__ == "__main__": rospy.init_node("vfh_mover") MOVER = vfh_move_server.VFHMoveServer() rospy.spin()
18,249
60ef4d83cbad457375e575ba7c5d0f9f3bed02db
# Generated by Django 3.1.7 on 2021-03-19 15:18 from django.db import migrations, models class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='SaasScPoster', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('qrstyle', models.CharField(db_column='qrStyle', max_length=120)), ('posterid', models.IntegerField(db_column='posterId')), ('title', models.CharField(max_length=120)), ('infoid', models.IntegerField(db_column='infoId')), ('posterimg', models.CharField(db_column='posterImg', max_length=120)), ('createtime', models.IntegerField(db_column='createTime')), ('status', models.IntegerField()), ('shopcode', models.CharField(db_column='shopCode', max_length=20)), ('sellerid', models.IntegerField(db_column='sellerId')), ], options={ 'db_table': 'saas_sc_poster', 'managed': False, }, ), ]
18,250
ec4ba93ac3bee703854dc604a1159df514974716
from pathlib import Path this_location = Path(__file__) root = this_location.parents[1] data_path = root / "data" raw_data_path = data_path / "raw" processed_data_path = data_path / "processed"
18,251
1bfbddb25e1559a2c5efc36b8ad841609185e704
#!/usr/bin/python # -*- coding: ascii -*- from typing import TextIO from os import path db_file_list = [ '/mnt/NAS/inv/GOES-16/ABI-L1b-RadF/20190614/OR_ABI-L1b-RadF-M3C02_G16_s20171671145342_e20171671156109_c20171671156144.nc', '/mnt/NAS/inv/GOES-16/ABI-L1b-RadF/20190614/OR_ABI-L1b-RadF-M3C03_G16_s20171671145342_e20171671156109_c20171671156144.nc'] db_files: TextIO = open('db_files.txt', 'w') for entry in db_file_list: entry = entry + '\n' print(entry) for entry in db_file_list: db_files.write(entry + '\n') db_files.close() far_files_list = ['OR_ABI-L1b-RadF-M3C05_G16_s20171671145342_e20171671156109_c20171671156144.nc', 'OR_ABI-L1b-RadF-M3C02_G16_s20171671145342_e20171671156109_c20171671156144.nc'] for entry in far_files_list: entry = entry + '\n' print(entry) far_files: TextIO = open('far_files.txt', 'w') for entry in far_files_list: far_files.write(entry + '\n') far_files.close() db_barefilenames = []
18,252
91b0c1adfdccd96665d4903e22c8b6c040317cf1
from .base import * DEBUG = False ADMINS = ( ('{{cookiecutter.author_name}}', '{{cookiecutter.author_email}}'), ) MANAGERS = ADMINS
18,253
60544b3525e1901698ea2700a90619c7915f0185
import random import numpy from colorama import Fore def SetClock(a , b): res = 0.0 if a<b : res = a elif a == b : res = b else: res = b return res def SetClock_zero(a, b): res = 0.0 if a < b: res = b elif a == b: res = b else: res = a return res import random random.seed(10) def random_generation_A(): r = random.random() r = numpy.log(r) return round((-r / 1.0),2) def random_generation_B(): r1 = random.random() r1 = numpy.log(r1) return round((-r1 / 1.2), 2) def bprint(sentence,val): p = [None,None] p[0]=sentence p[1]=val return print(Fore.BLUE+p[0], p[1]) def pprint(sentence,val): p = [None,None] p[0]=sentence p[1]=val return print(Fore.LIGHTMAGENTA_EX+p[0], p[1]) def gprint(sentence,val): p = [None,None] p[0]=sentence p[1]=val return print(Fore.LIGHTCYAN_EX+p[0], p[1]) def yprint(sentence,val): p = [None,None] p[0]=sentence p[1]=val return print(Fore.YELLOW+p[0], p[1]) deadline = 1 Ea=0.0 Es=0.0 # A = random_generation(Landa) #S = random_generation(M) A = random_generation_A() Ea = Ea+A deadline = int(input("please enter number of iteration: ")) if deadline < 1: print(Fore.RED + "WRONG NUMBER!!!!!!!!!!") deadline = 1 #use random function Queue=0 NumberService=0 NumberProcess=0 busy=0 clock=0.0 Area_under_B=0.0 Area_under_Q=0.0 last_clock=0.0 last_clock1=0.0 pprint("clock: ", clock) bprint("numberservice: ", NumberService) yprint("Queue: ", Queue) print(Fore.LIGHTCYAN_EX,"eventlist: [",A,", ~]") eventList=[0.0,10000.1] i=0 j=0 timestamp="" out_timestamp="" totdelay=0.0 eventList[0] = eventList[0] + A lastQ = 0 last_clock = A while NumberService != deadline: if(eventList[0] < eventList[1]): if (Queue == 0 and busy == 0): clock = SetClock((eventList[0]), (eventList[1])) pprint("clock: ",clock) NumberService = NumberService + 1 bprint("numberservice: ",NumberService) NumberProcess = NumberProcess + 1 yprint("Queue: ",Queue) S = random_generation_B() Es = Es + S eventList[1] = last_clock + S j = j + 1 i = i + 1 #Area_under_B = Area_under_B + busy * (clock - 
last_clock) last_clock = clock busy = 1 A = random_generation_A() Ea = Ea + A eventList[0] = eventList[0] + A NumberProcess = NumberProcess + 1 gprint("eventlist: ", eventList) clock = SetClock((eventList[0]), (eventList[1])) if (Queue==0 and busy == 1): clock = SetClock((eventList[0]), (eventList[1])) Area_under_B = Area_under_B + busy * (clock - last_clock) last_clock = clock lastQ = 0 if (busy == 1): i = i + 1 pprint("clock: ", clock) Queue = Queue + 1 timestamp = timestamp + str(clock) +',' A = random_generation_A() Ea = Ea + A eventList[0] = eventList[0] + A NumberProcess = NumberProcess + 1 Area_under_Q = Area_under_Q + Queue*(clock - last_clock) last_clock=clock clock = SetClock((eventList[0]), (eventList[1])) Area_under_B = Area_under_B + busy * (clock - last_clock) lastQ = Queue bprint("numberservice: " , NumberService) yprint("Queue: " , Queue) gprint("eventlist: " , eventList) if (eventList[0] >= eventList[1]): if Queue == 0: clock = SetClock((eventList[0]), (eventList[1])) Area_under_B = Area_under_B + busy * (clock - last_clock) last_clock = clock pprint("clock: ", clock) bprint("numberservice: ", NumberService) yprint("Queue: ", Queue) print(Fore.LIGHTCYAN_EX,"eventlist: [", str(eventList[0]),", ~]") clock = SetClock_zero((eventList[0]),(eventList[1])) busy = 0 i = i+1 NumberService= NumberService+1 A = random_generation_A() Ea = Ea + A eventList[0] = eventList[0] + A NumberProcess = NumberProcess + 1 S = random_generation_B() Es = Es + S eventList[1] = clock + S j = j+1 Area_under_Q = Area_under_Q + lastQ * (clock - last_clock) lastQ = Queue pprint("clock: " , clock) bprint("numberservice: " , NumberService) yprint("Queue: " , Queue) gprint("eventlist: " , eventList) if Queue > 0: lastQ = Queue Queue = Queue - 1 S = random_generation_B() NumberService = NumberService + 1 last_clock1=clock clock = SetClock((eventList[0]), (eventList[1])) busy = 1 out_timestamp = out_timestamp + str(clock) +',' # totdelay = totdelay + ( clock - timestamp) 
#timestamp=0.0 eventList[1] = clock + S Es = Es + S j = j + 1 pprint("clock: " , clock) bprint("numberservice: " , NumberService) yprint("Queue: " , Queue) gprint("eventlist: " , eventList) Area_under_Q = Area_under_Q + lastQ * (clock - last_clock) last_clock = clock clock = SetClock((eventList[0]),(eventList[1])) Area_under_B = Area_under_B + busy * (clock - last_clock) lastQ = Queue last_clock = clock busy = 1 print(Fore.WHITE,"") m=0 ts=0.0 ots=0.0 timestamp = timestamp.split(',') out_timestamp = out_timestamp.split(',') for m in range(out_timestamp.__len__() - 1): ts = float(timestamp[m]) ots = (float(out_timestamp[m])) totdelay = totdelay + (ots - ts) print(Fore.YELLOW,"*****************FINAL RESULTS******************") bprint("ServiceNumber : ",NumberService) bprint("system clock : ",round(clock,2)) bprint("Area under B(t): ",round(Area_under_B,2)) bprint("Area under Q(t): ",round(Area_under_Q,2)) bprint("Total Delay : ",round(totdelay,2)) Wq = totdelay / NumberService Lq = Area_under_Q / clock p = Area_under_B / clock L = Lq + p gprint("Wq : ",round(Wq,3)) gprint("Lq : ",round(Lq,3)) gprint("p : ",round(p,3)) gprint("L : ",round(L,3)) W = 0.0 n=0 value_of_Es=0.0 value_of_Ea = 0.0 M=0.0 Landa=0.0 throughput=0.0 value_of_Es = Es/NumberService value_of_Ea = eventList[0] / NumberProcess W = Wq + value_of_Es if value_of_Es == 0: print("something is going wrong!!!") else: M = 1/value_of_Es if value_of_Es == 0: print("something is going wrong!!!") else: Landa = 1/value_of_Ea #pprint("ES :",round(value_of_Es,3)) pprint("W :",round(W,3)) #pprint("Landa :",round(Landa,3)) #pprint("Mou :",round(M,3)) print("") if Landa < M: throughput = 1.0 print(Fore.LIGHTBLUE_EX+"***********PAYDAR***********") else: throughput = p*1.2 print(Fore.RED + "!!!! 
NAPAYDAR !!!!") import time import sys print(Fore.YELLOW+"") animation = "|" i=0 for i in range(15): time.sleep(0.1) sys.stdout.write(animation) sys.stdout.flush() yprint("syetem THROUGHPUT :",round(throughput,1)) print("") #@NAZANIN BAYATI
18,254
d17cc4974943a7969f521cb4e82c9d3f1c64d8e6
import pandas as pd
import numpy as np
import json
import re
from flask import Flask
from flask import request
from flask import jsonify
from flask_mysqldb import MySQL
from sklearn.externals import joblib

app = Flask(__name__)

# SQL config
# NOTE(review): credentials are hard-coded; move them to environment variables.
app.config['MYSQL_HOST'] = 'remotemysql.com'
app.config['MYSQL_USER'] = 'vMg935rEqf'
app.config['MYSQL_PASSWORD'] = 'SkgUJ4EGYq'
app.config['MYSQL_DB'] = 'vMg935rEqf'

mysql = MySQL(app)

# Pre-trained classifier mapping a symptom-CUI one-hot vector to disease CUIs.
model = joblib.load('model.pkl')

# Feature order expected by the model, and the human-readable symptom list.
# `with` ensures both handles are closed (the original leaked the first one).
with open('./features.txt', 'r') as fh:
    features = fh.read().split(',')
with open('./symptoms.txt', 'r') as fh:
    symptoms = fh.read().split(',')

# Map each feature (symptom CUI) to its column index in the model input.
feature_dict = {feat: i for i, feat in enumerate(features)}


@app.route('/predict', methods=['POST'])
def predict():
    """Return the top-10 most probable diseases for the posted symptoms.

    Expects a JSON body {"symptoms": [<symptom name>, ...]} and responds
    with a JSON list of {"disease": name, "prop": probability-in-percent}
    sorted by descending probability.
    """
    data = request.get_json()['symptoms']
    cur = mysql.connection.cursor()
    # Translate symptom names to CUIs.  Parameterized queries prevent the
    # SQL injection the previous str.format() interpolation allowed.
    search = []
    for x in data:
        cur.execute(
            '''SELECT DISTINCT Symptom_CUI FROM vMg935rEqf.`disease-symptom` WHERE Symptom=%s;''',
            (x,))
        search.append(cur.fetchone()[0])
    # One-hot encode the symptoms in the feature order used at training time.
    # (Plain ints instead of the removed np.int alias.)
    sample = [0] * len(features)
    for s in search:
        sample[feature_dict[s]] = 1
    sample = np.array(sample).reshape(1, len(sample))
    results = model.predict_proba(sample)[0]
    # Translate the model's disease CUIs back to display names.
    diseases = []
    for x in model.classes_:
        cur.execute(
            '''SELECT DISTINCT Disease FROM vMg935rEqf.`disease-symptom` WHERE Disease_CUI = %s;''',
            (x,))
        diseases.append(cur.fetchone()[0])
    results_ordered_by_probability = list(map(
        lambda p: {"disease": p[0], "prop": p[1] * 100},
        sorted(zip(diseases, results), key=lambda p: p[1], reverse=True)))
    return jsonify(results_ordered_by_probability[0:10])


@app.route('/symptom', methods=['GET'])
def symptom():
    """Return the full list of known symptom names."""
    return jsonify(symptoms)


if __name__ == '__main__':
    app.run(port=5000, debug=True)
18,255
a91adf290626f681c8e66cacd38424123c98d18b
class Solution:
    # @return a tuple, (index1, index2)
    def twoSum(self, num, target):
        """Return the 1-based indices of the two entries of `num` summing to `target`."""
        # Maps a seen value to its 1-based position; when the current value's
        # complement has already been seen, the pair is complete.
        complements = {}
        for idx, value in enumerate(num):
            needed = target - value
            if needed in complements:
                return complements[needed], idx + 1
            complements[value] = idx + 1
18,256
ada3abee1d55c0bc81327e5fb4415e619d809580
num = int(input('Please enter a number:')) if num < 0 : print('-1') low = 0 hight = num mid = (low+hight)/2.0 sign = mid while mid**2 != num: if mid**2 >num: hight=mid else: low = mid mid = (low+hight)/2.0 if sign == mid: break print(mid) # def binary_search(alist, item): # n = len(alist) # start = 0 # end = n-1 # while start <= end: # mid = (start + end) // 2 # if item == alist[mid]: # return True # elif item < alist[mid]: # end = mid - 1 # elif item > alist[mid]: # start = mid + 1 # return False # def binary_search2(alist, item): # n = len(alist) # if n == 0: # return False # mid = n // 2 # if item == alist[mid]: # return True # elif item < alist[mid]: # return binary_search2(alist[:mid], item) # else: # return binary_search2(alist[mid+1:], item) # if __name__ == '__mian__': # l1 = [22,23,44,64,77,43,76,444,25,54] # print(binary_search2(l1,22))
18,257
a1beb0bf1919b2fabef1b8476219a6323c30d5fe
import numpy as np

# Two demo arrays: the integers 0..29 and a short prime list.
array1 = np.arange(30)
array2 = np.array([2, 3, 5])

# Persist both arrays under named keys in a single uncompressed .npz archive.
with open('homework.npz', 'wb') as out:
    np.savez(out, array1=array1, array2=array2)
18,258
74b7dcb9dcff4276cf6eb1c85e0b6b3577e8b65c
import numpy as np
from utility import linear_assignment as la
from sklearn.metrics import accuracy_score
from sklearn.metrics import silhouette_score, davies_bouldin_score, calinski_harabasz_score, normalized_mutual_info_score, confusion_matrix
from sklearn.metrics.cluster import contingency_matrix
from sklearn.cluster import KMeans


def calcolaPurity(labelConosciute, labels):
    """Cluster purity: credit each predicted cluster with its majority true
    class and return the credited fraction of all samples."""
    matrice = contingency_matrix(labelConosciute, labels)
    return np.sum(np.amax(matrice, axis=0)) / np.sum(matrice)


def evaluation(X_selected, X_test, n_clusters, y):
    """Fit k-means on the selected features and score the test clustering.

    Parameters
    ----------
    X_selected : {numpy array}, shape (n_samples, n_selected_features)
        training data restricted to the selected features
    X_test : {numpy array}
        data to cluster and evaluate
    n_clusters : {int}
        number of clusters
    y : {numpy array}, shape (n_samples,)
        true labels for X_test

    Returns
    -------
    (nmi, silhouette, davies_bouldin, calinski_harabasz, purity)
    """
    modello = KMeans(n_clusters=n_clusters, init='k-means++', n_init=10,
                     max_iter=300, tol=0.0001, precompute_distances=True,
                     verbose=0, random_state=None, copy_x=True, n_jobs=1)
    modello.fit(X_selected)
    predette = modello.predict(X_test)

    # External metric: needs the ground-truth labels.
    nmi = normalized_mutual_info_score(y, predette, average_method='arithmetic')
    # Internal metrics: depend only on the geometry of the clustering.
    sil = silhouette_score(X_test, predette, metric="euclidean")
    db_score = davies_bouldin_score(X_test, predette)
    ch_score = calinski_harabasz_score(X_test, predette)
    purity = calcolaPurity(y, predette)
    return nmi, sil, db_score, ch_score, purity
18,259
f6e043101889db18dad31493ff4362d24ae09394
""" Created on Wed Dec 16 01:17:48 2020 @author: zym """ import os import pandas as pd import numpy as np import xlwt months = ['2015-01','2015-02','2015-03','2015-04','2015-05','2015-06','2015-07','2015-08','2015-09','2015-10','2015-11','2015-12', '2016-01','2016-02','2016-03','2016-04','2016-05','2016-06','2016-07','2016-08','2016-09','2016-10','2016-11','2016-12', '2017-01','2017-02','2017-03','2017-04','2017-05','2017-06','2017-07','2017-08','2017-09','2017-10','2017-11','2017-12', '2018-01','2018-02','2018-03','2018-04','2018-05','2018-06','2018-07','2018-08','2018-09','2018-10','2018-11','2018-12', '2019-01','2019-02','2019-03','2019-04','2019-05','2019-06','2019-07','2019-08','2019-09','2019-10','2019-11','2019-12', '2020-01','2020-02','2020-03','2020-04','2020-05','2020-06'] # 读取上证A股月度数据 print("\n开始读取上证A股月度数据") stkcds = [] # 所有股票的证券代码 Stkcd stocks_Mclsprc = [] # 股票交易月份及对应的月收盘价 Trdmnt Mclsprc fileName = os.getcwd() + "\\上证A股月度数据.xlsx" df = pd.DataFrame(pd.read_excel(fileName)) for i in range(len(df)): s = df.Stkcd[i] t = {} t.clear() if(s not in stkcds): # print(s) stkcds.append(s) t['Stkcd'] = s j = i Mclsprc = [] Mclsprc.clear() while(j<len(df) and df.Stkcd[j]==s): month_Mclsprc = [] month_Mclsprc.clear() month_Mclsprc.append(df.Trdmnt[j]) month_Mclsprc.append(df.Mclsprc[j]) Mclsprc.append(month_Mclsprc) j = j+1 t['Mclsprc'] = Mclsprc stocks_Mclsprc.append(t) print("读取完成") # print(len(stkcds)) # 1533 # print(len(stocks_Mclsprc)) # 1533 # print(stocks_Mclsprc[0]) # stocks_Mclsprc是一个包含若干dict的list 存储读入的A股月度数据 # 每一个dict代表一个股票 以 stocks_Mclsprc[0] 为例 如下所示 # {'Stkcd': 600000, 'Mclsprc': [['2015-01', 14.47], ['2015-02', 14.53], ['2015-03', 15.79], ['2015-04', 18.07], ['2015-05', 17.04], ['2015-06', 16.96], ['2015-07', 15.07], ['2015-08', 14.96], ['2015-09', 16.63], ['2015-10', 16.39], ['2015-11', 18.65], ['2015-12', 18.27], ['2016-01', 16.9], ['2016-02', 18.45], ['2016-03', 17.93], ['2016-04', 17.83], ['2016-05', 18.29], ['2016-06', 15.57], ['2016-07', 15.7], 
['2016-08', 16.48], ['2016-09', 16.49], ['2016-10', 16.27], ['2016-11', 17.16], ['2016-12', 16.21], ['2017-01', 16.74], ['2017-02', 16.59], ['2017-03', 16.01], ['2017-04', 15.21], ['2017-05', 12.84], ['2017-06', 12.65], ['2017-07', 13.36], ['2017-08', 12.71], ['2017-09', 12.87], ['2017-10', 12.61], ['2017-11', 12.91], ['2017-12', 12.59], ['2018-01', 13.17], ['2018-02', 12.46], ['2018-03', 11.65], ['2018-04', 11.61], ['2018-05', 10.55], ['2018-06', 9.56], ['2018-07', 10.17], ['2018-08', 10.33], ['2018-09', 10.62], ['2018-10', 10.98], ['2018-11', 10.71], ['2018-12', 9.8], ['2019-01', 10.73], ['2019-02', 11.74], ['2019-03', 11.28], ['2019-04', 11.97], ['2019-05', 11.13], ['2019-06', 11.68], ['2019-07', 11.87], ['2019-08', 11.28], ['2019-09', 11.84], ['2019-10', 12.51], ['2019-11', 11.91], ['2019-12', 12.37], ['2020-01', 11.35], ['2020-02', 10.85], ['2020-03', 10.15], ['2020-04', 10.63], ['2020-05', 10.57], ['2020-06', 10.58]]} # 从上证A股月度数据读取每个月可用的股票 用于后续选股 stkcds_every_month = {} for i in range(len(months)): m = months[i] stkcds_every_month[m] = [] # 初始化 stkcds_tmp = [] # 记录第一次出现的股票 for i in range(len(df)): m = df.Trdmnt[i] s = df.Stkcd[i] s = str(s) # 股票在第一次出现时不用计入 因为后续计算月度收益率时需要同时用到本月收益和上月收益 所以从第二次开始出现的股票才是可用的 # 例如 603278 最早的数据是 2017-11 但实际可用的数据从 2017-12 开始 if(s not in stkcds_tmp): stkcds_tmp.append(s) else: # 从第二次出现时 开始计入当月可用股票 if(s not in stkcds_every_month[m]): stkcds_every_month[m].append(s) # 计算每只股票的月度收益率 # 股票月度收益率 = (本月收盘价-上月收盘价)/上月收盘价 # Mincomes是一个包含若干dict的dict 后续读取股票月度收益率时较为方便 只需要股票的证券代码和交易月份即可读 print("\n开始计算每只股票的月度收益率") Mincomes = {} for i in range(len(stkcds)): s = stocks_Mclsprc[i]['Stkcd'] s = str(s) n = len(stocks_Mclsprc[i]['Mclsprc']) Mincomes_one_stock = {} Mincomes_one_stock.clear() for j in range(n-1): last_month_price = stocks_Mclsprc[i]['Mclsprc'][j][1] this_month = stocks_Mclsprc[i]['Mclsprc'][j+1][0] this_month_price = stocks_Mclsprc[i]['Mclsprc'][j+1][1] month_income = (this_month_price-last_month_price)/last_month_price # 月度收益率 
Mincomes_one_stock[this_month] = month_income Mincomes[s] = Mincomes_one_stock print("计算完成") # print(len(Mincomes)) # 1533 # print(Mincomes['600000']) # {'2015-02': 0.004146510020732462, '2015-03': 0.08671713695801789, '2015-04': 0.14439518682710584, '2015-05': -0.057000553403431166, '2015-06': -0.004694835680751074, '2015-07': -0.11143867924528304, '2015-08': -0.007299270072992663, '2015-09': 0.11163101604278061, '2015-10': -0.01443174984966918, '2015-11': 0.13788895668090287, '2015-12': -0.02037533512064338, '2016-01': -0.07498631636562676, '2016-02': 0.091715976331361, '2016-03': -0.028184281842818407, '2016-04': -0.005577244841048602, '2016-05': 0.02579921480650594, '2016-06': -0.14871514488791684, '2016-07': 0.008349389852279962, '2016-08': 0.049681528662420454, '2016-09': 0.0006067961165047336, '2016-10': -0.013341419041843473, '2016-11': 0.05470190534726494, '2016-12': -0.05536130536130532, '2017-01': 0.03269586674892027, '2017-02': -0.008960573476702425, '2017-03': -0.03496081977094625, '2017-04': -0.04996876951905063, '2017-05': -0.1558185404339251, '2017-06': -0.014797507788161956, '2017-07': 0.05612648221343866, '2017-08': -0.04865269461077834, '2017-09': 0.012588512981903882, '2017-10': -0.020202020202020186, '2017-11': 0.023790642347343436, '2017-12': -0.024786986831913268, '2018-01': 0.04606830818109611, '2018-02': -0.053910402429764546, '2018-03': -0.06500802568218302, '2018-04': -0.0034334763948498646, '2018-05': -0.0913006029285098, '2018-06': -0.09383886255924172, '2018-07': 0.06380753138075307, '2018-08': 0.015732546705998048, '2018-09': 0.02807357212003864, '2018-10': 0.03389830508474588, '2018-11': -0.02459016393442619, '2018-12': -0.08496732026143791, '2019-01': 0.09489795918367343, '2019-02': 0.09412861136999065, '2019-03': -0.03918228279386719, '2019-04': 0.06117021276595756, '2019-05': -0.07017543859649121, '2019-06': 0.04941599281221913, '2019-07': 0.01626712328767119, '2019-08': -0.04970513900589721, '2019-09': 0.04964539007092203, 
'2019-10': 0.056587837837837836, '2019-11': -0.04796163069544362, '2019-12': 0.03862300587741386, '2020-01': -0.08245755860953918, '2020-02': -0.04405286343612335, '2020-03': -0.06451612903225801, '2020-04': 0.04729064039408871, '2020-05': -0.005644402634054609, '2020-06': 0.0009460737937558928} # print(Mincomes['600010']['2017-07']) # 0.28310502283105027 # 读取上证指数月度数据 # 市场月度收益率已在xlsx中计算完成 直接读取 # 市场月度收益率 = (本月收盘指数-上月收盘指数)/上月收盘指数 print("\n开始读取上证指数月度数据") Mincomes_market = {} fileName = os.getcwd() + "\\上证指数月度数据.xlsx" df = pd.DataFrame(pd.read_excel(fileName)) for i in range(len(df)): if(i>0): # 跳过2015-01 month = df.Month[i] income = df.Mincome[i] Mincomes_market[month] = income print("读取完成") # print(Mincomes_market) # print(Mincomes_market['2017-07']) # 0.025247562434473726 # 计算每只股票的超额收益率 AR # 股票超额收益率 = 股票月度收益率-市场月度收益率 print("\n开始计算每只股票的超额收益率") Abnormal = {} # 结构同 Mincomes for i in range(len(stkcds)): s = stkcds[i] s = str(s) Abnormal_one_stock = {} Abnormal_one_stock.clear() for j in range(len(months)): if(j>0): m = months[j] if(m in Mincomes[s]): mincome1 = Mincomes[s][m] mincome_market1 = Mincomes_market[m] a = mincome1 - mincome_market1 Abnormal_one_stock[m] = a Abnormal[s] = Abnormal_one_stock print("计算完成") # print(Abnormal['600000']) # print(Abnormal['600010']['2017-07']) # 0.25785746039657653 # 选取 5% 还是 10% 作为输家/赢家组合 # select = 0.05 # select = 0.1 # 形成期 # formation_period = ['2016-01','2016-02','2016-03','2016-04','2016-05','2016-06','2016-07','2016-08','2016-09','2016-10','2016-11','2016-12'] # formation_period = ['2016-01','2016-02','2016-03','2016-04','2016-05','2016-06','2016-07','2016-08','2016-09','2016-10','2016-11','2016-12', # '2017-01','2017-02','2017-03','2017-04','2017-05','2017-06'] # formation_period = ['2015-02','2015-03','2015-04','2015-05','2015-06','2015-07','2015-08','2015-09','2015-10','2015-11','2015-12', # '2016-01','2016-02','2016-03','2016-04','2016-05','2016-06','2016-07'] # formation_period = 
['2015-02','2015-03','2015-04','2015-05','2015-06','2015-07','2015-08','2015-09','2015-10','2015-11','2015-12', # '2016-01','2016-02','2016-03','2016-04','2016-05','2016-06','2016-07','2016-08','2016-09','2016-10','2016-11','2016-12', # '2017-01','2017-02','2017-03','2017-04','2017-05','2017-06'] # formation_period = ['2015-02','2015-03','2015-04','2015-05','2015-06','2015-07','2015-08','2015-09','2015-10','2015-11','2015-12', # '2016-01','2016-02','2016-03','2016-04','2016-05','2016-06','2016-07','2016-08','2016-09','2016-10','2016-11','2016-12'] # formation_period = ['2015-07','2015-08','2015-09','2015-10','2015-11','2015-12', # '2016-01','2016-02','2016-03','2016-04','2016-05','2016-06','2016-07','2016-08','2016-09','2016-10','2016-11','2016-12'] # formation_period = ['2015-08','2015-09','2015-10','2015-11','2015-12', # '2016-01','2016-02','2016-03','2016-04','2016-05','2016-06','2016-07','2016-08','2016-09','2016-10','2016-11','2016-12', # '2017-01','2017-02','2017-03','2017-04','2017-05','2017-06','2017-07','2017-08','2017-09','2017-10','2017-11','2017-12'] # formation_period = ['2017-01','2017-02','2017-03','2017-04','2017-05','2017-06','2017-07','2017-08','2017-09','2017-10','2017-11','2017-12'] # formation_period = ['2017-01','2017-02','2017-03','2017-04','2017-05','2017-06','2017-07','2017-08','2017-09','2017-10','2017-11','2017-12', # '2018-01','2018-02','2018-03','2018-04','2018-05','2018-06'] # 检验期 # test_period = ['2017-01','2017-02','2017-03','2017-04','2017-05','2017-06','2017-07','2017-08','2017-09','2017-10','2017-11','2017-12', # '2018-01','2018-02','2018-03','2018-04','2018-05','2018-06','2018-07','2018-08','2018-09','2018-10','2018-11','2018-12', # '2019-01','2019-02','2019-03','2019-04','2019-05','2019-06','2019-07','2019-08','2019-09','2019-10','2019-11','2019-12', # '2020-01','2020-02','2020-03','2020-04','2020-05','2020-06'] # test_period = 
['2017-01','2017-02','2017-03','2017-04','2017-05','2017-06','2017-07','2017-08','2017-09','2017-10','2017-11','2017-12', # '2018-01','2018-02','2018-03','2018-04','2018-05','2018-06','2018-07','2018-08','2018-09','2018-10','2018-11','2018-12'] # test_period = ['2017-07','2017-08','2017-09','2017-10','2017-11','2017-12', # '2018-01','2018-02','2018-03','2018-04','2018-05','2018-06','2018-07','2018-08','2018-09','2018-10','2018-11','2018-12', # '2019-01','2019-02','2019-03','2019-04','2019-05','2019-06'] # test_period = ['2017-01','2017-02','2017-03','2017-04','2017-05','2017-06','2017-07','2017-08','2017-09','2017-10','2017-11','2017-12', # '2018-01','2018-02','2018-03','2018-04','2018-05','2018-06','2018-07','2018-08','2018-09','2018-10','2018-11','2018-12', # '2019-01','2019-02','2019-03','2019-04','2019-05','2019-06','2019-07','2019-08','2019-09','2019-10','2019-11','2019-12'] # test_period = ['2017-01','2017-02','2017-03','2017-04','2017-05','2017-06','2017-07','2017-08','2017-09','2017-10','2017-11','2017-12', # '2018-01','2018-02','2018-03','2018-04','2018-05','2018-06','2018-07','2018-08','2018-09','2018-10','2018-11','2018-12', # '2019-01','2019-02','2019-03','2019-04','2019-05','2019-06','2019-07','2019-08','2019-09','2019-10','2019-11','2019-12', # '2020-01','2020-02','2020-03','2020-04','2020-05','2020-06'] # test_period = ['2017-03','2017-04','2017-05','2017-06','2017-07','2017-08','2017-09','2017-10','2017-11','2017-12', # '2018-01','2018-02','2018-03','2018-04','2018-05','2018-06','2018-07','2018-08','2018-09','2018-10','2018-11','2018-12', # '2019-01','2019-02','2019-03','2019-04','2019-05','2019-06','2019-07','2019-08','2019-09','2019-10','2019-11','2019-12', # '2020-01','2020-02','2020-03','2020-04','2020-05','2020-06'] # test_period = ['2016-08','2016-09','2016-10','2016-11','2016-12', # '2017-01','2017-02','2017-03','2017-04','2017-05','2017-06'] # test_period = 
['2018-01','2018-02','2018-03','2018-04','2018-05','2018-06','2018-07','2018-08','2018-09','2018-10','2018-11','2018-12', # '2019-01','2019-02','2019-03','2019-04','2019-05','2019-06'] # test_period = ['2018-01','2018-02','2018-03','2018-04','2018-05','2018-06','2018-07','2018-08','2018-09','2018-10','2018-11','2018-12', # '2019-01','2019-02','2019-03','2019-04','2019-05','2019-06','2019-07','2019-08','2019-09','2019-10','2019-11','2019-12', # '2020-01','2020-02','2020-03','2020-04','2020-05','2020-06'] # test_period = ['2018-07','2018-08','2018-09','2018-10','2018-11','2018-12', # '2019-01','2019-02','2019-03','2019-04','2019-05','2019-06','2019-07','2019-08','2019-09','2019-10','2019-11','2019-12', # '2020-01','2020-02','2020-03','2020-04','2020-05','2020-06'] # 最优结果 1 formation_period = ['2016-01','2016-02','2016-03','2016-04','2016-05','2016-06','2016-07','2016-08','2016-09','2016-10','2016-11','2016-12'] test_period = ['2017-01','2017-02','2017-03','2017-04','2017-05','2017-06','2017-07','2017-08','2017-09','2017-10','2017-11','2017-12', '2018-01','2018-02','2018-03','2018-04','2018-05','2018-06','2018-07','2018-08','2018-09','2018-10','2018-11','2018-12', '2019-01','2019-02','2019-03','2019-04','2019-05','2019-06','2019-07','2019-08','2019-09','2019-10','2019-11','2019-12', '2020-01','2020-02','2020-03','2020-04','2020-05','2020-06'] select = 0.05 # 最优结果 2 # formation_period = ['2015-08','2015-09','2015-10','2015-11','2015-12', # '2016-01','2016-02','2016-03','2016-04','2016-05','2016-06','2016-07','2016-08','2016-09','2016-10','2016-11','2016-12', # '2017-01','2017-02','2017-03','2017-04','2017-05','2017-06','2017-07','2017-08','2017-09','2017-10','2017-11','2017-12'] # test_period = ['2018-01','2018-02','2018-03','2018-04','2018-05','2018-06','2018-07','2018-08','2018-09','2018-10','2018-11','2018-12', # '2019-01','2019-02','2019-03','2019-04','2019-05','2019-06'] # select = 0.1 formation = len(formation_period) test = len(test_period) split = 
test_period[0] print("\n形成期:" + formation_period[0] + " ~ " + formation_period[-1] + " 共"+str(formation)+"个月") print("检验期:" + test_period[0] + " ~ " +test_period[-1]+ " 共"+str(test)+"个月") period = str(formation)+"_"+str(test)+"_"+split+"_"+str(select) # 取形成期和检验期可用股票的交集 sf = stkcds_every_month[formation_period[0]] # 形成期可用股票 # print(len(sf)) for i in range(formation): mtemp = formation_period[i] stemp = stkcds_every_month[mtemp] sf = list(set(stemp).intersection(set(sf))) # print(len(sf)) st = stkcds_every_month[test_period[0]] # 检验期可用股票 # print(len(st)) for i in range(test): mtemp = test_period[i] stemp = stkcds_every_month[mtemp] st = list(set(stemp).intersection(set(st))) # print(len(st)) st = list(set(sf).intersection(set(st))) # 取交集 sp = st # print("\n") # print(len(sp)) # 根据得到的股票 计算形成期的累计超额收益率 记录检验期的超额收益率 stocks_this_period = [] # 格式是一个包含若干dict的list for i in range(len(sp)): one_stock = {} one_stock.clear() s = sp[i] one_stock['Stkcd'] = s one_stock['Abnormal_formation'] = 0 # 形成期的累计超额收益率 for j in range(formation): mj = formation_period[j] one_stock['Abnormal_formation'] = one_stock['Abnormal_formation']+ Abnormal[s][mj] one_stock['Abnormal_test'] = [] # 检验期每个月的超额收益率 for j in range(test): mj = test_period[j] one_stock['Abnormal_test'].append(Abnormal[s][mj]) stocks_this_period.append(one_stock) # 按形成期的累计超额收益率从小到大排序 stocks_this_period = sorted(stocks_this_period, key=lambda k: k['Abnormal_formation']) l = len(stocks_this_period) # 排名前10%(5%)为输家组合 后10%(5%)为赢家组合 # num = int(0.1*l) # num = int(0.05*l) num = int(select*l) loser = stocks_this_period[0:num] winner = stocks_this_period[l-num:l] # 将一个投资组合S及相关指标写入EXCEL def write_Excel(S,name,period): filepath = (os.getcwd()+"\\分组\\"+period) isExists=os.path.exists(filepath) if not isExists: os.makedirs(filepath) excelpath = (os.getcwd()+"\\分组\\"+period+"\\"+name+".xls") workbook = xlwt.Workbook(encoding='utf-8') sheet = workbook.add_sheet('Sheet1',cell_overwrite_ok=True) headlist=['Stkcd','Formation'] row=0 col=0 for head 
in headlist: sheet.write(row,col,head) col=col+1 n = len(S) for i in range(n): sheet.write(i+1,0,int(S[i]['Stkcd'])) sheet.write(i+1,1,float(S[i]['Abnormal_formation'])) # sheet.write(i+1,2,float(S[i]['Abnormal_test'])) workbook.save(excelpath) # 将输家组合和赢家组合写入xls write_Excel(loser,"loser",period) write_Excel(winner,"winner",period) # print(winner[0]) # print(Abnormal['600131']['2017-01']) # 计算一个股票在检验期每个月的累计超额收益率 并加入 one_stock 中 def CAR_stock(S): num = len(S) for i in range(num): one_stock = S[i] ar = one_stock['Abnormal_test'] CAR_test = [] CAR_test.clear() for j in range(test): length = j+1 car = 0 for t in range(length): car = car + ar[t] CAR_test.append(car) S[i]['CAR_test'] = CAR_test return S # 计算一个投资组合在检验期每个月的累计超额收益率 def CAR_portfolio(S): num = len(S) CAR = [] for i in range(test): car_point = 0 for j in range(num): car_point = car_point + S[j]['CAR_test'][i] car_point = car_point/num CAR.append(car_point) return CAR loser_num = len(loser) loser = CAR_stock(loser) loser_CAR = CAR_portfolio(loser) winner_num = len(winner) winner = CAR_stock(winner) winner_CAR = CAR_portfolio(winner) # for i in range(test): # print(str(loser_CAR[i])+" "+str(winner_CAR[i])) result = {'loser':loser_CAR,'winner':winner_CAR} result = pd.DataFrame(result) path = (os.getcwd()+"\\结果\\"+period+".xls") result.to_excel(path) """ Created on Wed Dec 16 01:17:48 2020 @author: gy """ # 画图 import matplotlib.pyplot as plt plt.plot(test_period, loser_CAR, color='blue') plt.plot(test_period, winner_CAR, color='red') plt.yticks(fontsize=8) plt.xticks(rotation=90,fontsize=5) t = "Formation: " + formation_period[0] + " ~ " + formation_period[-1] + " " + str(formation)+" months" + "\nTest: " + test_period[0] + " ~ " +test_period[-1]+ " "+str(test)+" months"+"\nSplit: "+split + " "+str(int(select*100))+"%" plt.title(t,fontsize=10) path = (os.getcwd()+"\\结果\\"+period+".png") plt.savefig(path) plt.show()
18,260
0ec1ce6e3c61c7be357fae78ee786c374a0a4cd4
from app import db
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from flask.ext.security import Security, SQLAlchemyUserDatastore, UserMixin, RoleMixin, login_required
from flask import session, redirect, current_app
from app import app


class User(db.Model):
    """Application user; implements the Flask-Login user interface."""
    id = db.Column(db.Integer, primary_key=True)
    nickname = db.Column(db.String(64), index=True, unique=True)
    email = db.Column(db.String(120), index=True, unique=True)
    password = db.Column(db.String(200), index=True, unique=False)
    posts = db.relationship('Post', backref='author', lazy='dynamic')

    @property
    def is_authenticated(self):
        # Any persisted user counts as authenticated.
        return True

    @property
    def is_active(self):
        return True

    @property
    def is_anonymous(self):
        return False

    def get_token(self, expiration=1800):
        """Return a signed token embedding this user's id (default TTL 30 min)."""
        s = Serializer(current_app.config['SECRET_KEY'], expiration)
        return s.dumps({'user': self.id}).decode('utf-8')

    @staticmethod
    def verify_token(token):
        """Return the User encoded in `token`, or None if invalid/expired.

        Previously used a bare ``except:`` which also swallowed
        KeyboardInterrupt/SystemExit; narrowed to Exception (itsdangerous
        raises BadSignature / SignatureExpired here).
        """
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token)
        except Exception:
            return None
        id = data.get('user')
        if id:
            return User.query.get(id)
        return None

    def get_id(self):
        """Return the id as text (Flask-Login contract), on Python 2 or 3."""
        try:
            return unicode(self.id)  # python 2
        except NameError:
            return str(self.id)  # python 3

    def __repr__(self):
        return '<User %r>' % (self.nickname)


class Post(db.Model):
    """A short (max 140 chars) post authored by a User."""
    id = db.Column(db.Integer, primary_key = True)
    body = db.Column(db.String(140))
    timestamp = db.Column(db.DateTime)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'))

    def __repr__(self):
        return '<Post %r>' % (self.body)


# Wire Flask-Security to the models.
# NOTE(review): SQLAlchemyUserDatastore normally expects (db, user_model,
# role_model); passing Post in the role slot looks wrong — confirm intent.
security = Security()
user_datastore = SQLAlchemyUserDatastore(db, User, Post)
security.init_app(app, user_datastore)
18,261
97ede2081b748ed4620bce645a3de1f20f39b7fd
#!/usr/bin/env python """ Demonstrate how to set options shared by all Tk widgets (font, colors, etc.) There are two methods, either read options from a file, or set options in the code. """ import Tkinter, Pmw, os, sys def makeGUI(top): Tkinter.Label(text='Demonstration of\nfont and color options').pack(pady=5) list = Pmw.ScrolledListBox(top, label_text = 'my first Pwm widget', labelpos = 'n') list.pack(pady=5) Tkinter.Button(top, text='button 1').pack(pady=5) Tkinter.Button(top, text='button 2').pack(pady=5) # insert items: mylist = ('item1','item2','item3') for item in mylist: list.insert('end', item) def readOptions(): if not os.path.isfile('.tkoptions'): print 'no file .tkoptions in the current directory' return root.option_readfile('.tkoptions') def addOptions(): general_font = ('Helvetica', 14, 'bold') label_font = ('Times', 24, 'italic') listbox_font = ('Helvetica', 20, 'roman bold') # check which fonts that are actually available: import tkFont print 'label_font:',label_font,'\n realized as',\ tkFont.Font(font=label_font).actual() print 'general_font:',general_font,'\n realized as',\ tkFont.Font(font=general_font).actual() print 'listbox_font:',listbox_font,'\n realized as',\ tkFont.Font(font=listbox_font).actual() root.option_add('*Font', general_font) root.option_add('*Foreground', 'black') root.option_add('*Label*Font', label_font) root.option_add('*Listbox*Font', listbox_font) root.option_add('*Listbox*Background', 'green') root.option_add('*Listbox*Foreground', 'brown') root = Tkinter.Tk() try: if sys.argv[1] == 'file': readOptions() else: addOptions() except: print 'fonts.py file|add'; sys.exit(1) # initialize Pmw, and make sure to use Tk option database: Pmw.initialise(root,useTkOptionDb=1) root.title('demonstrating font and color settings') # after the GUI is made, readOptions/addOptions have no effect makeGUI(root) #print root.option_get() root.mainloop()
18,262
4b6bc27fe29a7446d6015cdced42d19787ec8840
from django.apps import AppConfig

# Tells Django (pre-3.2 style) which AppConfig to use when this package is
# listed in INSTALLED_APPS by its bare path.
default_app_config = 'management.log.LogConfig'


class LogConfig(AppConfig):
    """App configuration for the management.log application."""
    # Full dotted Python path of the application this config describes.
    name = 'management.log'
18,263
5ef2cf6f0e2c5bbb890006d33f75568332d677fa
""" https://docs.google.com/spreadsheets/d/1kuJngdIQibZ15xWVDkV99Aofn7ATJuYbk8HzZqjv8jc/edit#gid=0 https://www.ns.sg/nsp/wcm/connect/9e1e31dc-cc14-46f1-83b2-3246fe2f8bbf/New+IPPT+Format+and+Scoring+System+for+Hometeam.pdf?MOD=AJPERES """ from pprint import pprint # from p_table import pushup_score_table string = """50 49 48 46 44 43 42 41 40 40 39 39 38 38 37 37 36 36 35 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 16 14 12 10 8 6 4 2 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 50 50 49 48 46 44 43 42 41 40 40 39 39 38 38 37 37 36 36 35 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 16 14 12 10 8 6 4 2 1 0 0 0 0 0 0 0 0 0 0 0 0 0 50 50 50 49 48 46 44 43 42 41 40 40 39 39 38 38 37 37 36 36 35 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 16 14 12 10 8 6 4 2 1 0 0 0 0 0 0 0 0 0 0 0 0 50 50 50 50 49 48 46 45 44 43 42 41 40 39 39 38 38 37 37 36 36 35 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 16 14 12 10 8 6 4 2 1 0 0 0 0 0 0 0 0 0 0 0 50 50 50 50 50 49 48 46 45 44 43 42 41 40 39 39 38 38 37 37 36 36 35 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 16 14 12 10 8 6 4 2 1 0 0 0 0 0 0 0 0 0 0 50 50 50 50 50 50 49 48 46 45 44 43 42 41 40 40 39 39 38 38 37 37 36 36 35 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 18 16 14 12 10 8 6 4 2 1 0 0 0 0 0 0 0 0 0 50 50 50 50 50 50 50 49 48 46 45 44 43 42 41 40 40 39 39 38 38 37 37 36 36 35 35 35 34 33 32 31 30 29 28 27 26 25 24 23 22 20 18 16 14 12 10 8 6 4 2 1 0 0 0 0 0 0 0 0 50 50 50 50 50 50 50 50 49 48 46 45 44 43 42 41 40 40 39 39 38 38 37 37 36 36 35 35 35 34 33 32 31 30 29 28 27 26 25 24 23 22 20 18 16 14 12 10 8 6 4 2 1 0 0 0 0 0 0 0 50 50 50 50 50 50 50 50 50 49 48 47 46 45 44 43 42 41 40 40 39 39 38 38 37 37 36 36 35 35 35 34 33 32 31 30 29 28 27 26 25 24 22 20 18 16 14 12 10 8 6 4 2 1 0 0 0 0 0 0 50 50 50 50 50 50 50 50 50 50 49 48 47 46 45 44 43 42 41 40 40 39 39 38 38 37 37 36 36 35 35 35 34 33 32 31 30 29 28 27 26 25 24 22 20 18 16 14 12 10 8 6 4 2 1 0 0 0 0 0 50 50 50 50 50 50 50 50 50 
50 50 49 48 47 46 45 44 43 42 41 40 40 39 39 38 38 37 37 36 36 35 35 35 34 33 32 31 30 29 28 27 26 25 24 22 20 18 16 14 12 10 8 6 4 2 1 0 0 0 0 50 50 50 50 50 50 50 50 50 50 50 50 49 48 47 46 45 44 43 42 41 40 40 39 39 38 38 37 37 36 36 35 35 35 34 33 32 31 30 29 28 27 26 25 24 22 20 18 16 14 12 10 8 6 4 2 1 0 0 0 50 50 50 50 50 50 50 50 50 50 50 50 50 49 48 47 46 45 44 43 42 41 40 40 39 39 38 38 37 37 36 36 35 35 35 34 33 32 31 30 29 28 27 26 25 24 22 20 18 16 14 12 10 8 6 4 2 1 0 0 50 50 50 50 50 50 50 50 50 50 50 50 50 50 49 48 47 46 45 44 43 42 41 40 40 39 39 38 38 37 37 36 36 35 35 35 34 33 32 31 30 29 28 27 26 25 24 22 20 18 16 14 12 10 8 6 4 2 1 0""" score_table = [] for line in string.split("\n"): age_group_score = line.split("\t") age_group_score.reverse() age_group_score = [int(x) for x in age_group_score] score_table.append(age_group_score) pprint(score_table) # print(score_table == pushup_score_table)
18,264
a172c3ee667e3681f42189c6e758e226cc265986
import numpy as np

# Solve the linear system
#   3x + 6y - 5z = 12
#    x - 3y + 2z = -2
#   5x -  y + 4z = 10
# written as A X = B, so X = A^{-1} B.
#
# First with numpy.matrix, where `*` is true matrix multiplication and
# `** -1` is the matrix inverse (unlike plain arrays, whose operators map
# element-wise).
A = np.matrix([[3, 6, -5], [1, -3, 2], [5, -1, 4]])
B = np.matrix([[12], [-2], [10]])

# Invert A and multiply to obtain the solution column vector.
X = A ** (-1) * B
print(X)
# matrix([[ 1.75],
#         [ 1.75],
#         [ 0.75]])
# i.e. x = 1.75, y = 1.75, z = 0.75; check by verifying A*X reproduces B.

# The same computation with plain numpy.array — the structure preferred
# throughout scientific Python (fewer surprises, one data structure).
# Here `*` would be element-wise, so we invert explicitly and use .dot().
a = np.array([[3, 6, -5], [1, -3, 2], [5, -1, 4]])
b = np.array([12, -2, 10])

x = np.linalg.inv(a).dot(b)
print(x)
# array([ 1.75,  1.75,  0.75])
18,265
dcc94190d672280e150330f0b3102a3facdd2a44
""" text to video """ import subprocess import argparse import sys import logging _log = logging.getLogger(__name__) def build_ffmpeg_cmd(txt, audio_path, output_path): filter_graph = ';'.join([ 'mandelbrot=s=720x720[mandel]', '[0:a]showwaves=s=720x720:mode=line[wav]', '[wav]format=rgba,colorchannelmixer=aa=0.6[wavalpha]', '[mandel][wavalpha]overlay[vid]', '[vid]drawtext=text="{}":fontfile=/usr/share/fonts/naver-nanum/NanumGothic.ttf:fontcolor=white:fontsize=30:x=(w-text_w)/5:y=(h-text_h)/5[out]'.format(txt.replace('"', '\\"')), ]) return [ 'ffmpeg', '-i', audio_path, '-filter_complex', filter_graph, '-map', '[out]', '-map', '0:a', '-c:v', 'libx264', '-c:a aac', '-pix_fmt', 'yuv420p', '-shortest', output_path, ] def main(txt, audio_path, output_path): cmd = build_ffmpeg_cmd(txt, audio_path, output_path) _log.info('Executing command: %s', cmd) subprocess.run(cmd) if __name__ == '__main__': logging.basicConfig(level='DEBUG', format='%(asctime)s %(levelname)s %(message)s') parser = argparse.ArgumentParser() parser.add_argument('audio') parser.add_argument('output') parser.add_argument('txt', default=sys.stdin, type=argparse.FileType('r'), nargs='?') args = parser.parse_args() main(args.txt.read(), args.audio, args.output)
18,266
0c865a2dff7a7b9868d90e8ce15590d22e4ce35d
from django.template.loader import get_template
from django.urls import reverse

from commerce import settings as commerce_settings
from commerce.managers import PaymentManager as CommercePaymentManager


class PaymentManager(CommercePaymentManager):
    """Payment manager implementing the wire-transfer payment method."""

    def get_payment_url(self):
        """Return the URL the customer is redirected to (the orders overview)."""
        return reverse('commerce:orders')

    def render_payment_button(self):
        """Render the wire-transfer payment button for this manager's order."""
        button_template = get_template('commerce/payment_button_wire_transfer.html')
        return button_template.render({'order': self.order})

    def render_payment_information(self):
        """Render the wire-transfer payment details for this manager's order."""
        info_template = get_template('commerce/payment_information_wire_transfer.html')
        context = {'order': self.order, 'commerce_settings': commerce_settings}
        return info_template.render(context)
18,267
67dc54981574070a2123db6e1034249fd9e80c99
''' Card counting strategy is based on Hi-Lo '''
def count(deck):
    """Return the running Hi-Lo count for *deck*.

    Each card contributes -1 (tens/faces/aces), +1 (low cards 2-6) or 0
    (7-9).  A positive total is returned as a '+'-prefixed string, any
    other total as a plain int — matching the original reporting format.
    """
    total = 0
    for card in deck:
        rank = card[0]
        # Faces/aces ('A','J','Q','K') and '1' (covers "10" and ace-as-1)
        # are high cards: count down.
        if rank.isalpha() or rank == '1':
            total -= 1
        # Digits 2-6 are low cards: count up; 7-9 leave the count unchanged.
        elif int(rank) < 7:
            total += 1
    return '+' + str(total) if total > 0 else total
18,268
e122ea9fbac710355c5f975849e759f22f7c7a71
def VapO2(P, T, x_N2):
    """Regression surrogate: vapour-phase O2 fraction from P, T and x_N2.

    Inputs are first centred and scaled by fixed fit constants, fed through
    a linear response surface, and the result is de-normalised back to the
    output scale.  Coefficients come from the original fitted model.
    """
    # Normalise each input against its centring value and scale factor.
    p_norm = (P - 5.44671220e+02) / 2.47804900e-01
    t_norm = (T - -1.77974655e+02) / 7.88210000e-03
    n_norm = (x_N2 - 9.93714215e-01) / 1.07156015e-03
    # Linear response surface in the normalised coordinates
    # (same term order as the fitted model).
    response = 1 * -5.70904190e+01 + n_norm * 5.71250785e+01 + t_norm * 5.79503513e+01 + p_norm * -4.36281367e+01
    # De-normalise back to the physical output scale.
    return response * 2.92472581e-05 + 1.03111540e-04
18,269
fcd1e255342b28542e045d03c3cac0d8ab8fcb40
NUM = int (input("introduce un numero")) if NUM > 0: print (f"{NUM} es positivo") elif NUM == 0: print (f"{NUM} es nulo") else: print (f"{NUM} es negativo") print ("fin del programa")
18,270
d54c098f851ed36e83cf8e7d4a4489db2cc004ce
# -*- coding: utf-8 -*- """ Created on Mon Mar 12 17:41:29 2018 @author: Administrator """ import sys from PyQt4 import QtCore,QtGui,uic import views import Global import chardet import workmanage qtCreatorFile = "UI.ui" Ui_MainWindow, QtBaseClass = uic.loadUiType(qtCreatorFile) #我的主界面类,继承于QMainWindow class MyApp(QtGui.QMainWindow, Ui_MainWindow): def __init__(self): #分别对QMainWindow与我的MainWindow进行初始化 QtGui.QMainWindow.__init__(self) Ui_MainWindow.__init__(self) self.setupUi(self) #分别设置按钮点击后对应的槽函数 self.addpushButton.clicked.connect(self.add) self.clearButton.clicked.connect(self.clear) self.startButton.clicked.connect(self.start) self.stopButton.clicked.connect(self.stop) self.help_() self.timer=QtCore.QTimer() self.timer.timeout.connect(self.Update) self.timer.start(200) #实现将所需的Url加入 def add(self): url=self.URLEdit.text() self.textEdit.append("已添加:".decode('utf-8')+url) self.URLEdit.clear() views.filllist(url) #清除已输入的信息 def clear(self): self.tidEdit.clear() self.portsEdit.clear() self.exclude_portsEdit.clear() self.numEdit.clear() self.URLEdit.clear() self.textEdit.clear() self.timesEdit.text() #开始进行 def start(self): Global.settidValue(self.tidEdit.text()) if not Global.gettidValue(): Global.instatesinf("您未输入订单号!".decode('utf-8')) return Global.setoperatorValue(self.operatorcomboBox.currentText()) Global.setportsValue(self.portsEdit.text()) Global.setexclude_portsValue(self.exclude_portsEdit.text()) Global.setprotocolValue(self.protocolcomboBox.currentText()) Global.setcategoryValue(self.categorycomboBox.currentText()) Global.setsortbyValue(self.sortbycomboBox.currentText()) Global.setnumValue(self.numEdit.text()) Global.settimesValue(self.timesEdit.text()) Global.setspeedValue(self.speedcomboBox.currentText()) Global.setapiValue() Global.thread_mark=1 workmanage.start() #显示help文档信息 def help_(self): f=open("help.txt","r") text=f.read() f.close() type_=chardet.detect(text) self.textEdit.append(text.decode(type_["encoding"])) #暂停所有线程 def stop(self): 
Global.thread_mark=0 #实时显示当前状态信息 def Update(self): global message_queue text=Global.outstatesinf() if text: self.textEdit.append(text) if __name__ == "__main__": app = QtGui.QApplication(sys.argv) window = MyApp() window.show() sys.exit(app.exec_())
18,271
b708af3dd30dc723604ba1bec6e068f3ceeae1cf
from keras.datasets import mnist
from keras import models
from keras import layers
from keras.utils import to_categorical

# Loading the MNIST train/test split.
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()

# Preparing the image data: reshape((n, -1)) flattens each 28x28 image into
# a 784-vector, then pixel intensities are scaled from [0, 255] to [0, 1].
train_images = train_images.reshape((train_images.shape[0], -1))
train_images = train_images.astype('float') / 255
test_images = test_images.reshape((test_images.shape[0], -1))
# BUG FIX: the original used "-" instead of "=", so the scaled result was
# discarded and evaluate() saw raw 0-255 pixels while the network was
# trained on 0-1 inputs, badly skewing the reported test accuracy.
test_images = test_images.astype('float') / 255

# Preparing the labels: one-hot encode the digit classes.
train_labels = to_categorical(train_labels)
test_labels = to_categorical(test_labels)

# Defining the model: a single-hidden-layer MLP classifier.
network = models.Sequential()
network.add(layers.Dense(512, activation='relu', input_shape=(28 * 28, )))
network.add(layers.Dense(10, activation='softmax'))

# summary
network.summary()

# Compiling
network.compile(optimizer='rmsprop',
                loss='categorical_crossentropy',
                metrics=['accuracy'])

# Training
network.fit(train_images, train_labels,
            epochs=5,
            batch_size=256
            )

# Evaluating on the held-out test set.
test_loss, test_acc = network.evaluate(test_images, test_labels)
print('test_acc', test_acc)
18,272
26a6cf92fdf980aa126621a9f678fc4fc1a3d048
def calculate(inputNum): X = int(inputNum) lst = [X] while X != 1: if X % 2 == 0: X = int(X / 2) lst.append(X) else: X = int((3 * X) + 1) lst.append(X) return lst while True: inputString = input() if inputString == "0 0": break else: a, b = str(inputString).split(" ") aLst = calculate(int(a)) bLst = calculate(int(b)) intersectionBetweenBoth = set(aLst) & set(bLst) c = 0 d = 0 result = 0 for (i, numInA) in enumerate(aLst): if numInA in intersectionBetweenBoth: c = str(i) result = str(numInA) break for (i, numInB) in enumerate(bLst): if numInB in intersectionBetweenBoth: d = str(i) break print (a + " needs " + c + " steps, " + b + " needs " + d + " steps, they meet at " + result)
18,273
6d8aaefab327f915144d6813d70e549a72678a84
from Pages.Search.SearchPage import SearchPage
import unittest
import pytest
import time
from Pages.Add_Remove_Songs_Playlist.AddRemoveSongsPlaylistsPage import AddRemoveSongsPlaylistsPage


# UI test suite for adding/removing songs in a playlist.  Methods are named
# test_a.. through test_e.. so unittest's alphabetical ordering enforces the
# scenario sequence (each step depends on the previous one's UI state).
# NOTE(review): the fixed time.sleep() calls make the suite slow and
# potentially flaky; explicit waits in the page objects would be more robust.
@pytest.mark.usefixtures("oneTime_Login_SetUp", "login_set_up")
class TestAddRemove(unittest.TestCase):

    @pytest.fixture(autouse=True)
    def classSetup(self, oneTime_Login_SetUp):
        # Build fresh page objects for every test from the shared driver
        # (self.driver is provided by the login fixtures).
        self.Search = SearchPage(self.driver)
        self.Add = AddRemoveSongsPlaylistsPage(self.driver)

    def test_a_search_add_song(self):
        # Search for a song and open "my playlist" from its options menu.
        time.sleep(1)
        self.Search.click_search_window()
        self.Search.send_in_search_tab("sting ya king")
        time.sleep(2)
        self.Search.click_searched_song()
        time.sleep(1)
        self.Add.click_options()
        self.Add.open_my_playlist()

    def test_b_add_from_playlist(self):
        # Add the song to the playlist from the playlist view.
        time.sleep(1)
        self.Add.open_my_playlist()
        time.sleep(1)
        self.Add.click_refresh()
        time.sleep(1)
        self.Add.click_add_button()
        time.sleep(1)

    def test_c_remove_song(self):
        # Remove the song again from the "Test" playlist.
        time.sleep(1)
        self.Add.element_click("Test", "link")
        self.Add.open_my_playlist()
        self.Add.remove_song()

    def test_d_add_from_upper_options(self):
        # Re-add the song, this time via the upper options menu.
        time.sleep(1)
        self.Search.click_search_window()
        self.Search.send_in_search_tab("sting ya king")
        time.sleep(2)
        self.Search.click_searched_song()
        time.sleep(2)
        self.Add.add_playlist_from_upper_options()
        time.sleep(1)

    def test_e_home_song(self):
        # Verify the song shows up in the target playlist from the home view.
        time.sleep(1)
        self.Add.click_home()
        self.Add.click_target_playlist()
        self.Add.get_song_home_playlist()
        time.sleep(1)
18,274
a95b5f787003b9970ddc0fbf2138d4234c2a93bf
import torch
import SimpleITK as sitk
from pathlib import Path
from data.dataset import CSIDataset
from torch.utils.data import Dataset, DataLoader

# Smoke-test script for CSIDataset: pull a single batch, check the patch
# shapes, and dump each patch to a .nrrd file for visual inspection.

crop_img = '../crop_isotropic_dataset'  # root of the cropped isotropic dataset
batch_size = 1  # NOTE(review): unused — the DataLoader below hardcodes batch_size=1

train_dataset = CSIDataset(crop_img)
train_dataloader = DataLoader(train_dataset, batch_size=1, shuffle=True)

# One sample: image patch, instance patch, ground-truth patch, weight map
# and class label (c_label is not checked below).
img_patch, ins_patch, gt_patch, weight, c_label = next(iter(train_dataloader))

# Drop the leading batch dimension (batch size is 1).
img_patch = torch.squeeze(img_patch)
ins_patch = torch.squeeze(ins_patch)
gt_patch = torch.squeeze(gt_patch)
weight = torch.squeeze(weight)

# Every patch is expected to be a 128x128x128 cube.
assert img_patch.shape == (128, 128, 128)
assert ins_patch.shape == (128, 128, 128)
assert gt_patch.shape == (128, 128, 128)
assert weight.shape == (128, 128, 128)

# store patches for visualization (compressed .nrrd via the final True flag)
Path('./samples/').mkdir(parents=True, exist_ok=True)
sitk.WriteImage(sitk.GetImageFromArray(img_patch.numpy()), './samples/img.nrrd', True)
sitk.WriteImage(sitk.GetImageFromArray(gt_patch.numpy()), './samples/gt.nrrd', True)
sitk.WriteImage(sitk.GetImageFromArray(ins_patch.numpy()), './samples/ins.nrrd', True)
sitk.WriteImage(sitk.GetImageFromArray(weight.numpy()), './samples/wei.nrrd', True)
18,275
feb5ce22ddf24f89188b7c1dd412f4a02d9c3851
import typing as th  # Literals are available for python>=3.8
from sklearn.base import BaseEstimator, ClassifierMixin
import numpy as np


class NaiveBayes(BaseEstimator, ClassifierMixin):
    """Naive Bayes classifier with two variants selected by *kind*:

    - "gaussian": per-feature, per-class Gaussian likelihoods;
    - anything else: Bernoulli likelihoods obtained by binarising each
      feature at a learned threshold.

    Assumes class labels are integers 0..max(Y) (classes_num is max(Y)).
    """

    def __init__(self, kind):
        # "gaussian" selects the Gaussian variant; any other value selects
        # the thresholded Bernoulli variant.
        self.kind = kind

    def fit(self, X, Y):
        """Estimate class priors and per-feature likelihood parameters."""
        self.classes_num = max(Y)
        self.class_priors = self.cal_class_priors(Y)
        if self.kind == "gaussian":
            self.means, self.stds = self.cal_means_variances(X, Y)
        else:
            self.thresholds, self.conditional_probs = self.cal_thresholds_conditional_probs(X, Y)

    def cal_thresholds_conditional_probs(self, X, Y):
        """Return (thresholds, P(feature > threshold | class)) matrices.

        Both returned arrays are shaped (n_features, n_classes).
        """
        X = np.array([np.array(x) for x in X])
        Y = np.array(Y)
        # NOTE(review): conditional_probs is assigned but never used; the
        # returned probabilities are positive_class / total_class.
        conditional_probs = np.zeros([len(X[0]), self.classes_num + 1])
        total_class = np.zeros([len(X[0]), self.classes_num + 1])
        positive_class = np.zeros([len(X[0]), self.classes_num + 1])
        # thresholds = sum(X) / len(X)
        thresholds = self.cal_thresholds(X, Y)
        for i in range(len(X)):
            for j in range(len(X[0])):
                total_class[j, Y[i]] += 1
                if (X[i][j] > thresholds[j]):
                    positive_class[j][Y[i]] += 1
        return thresholds, positive_class / total_class

    def cal_thresholds(self, X, Y):
        """Return one binarisation threshold per feature column of X."""
        thresholds = np.zeros(len(X[0]))
        for i in range(len(X[0])):
            x = X[:,i]
            thresholds[i] = self.best_threshold(x, Y)
        return thresholds

    def best_threshold(self, X, Y):
        """Pick a split value for one feature by minimising weighted entropy.

        Candidate split positions are label-change boundaries in the
        feature-sorted order.
        NOTE(review): np.random.choice(candidates, 10, replace=False) makes
        the result nondeterministic and raises ValueError when there are
        fewer than 10 candidates — confirm intended behavior.
        NOTE(review): candidates are label changes in the ORIGINAL order
        (Y[i] != Y[i-1]), not in the sorted order y — looks like a bug;
        confirm.
        """
        sorted_index = sorted([i for i in range(len(X))], key=lambda i: X[i])
        n = len(Y)
        x = X[sorted_index]
        y = Y[sorted_index]
        candidates = []
        for i in range(1, len(Y)):
            if Y[i] != Y[i-1]:
                candidates.append(i)
        candidates = np.random.choice(candidates, 10 , replace=False)
        best_ant = 100000
        best_ant_index = 0
        for candidate in candidates:
            # Entropy of each side, normalised by side size, rescaled by n.
            temp_ant = (self.antropy(y[:candidate]) / candidate + self.antropy(y[candidate:]) / (n-candidate)) * n
            if temp_ant < best_ant:
                best_ant = temp_ant
                best_ant_index = candidate
        return x[best_ant_index]

    def antropy(self, Y):
        """Return -(p1)·log2(p1) for the fraction p1 of labels equal to 1.

        NOTE(review): this is only the class-1 term of the binary entropy;
        the class-0 term is omitted — confirm whether that is intended.
        """
        n = len(Y)
        ones = sum(Y == 1)
        if ones == 0:
            return 0
        return -(ones / n) * np.log2(ones / n)

    def cal_means_variances(self, X, Y):
        # Calculates the per-feature, per-class mean and std of the
        # Gaussian likelihoods p(t|c); both shaped (n_features, n_classes).
        means = np.zeros([len(X[0]), self.classes_num + 1])
        stds = np.zeros([len(X[0]), self.classes_num + 1])
        for i in range(len(X[0])):
            for c in range(len(means[0])):
                features = []
                for j in range(len(X)):
                    if Y[j] == c:
                        features.append(X[j][i])
                means[i][c] = np.mean(features)
                stds[i][c] = np.std(features)
        return means, stds

    def cal_class_priors(self, y):
        """Return the empirical class frequencies P(c) as a list."""
        classes_num = [0] * (self.classes_num + 1)
        for i in y:
            classes_num[i] += 1
        n = len(y)
        return [classes_num[i] / n for i in range(len(classes_num))]

    def predict(self, X):
        """Return the most probable class label for each row of X."""
        if self.kind == "gaussian":
            return self.pred_gaussian(X)
        else:
            return self.pred_bernouli(X)

    def pred_gaussian(self, X):
        """MAP prediction under the Gaussian likelihood model.

        Zero-probability terms contribute a flat -1000 log-penalty instead
        of -inf.  NOTE(review): a zero std makes the density term divide by
        zero — confirm inputs guarantee per-class feature variance.
        """
        pred = []
        constant = 1 / np.sqrt(2 * np.pi)
        for x in X:
            class_probs = []
            for c in range(self.classes_num + 1):
                term_probs = [constant / self.stds[i][c] * np.exp(-(x[i] - self.means[i][c])**2 / 2 / self.stds[i][c]**2) for i in range(len(x))]
                prob = np.log(self.class_priors[c])
                for p in term_probs:
                    if p != 0:
                        prob += np.log(p)
                    else:
                        prob -= 1000
                class_probs.append(prob)
            pred.append(np.argmax(class_probs))
        return pred

    def pred_bernouli(self, X):
        """MAP prediction under the thresholded Bernoulli likelihood model.

        Each feature is binarised against the fitted threshold, then scored
        with P(1|c) or 1-P(1|c); zero terms get a flat -1000 log-penalty.
        """
        pred = []
        for x in X:
            x_bernoulli = [0] * len(x)
            for i in range(len(x)):
                if x[i] >= self.thresholds[i]:
                    x_bernoulli[i] = 1
            class_probs = []
            for c in range(self.classes_num + 1):
                term_probs = [self.conditional_probs[i][c] if x_bernoulli[i] == 1 else 1 - self.conditional_probs[i][c] for i in range(len(x_bernoulli)) ]
                prob = np.log(self.class_priors[c])
                for p in term_probs:
                    if p != 0:
                        prob += np.log(p)
                    else:
                        prob -= 1000
                class_probs.append(prob)
            pred.append(np.argmax(class_probs))
        return pred
18,276
07febc4d73cbc1aee69113f906bc135c140387c7
# transfer message from front to C
import socket


class Client(object):
    """Forwards payloads from the front end to the C program over TCP.

    The data coming from the front end is expected to include the TCP
    server's ip and port, which are passed to the constructor.
    """

    def __init__(self, host, port):
        self.host = host  # TCP server address
        self.port = port  # TCP server port

    def send_msg_to_c(self, data):
        """Send *data* (bytes) to the C program and close the connection.

        A 6-second timeout guards connect/send so a dead peer cannot hang
        the caller.

        :param data: bytes payload to forward
        :raises OSError: on connect/send failure or timeout
        """
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as client_socket:
            client_socket.settimeout(6)
            client_socket.connect((self.host, self.port))
            client_socket.sendall(data)
        # BUG FIX: the original called close() and then settimeout(None) on
        # the already-closed socket inside the with-block; the context
        # manager already closes the socket, so both calls were redundant
        # dead code and have been removed.
18,277
8381bd255ad7eaaf0a5546fbc813fa1746a2ebec
from bs4 import BeautifulSoup
import os
import datetime
import getpass, os, imaplib, email
import re
import mysql.connector

# Scrapes lead-form emails from a Gmail inbox over IMAP and inserts the
# parsed fields into the MySQL table `leads1`.
# NOTE(review): account credentials and DB settings are hardcoded below —
# move them to environment variables or a config file.


def filterString(str):
    # Strip surrounding whitespace, then delete every newline, carriage
    # return, single quote and space anywhere in the value.
    # NOTE(review): the parameter shadows the builtin `str`.
    str= str.strip()
    return re.sub("[\n\r' ]*","",str)


# method to get the content from a given string
def getContentFromString(mailbody):
    """Parse one email.message.Message and insert the lead into MySQL.

    Extracts Name/Email/Phone/Message/URL from the HTML table in the body
    plus Date/To/From from the headers.  Rows without a URL are appended to
    emptyurls2.csv instead of being inserted.
    """
    # msg_key = mailbody.values()[mailbody.keys().index("Message-ID")]
    # print(msg_key)
    name=''
    email=''
    phone=''
    message=''
    url=''
    emailDate=''
    emailTo=''
    emailFrom=''
    # Pull Date/To/From out of the header lists and normalise the date.
    tempDate = mailbody.values()[mailbody.keys().index("Date")]
    emailDate = datetime.datetime.strptime(tempDate, "%a, %d %b %Y %H:%M:%S %z").strftime('%Y-%m-%d %H:%M:%S')
    emailTo = mailbody.values()[mailbody.keys().index("To")]
    # print(emailTo)
    emailFrom = mailbody.values()[mailbody.keys().index("From")]
    # print(emailFrom)
    # Split the raw message text after the last header line, which appears
    # to isolate the HTML body — confirm this holds for all senders.
    splitstring = mailbody.keys()[-1]+": "+mailbody.values()[-1]
    content_str= str(mailbody).split(splitstring)[1]
    content_html = BeautifulSoup(content_str,"html.parser")
    content = (content_html.findAll("td"))
    # print(content_html)
    # The form renders as label/value <td> pairs; match each label and take
    # the following cell as its value.
    for i in range(0,len(content)):
        content_value = content[i].text
        if(content_value == "Name:"):
            name = content[i+1].text
        elif(content_value == "Email:"):
            email = content[i+1].text
        elif(content_value == "Phone:"):
            phone = content[i+1].text
        elif(content_value == "Message:"):
            message = content[i+1].text
        elif(content_value == "URL:"):
            url = content[i+1].text;
    # print(name)
    # print(email)
    # print(phone)
    # print(message)
    # print(url)
    # NOTE(review): SQL built by string concatenation — vulnerable to SQL
    # injection despite filterString; use a parameterized query
    # (sqlcursor.execute(sql, params)) instead — confirm before changing.
    sql = (
        "INSERT INTO leads1 (name, email, phone, message, url,emaildate,emailto,emailfrom) VALUES ('"
        + filterString(name) + "','" + filterString(email) + "','" + filterString(phone) + "','"
        + filterString(message) + "','" + filterString(url) + "','" + filterString(str(emailDate)) + "','"
        + filterString(emailTo) + "','" + filterString(emailFrom) + "')"
    )
    print(sql)
    try:
        if(url!=''):
            sqlcursor.execute(sql)  # uses the module-level cursor/connection
            mydb.commit()
        else:
            # No URL: log the raw content for later inspection instead.
            print(content)
            with open("emptyurls2.csv","a+") as f:
                f.write(str(content_str)+",\n")
                f.close()  # NOTE(review): redundant — the with-block closes f
            # exit()
    except Exception as e:
        print(e)
        exit()


# getting messages from the mail
def getMessages():
    """Fetch all matching inbox messages and process each one."""
    typ, messages = conn.search(None, '(FROM "MHB" SUBJECT "%s")' % subject)
    for i in messages[0].split():
        type1, message = conn.fetch(i, "RFC822")
        m = email.message_from_bytes(message[0][1])
        try:
            status = getContentFromString(m)
            # input()
        except Exception as e:
            print("in excepion")
            print(e)


# ===================================================
# creating the mysql db connetion to record contents of mails
mydb = mysql.connector.connect(
    host="localhost",
    user="root",
    passwd="",
    database="manipalgmail"
)
sqlcursor = mydb.cursor(dictionary=True)
# ======================================================
# credentials of server
servername = "imap.gmail.com"
usernm = "mounika@multipliersolutions.in"
passwd = "9030146678"
subject = "Manipal"
# establishing connection using imap
conn = imaplib.IMAP4_SSL(servername)
conn.login(usernm, passwd)
conn.select("Inbox")
getMessages()
18,278
bcfa0c0efd5b992464496524f31d19c38c25281e
from flask_restful import Resource
from flask import session, jsonify, request


import leoObject


class setNetworks(Resource):
    """REST resource that applies new network settings via the Leonardo object."""

    def __init__(self):
        # Shared Leonardo instance (see leoObject.getLeoObject).
        self.gLeonardo = leoObject.getLeoObject()

    def post(self):
        """Apply the settings dict sent as the first element of 'params'.

        Expects a JSON-RPC-style body of the form
        {"id": ..., "params": [ {<new settings>} ]} and returns the network
        manager's result serialized as JSON.
        """
        # print "setNetworks LEO Mem=", self.gLeonardo
        # print "ID = ", request.json['id'], "REQ->", request.json
        jsonDict = request.json
        dictNewSettings = jsonDict['params'][0]
        # print "dictNewSettings->", dictNewSettings
        return jsonify(
            self.gLeonardo.directory.getNetworkManager().setNetworks(dictNewSettings)
        )
18,279
6ef756b3f14ee3dfcdb2175e1e84de607692a1bc
#!/usr/bin/env python3
"""Project Euler - Problem 33 Module"""
from fractions import Fraction


def problem33(nr_digits):
    """Problem 33 - Digit canceling fractions.

    Find every fraction a/b (a < b, both nr_digits long) whose value is
    unchanged when one shared non-zero digit is "cancelled" from numerator
    and denominator, multiply them together, and return the denominator of
    the resulting fraction in lowest terms.
    """
    num_product = 1
    den_product = 1
    lo = 10 ** (nr_digits - 1)
    hi = 10 ** nr_digits
    for numer in range(lo, hi - 1):
        digits_n = str(numer)
        for denom in range(numer + 1, hi):
            digits_d = str(denom)
            for i, dn in enumerate(digits_n):
                # Cancelling a zero would give trivial fractions; skip it.
                if dn == '0':
                    continue
                for j, dd in enumerate(digits_d):
                    if dn != dd:
                        continue
                    # Drop the shared digit from each number.
                    short_n = int(digits_n[:i] + digits_n[i + 1:])
                    short_d = int(digits_d[:j] + digits_d[j + 1:])
                    if short_d == 0:
                        continue
                    # Keep the pair when the "cancelled" value is unchanged.
                    if numer / denom == short_n / short_d:
                        num_product *= short_n
                        den_product *= short_d
    return Fraction(num_product, den_product).denominator


def run():
    """Default Run Method"""
    return problem33(2)


if __name__ == '__main__':
    print("Result: ", run())
18,280
eccc74d1e8de28e82ec900c8d6667356cf357434
#!/usr/bin/python
# -*- coding: utf8 -*-
# Scraper for the Empirium browser game (v2.empirium.net): fetches each of a
# player's ship and planet radar pages, parses every visible object (ships,
# planets, vortices) out of the HTML, and reports scan progress to MongoDB.
# NOTE(review): Python 2 code (urllib2, `except Exception, e`, print stmt).

import urllib, urllib2
import cookielib
from bs4 import BeautifulSoup
from lxml import etree
import syslog
from pymongo import MongoClient
import time


def getCycleNumber():
    """Return the current game turn number from the home page, or -1."""
    opener = urllib2.build_opener()
    f = opener.open("http://v2.empirium.net")
    html = etree.HTML(f.read())
    result = etree.tostring(html, pretty_print=True, method="html")
    # The 3-digit turn number follows the "[ Tour n" marker.
    coord = result.find("[ Tour n") + 14
    if coord < 14:
        # find() returned -1: marker not present.
        return -1
    return int(result[coord:coord+3])


def getPlayersList():
    """Return [{'id':..., 'name':...}] from the home page's player <select>."""
    opener = urllib2.build_opener()
    f = opener.open("http://v2.empirium.net")
    html = etree.HTML(f.read())
    result = etree.tostring(html, pretty_print=True, method="html")
    soup = BeautifulSoup(result)
    for selector in soup.findAll("select", attrs={'name':'joueur'}):
        out = []
        for option in selector.findAll("option"):
            player = {}
            player['id'] = option['value']
            player['name'] = option.contents[0].encode("utf8").strip("\t").strip("\n")
            out.append(player)
        # NOTE(review): returns inside the loop, so only the first <select>
        # is processed; returns None when no selector exists.
        return out


def getShipLinks(cookies):
    """Return the radar-page URLs for all of the player's ships."""
    opener2 = urllib2.build_opener()
    # Forward the session cookies on the request.
    opener2.addheaders.append(('Cookie', "; ".join('%s=%s' % (cookie.name,cookie.value) for cookie in cookies)))
    f = opener2.open("http://v2.empirium.net/pan.php3?th=com")
    data = f.read()
    html = etree.HTML(data)
    result = etree.tostring(html, pretty_print=True, method="html")
    soup = BeautifulSoup(result)
    # Collect the links to every ship's radar view.
    liste_liens_radars = []
    for link in soup.findAll('a'):
        link_value = link.get('href')
        if link_value[1:15]=="/flotte/gauche":
            liste_liens_radars.append("http://v2.empirium.net" + link_value[1:])
    return liste_liens_radars


def getPlanetLinks(cookies):
    """Return the radar-page URLs for all of the player's planets."""
    opener2 = urllib2.build_opener()
    opener2.addheaders.append(('Cookie', "; ".join('%s=%s' % (cookie.name,cookie.value) for cookie in cookies)))
    f = opener2.open("http://v2.empirium.net/pan.php3?th=pl")
    data = f.read()
    html = etree.HTML(data)
    result = etree.tostring(html, pretty_print=True, method="html")
    soup = BeautifulSoup(result)
    # Collect the links to every planet's radar view (skipping tax pages).
    liste_liens_radars = []
    for link in soup.findAll('a'):
        link_value = link.get('href')
        if link_value[1:16]=="/planete/gauche" and link_value.count("Impots") == 0:
            liste_liens_radars.append("http://v2.empirium.net" + link_value[1:])
    return liste_liens_radars


def getGroupDatas(cookies, link, playerName):
    """Parse one fleet-group page and return its ship dicts (with owner)."""
    # Fetch the group page.
    op3 = urllib2.build_opener()
    op3.addheaders.append(('Cookie', "; ".join('%s=%s' % (cookie.name,cookie.value) for cookie in cookies)))
    try:
        f3 = op3.open("http://v2.empirium.net" + link[1:])
    except Exception, e:
        # NOTE(review): on failure this only logs — f3 stays unbound and the
        # f3.read() below raises NameError; consider returning [] here.
        syslog.syslog("Error during getGroupDatas - player:%s - link:%s - error:%s" % (playerName, link, e))
    d3 = f3.read()
    h3 = etree.HTML(d3)
    r3 = etree.tostring(h3, pretty_print=True, method="html")
    s3 = BeautifulSoup(r3)
    out = []
    lordName = "Propriétaire inconnu"
    # The group's X/Y coordinates are encoded in the link's query string.
    iCoord = link.find("&") + 1
    coordX = link[iCoord:].split("&")[0].strip("X=")
    coordY = link[iCoord:].split("&")[1].strip("Y=")
    for element in s3.findAll("li"):
        el = {}
        idName = element.text.split(" - ")
        image = str(element.find("img")['src'])[2:]
        if str(idName[0][1:9]) == "Seigneur":
            # "Seigneur : <name>" line gives the owner of the whole group.
            iNameEnd = idName[0].find(" : ")
            lordName = idName[0][10:iNameEnd]
        else:
            ident = idName[0][1:]
            if len(idName) >= 2:
                name = idName[1]
            else:
                name = ""
            el["type"] = "Vaisseau"
            el["id"] = ident
            el["name"] = name
            el["image"] = image
            el["x"] = coordX
            el["y"] = coordY
            out.append(el)
    # Back-fill the owner once it is known.
    for e in out:
        e["owner"] = lordName
    return out


def getDatas(cookies, link, playerName):
    """Parse one ship-radar page; return (elements, group_links).

    *elements* is a list of dicts describing ships, planets and vortices;
    *group_links* collects links to oversized groups (> 10 elements) to be
    fetched separately via getGroupDatas.
    """
    # Fetch the radar page.
    op3 = urllib2.build_opener()
    op3.addheaders.append(('Cookie', "; ".join('%s=%s' % (cookie.name,cookie.value) for cookie in cookies)))
    f3 = op3.open(link)
    d3 = f3.read()
    h3 = etree.HTML(d3)
    r3 = etree.tostring(h3, pretty_print=True, method="html")
    s3 = BeautifulSoup(r3)
    out = []
    group_links = []
    # For each element visible on the radar.
    for element in s3.findAll('div', attrs={"class":"carte_bulle"}):
        try:
            el = {}
            # Get element type from the first letter of the <h1> text
            # (Fregate, Navette, Planete, Groupe, ...).
            # NOTE(review): `type` shadows the builtin throughout this loop.
            type_complet = element.find("h1").text
            if type_complet[0] != '\n':
                type = type_complet[0]
            else:
                type = type_complet[1]
            # Navette, Fregate and Croiseur processing
            if (type == 'N' or type == 'F' or type == 'C' or type == 'L'):
                ident = element.find("h1").contents[0].encode("utf8").split(" ")[1]
                name = element.find("h1").contents[2].encode("utf8")
                # Coordinates sit between the <img> tag and the next <br/>.
                indexCoordStart = str(element).find("<img")
                indexCoordStart += str(element)[indexCoordStart:].find(">") + 1
                indexCoordEnd = indexCoordStart + str(element)[indexCoordStart:].find("<br/>") - 10
                coord = str(element)[indexCoordStart:indexCoordEnd].strip(" ").split("/")
                el["type"] = "Vaisseau"
                el["id"] = ident
                el["name"] = name
                el["image"] = element.find("img")['src'][2:]
                el["x"] = coord[0].strip(" ")
                el["y"] = coord[1].strip(" ")
                el["owner"] = element.find("a").contents[0].encode("utf8")
                # "Gérer"/"Déplacer" action links mean the ship is our own.
                if (el["owner"] == "Gérer" or el["owner"] == "Déplacer"):
                    el["owner"] = playerName
                out.append(el)
            # Vortex processing
            elif type == 'V':
                indexID = str(element).find("Vortex ") + 7
                indexIDend = indexID + str(element)[indexID:].find("<")
                ident = str(element)[indexID:indexIDend]
                indexDest = str(element).find("Vers ") + 5
                indexDestend = indexDest + str(element)[indexDest:].find("<")
                dest = str(element)[indexDest:indexDestend]
                indexCoord = str(element).find("Coord. ") + 7
                indexCoordend = indexCoord + str(element)[indexCoord:].find("<br")
                coord = str(element)[indexCoord:indexCoordend].strip(" ")
                el["type"] = "Vortex"
                el["id"] = ident
                el["destination"] = dest
                el["x"] = coord.split("/")[0]
                el["y"] = coord.split("/")[1]
                out.append(el)
            # Planete processing
            elif type == 'P':
                index = str(element.find("h1")).find("Planète")
                idName = str(element.find("h1"))[index:].strip("Planète ")[:-5].strip("\n").split("<br/>")
                owner = element.find("a").contents[0].encode('utf8')
                if (owner == "Gérer"):
                    owner = playerName
                indexCoordStart = str(element).find("</h1>") + 5
                indexCoordEnd = indexCoordStart + str(element)[indexCoordStart:].find("<br/>") - 10
                coord = str(element)[indexCoordStart:indexCoordEnd]
                el["type"] = "Planete"
                el["id"] = idName[0]
                el["name"] = idName[1]
                el["image"] = element.find("h1").find("img")['src'][2:]
                el["x"] = coord.split("/")[0]
                el["y"] = coord.split("/")[1]
                el["owner"] = owner
                out.append(el)
            # In case of many many elements (> 10): defer to getGroupDatas.
            elif type == 'E':
                iCoordEnd = element.find("</h1>")
                group_links.append(element.find("a")['href'])
            else:
                # Small group: coordinates come from the <h1> text ("en X/Y").
                indexCoord = type_complet.find("en ") + 3
                coord = type_complet[indexCoord:].strip(" ")
                coordX = coord.split("/")[0]
                coordY = coord.split("/")[1]
                for group in element.findAll("div", attrs={"class":"sousgroupe"}):
                    # Entries within a sub-group are separated by <br/><br/>.
                    for elmt in str(group).split("<br/><br/>"):
                        el = {}
                        elmt = elmt.replace("\n", " ")
                        if elmt[1]=='d':
                            elmt = elmt[24:]
                        if elmt[2]!='/':
                            if len(elmt) < 20:
                                continue
                            if elmt[19] == 'v':
                                elmt = elmt[1:]
                            # NOTE(review): elmt[18:20] is a 2-char slice and
                            # can never equal the 3-char 'vsx' — this branch
                            # looks dead; was [18:21] intended? Confirm.
                            if elmt[18:20] == 'vsx':
                                index1 = elmt.find(">")
                                index2 = index1 + elmt[index1:].find(" - ")
                                id = elmt[index1+1:index2]
                                index3 = index2 + elmt[index2:].find(" de <")
                                if index3 < index2:
                                    index3 = index2 + elmt[index2:].find("<br/><a")
                                nom = elmt[index2+3:index3]
                                index4 = elmt.find("/")
                                index5 = index4 + elmt[index4:].find("\"")
                                type = elmt[index4:index5]
                                index6 = elmt.find("\"gauche\">") + 9
                                index7 = index6 + elmt[index6:].find("</a>")
                                owner = elmt[index6:index7]
                                el["type"] = "Vaisseau"
                                el["id"] = id
                                el["name"] = nom
                                el["image"] = type
                                el["x"] = coordX
                                el["y"] = coordY
                                el["owner"] = owner
                                if (el["owner"] == "Gérer" or el["owner"] == "Déplacer"):
                                    el["owner"] = playerName
                                out.append(el)
                            # NOTE(review): same issue — 2-char slice vs the
                            # 3-char "vor"; this branch also looks dead.
                            elif elmt[18:20] == "vor":
                                index1 = elmt.find(">")
                                index2 = index1 + elmt[index1:].find(" - ")
                                id = elmt[index1+1:index2]
                                index3 = index2 + elmt[index2:].find(" de <")
                                if index3 < index2:
                                    index3 = index2 + elmt[index2:].find("<br/><a")
                                nom = elmt[index2+3:index3]
                                index4 = elmt.find("/")
                                index5 = index4 + elmt[index4:].find("\"")
                                type = elmt[index4:index5]
                                index6 = elmt.find("\"gauche\">") + 9
                                index7 = index6 + elmt[index6:].find("</a>")
                                owner = elmt[index6:index7]
                                el["type"] = "Vortex"
                                el["id"] = id
                                el["destination"] = ""
                                el["x"] = coordX
                                el["y"] = coordY
                                out.append(el)
                            elif elmt[19] == 'p':
                                index1 = elmt.find(">")
                                index2 = index1 + elmt[index1:].find(" ")
                                id = elmt[index1+4:index2]
                                index3 = index2 + elmt[index2:].find("<")
                                nom = elmt[index2+3:index3]
                                index4 = elmt.find("/")
                                index5 = index4 + elmt[index4:].find("\"")
                                type = elmt[index4:index5]
                                index6 = index3 + elmt[index3:].find("> de ") + 5
                                index7 = index6 + elmt[index6:].find("<")
                                owner = elmt[index6:index7]
                                el["type"] = "Planete"
                                el["id"] = id
                                el["name"] = nom
                                el["image"] = type
                                el["x"] = coordX
                                el["y"] = coordY
                                el["owner"] = owner
                                out.append(el)
        except Exception, e:
            # Keep scanning the remaining elements on parse errors.
            syslog.syslog("Error during ship radar parsing - player:%s - link:%s - element:%s - error:%s" % (playerName, link, element, e))
            continue
    return out, group_links


def getDatasPlanets(cookies, link, playerName):
    """Parse one planet-radar page and return the visible element dicts."""
    # Fetch the radar page.
    op3 = urllib2.build_opener()
    op3.addheaders.append(('Cookie', "; ".join('%s=%s' % (cookie.name,cookie.value) for cookie in cookies)))
    f3 = op3.open(link)
    d3 = f3.read()
    h3 = etree.HTML(d3)
    r3 = etree.tostring(h3, pretty_print=True, method="html")
    s3 = BeautifulSoup(r3)
    out = []
    # For each element visible on the radar.
    for element in s3.findAll('div', attrs={"class":"carte_bulle"}):
        el = {}
        # Determine the element's type from the stripped <h1> markup
        # (Fregate, Navette, Planete, Groupe, ...).
        typeIdName = str(element.find("h1"))[4:-5]
        # "Navette ..." entries.
        if typeIdName[0] == "N":
            ident = typeIdName.split("<br/>")[0][8:]
            name = typeIdName.split("<br/>")[1].strip("\n")
            image = str(element.find("img"))[12:-3]
            iCoord = str(element).find("\"/>") + 3
            iCoordEnd = iCoord + str(element)[iCoord:].find("<br/>") - 10
            coord = str(element)[iCoord:iCoordEnd]
            owner = element.find("a").contents[0]
            el["type"] = "Vaisseau"
            el["id"] = ident
            el["name"] = name
            el["image"] = image
            el["x"] = coord.split("/")[0].strip(" ")
            el["y"] = coord.split("/")[1].strip(" ")
            el["owner"] = owner
            out.append(el)
        # "Corvette..." entries (prefix length 10).
        elif typeIdName[0:2] == "Co":
            ident = typeIdName.split("<br/>")[0][10:]
            name = typeIdName.split("<br/>")[1].strip("\n")
            image = str(element.find("img"))[12:-3]
            iCoord = str(element).find("\"/>") + 3
            iCoordEnd = iCoord + str(element)[iCoord:].find("<br/>") - 10
            coord = str(element)[iCoord:iCoordEnd]
            owner = element.find("a").contents[0]
            el["type"] = "Vaisseau"
            el["id"] = ident
            el["name"] = name
            el["image"] = image
            el["x"] = coord.split("/")[0].strip(" ")
            el["y"] = coord.split("/")[1].strip(" ")
            el["owner"] = owner
            out.append(el)
        # "Croiseur..." entries (prefix length 9).
        elif typeIdName[0:2] == "Cr":
            ident = typeIdName.split("<br/>")[0][9:]
            name = typeIdName.split("<br/>")[1].strip("\n")
            image = str(element.find("img"))[12:-3]
            iCoord = str(element).find("\"/>") + 3
            iCoordEnd = iCoord + str(element)[iCoord:].find("<br/>") - 10
            coord = str(element)[iCoord:iCoordEnd]
            owner = element.find("a").contents[0]
            el["type"] = "Vaisseau"
            el["id"] = ident
            el["name"] = name
            el["image"] = image
            el["x"] = coord.split("/")[0].strip(" ")
            el["y"] = coord.split("/")[1].strip(" ")
            el["owner"] = owner
            out.append(el)
        # "Fregate..." entries (prefix length 8).
        elif typeIdName[0] == "F":
            ident = typeIdName.split("<br/>")[0][8:]
            name = typeIdName.split("<br/>")[1].strip("\n")
            image = str(element.find("img"))[12:-3]
            iCoord = str(element).find("\"/>") + 3
            iCoordEnd = iCoord + str(element)[iCoord:].find("<br/>") - 10
            coord = str(element)[iCoord:iCoordEnd]
            owner = element.find("a").contents[0]
            el["type"] = "Vaisseau"
            el["id"] = ident
            el["name"] = name
            el["image"] = image
            el["x"] = coord.split("/")[0].strip(" ")
            el["y"] = coord.split("/")[1].strip(" ")
            el["owner"] = owner
            out.append(el)
        # An <img> right in the <h1>: a planet entry.
        elif typeIdName[2:5] == "img":
            image = str(element.find("img"))[12:-3]
            iTI = str(element).find("\"/>") + 12
            iTIEnd = iTI + str(element)[iTI:].find("<br/>")
            iNameEnd = iTIEnd + str(element)[iTIEnd:].find("</h1>")
            iCoordEnd = iNameEnd + str(element)[iNameEnd:].find("<br/>") - 10
            ti = str(element)[iTI:iTIEnd]
            name = str(element)[iTIEnd+5:iNameEnd]
            coord = str(element)[iNameEnd+5:iCoordEnd]
            owner = element.find("a").contents[0]
            if owner.encode("utf-8") == "Gérer":
                owner = playerName
            el["type"] = "Planete"
            el["id"] = ti
            el["name"] = name
            el["image"] = image
            el["x"] = coord.split("/")[0].strip(" ")
            el["y"] = coord.split("/")[1].strip(" ")
            el["owner"] = owner
            out.append(el)
        else:
            # Grouped entries: "N elements ... en X / Y" header, then
            # sub-group items separated by <br/><br/>.
            data = typeIdName.split(" ")
            nb = data[0]
            x = data[3]
            y = data[5]
            i = 0
            for e in element.findAll("div", attrs={"class":"sousgroupe"}):
                items = str(e).split("<br/><br/>")
                for item in items:
                    if (len(item) >= 20):
                        i += 1
                        iImage = item.find("/images/")
                        iImageEnd = iImage + item[iImage:].find("\"/>")
                        image = item[iImage:iImageEnd]
                        # 'v' image path => ship ("vaisseau").
                        if image[8] == 'v':
                            iIdNameEnd = iImageEnd + item[iImageEnd:].find(" de <a")
                            idName = item[iImageEnd+3:iIdNameEnd]
                            iOwner = item.find("gauche\">") + 8
                            iOwnerEnd = iOwner + item[iOwner:].find("</a>")
                            owner = item[iOwner:iOwnerEnd]
                            el["type"] = "Vaisseau"
                            el["id"] = idName.split("-")[0].strip(" ")
                            el["name"] = idName.split("-")[1].strip(" ")
                            el["image"] = image
                            el["x"] = x
                            el["y"] = y
                            el["owner"] = owner
                            out.append(el)
                        # 'p' image path => planet; empty owner means rebels.
                        elif image[8] == 'p':
                            iIdNameEnd = iImageEnd + item[iImageEnd:].find("<a href")
                            idName = item[iImageEnd+6:iIdNameEnd]
                            iOwner = item.find("gauche\">") + 12
                            iOwnerEnd = iOwner + item[iOwner:].find("</a>")
                            owner = item[iOwner:iOwnerEnd]
                            if owner == "":
                                owner = "Rebelles"
                            el["type"] = "Planete"
                            el["id"] = idName.split("-")[0].strip(" ")
                            el["name"] = idName.split("-")[1].strip(" ")
                            el["image"] = image
                            el["x"] = x
                            el["y"] = y
                            el["owner"] = owner
                            out.append(el)
                        else:
                            syslog.syslog("Error: element unknown during planet radar parsing - player:%s - link:%s - item:%s" % (playerName, link, item))
                        el = {}
    return out


def getAllDatas(cookies, playerName, id_request, db_host, db_port):
    """Scan all of a player's ship/planet radars, reporting progress to Mongo.

    Progress for request *id_request* is written to requests.update as the
    'analyze' fraction; the combined element list is returned.
    """
    client = MongoClient(db_host, db_port)
    col_update = client['requests']['update']
    cycle = getCycleNumber()
    # NOTE(review): this post dict is built but never inserted anywhere.
    post = {}
    post['player'] = playerName
    post['status'] = 'processing'
    post['analyze'] = 0
    post['base'] = 0
    post['groups'] = 0
    post['cycle'] = cycle
    linksShips = getShipLinks(cookies)
    linksPlanets = getPlanetLinks(cookies)
    datas = []
    group_links = []
    total_count = len(linksShips)
    total_count += len(linksPlanets)
    # Phase 1: ship radars (also collects oversized-group links).
    for i in range(len(linksShips)):
        shipDatas, group_link = getDatas(cookies, linksShips[i], playerName)
        group_links.extend(group_link)
        datas.extend(shipDatas)
        percent = float(i+1)/total_count
        col_update.update({'id':id_request,'status':'processing', 'cycle':cycle},{'$set':{'analyze':percent}})
    # Phase 2: deduplicated group pages.
    group_links = list(set(group_links))
    for i in range(len(group_links)):
        datas.extend(getGroupDatas(cookies, group_links[i], playerName))
    # Phase 3: planet radars.
    for i in range(len(linksPlanets)):
        datas.extend(getDatasPlanets(cookies, linksPlanets[i], playerName))
        percent = float(i+1 + len(linksShips))/total_count
        col_update.update({'id':id_request,'status':'processing', 'cycle':cycle},{'$set':{'analyze':percent}})
    return datas


if __name__ == "__main__":
    print getPlayersList()
18,281
7de6e337d783f48562eb578da979063dc2b5151a
from profiles.models import CustomUser, Relationship


def profile_pic(request):
    """
    Context processor exposing the logged-in user's avatar to all templates.

    Returns {'picture': <avatar>} for authenticated users, {} otherwise.
    """
    if not request.user.is_authenticated:
        return {}
    user = CustomUser.objects.get(id__exact=request.user.id)
    return {'picture': user.avatar}


def invitation_received_no(request):
    """
    Context processor exposing the count of pending invitations to all templates.

    Returns {'invites_num': <count>} for authenticated users, {} otherwise.
    """
    if not request.user.is_authenticated:
        return {}
    user = CustomUser.objects.get(id__exact=request.user.id)
    return {'invites_num': Relationship.objects.invitation_received(user).count()}
18,282
38f2d35d9ab230e52dc068f21a3df68f4e619d08
''' Aggregate data '''
import argparse, os, sys, errno, subprocess, csv

# Directory-name marker identifying a single experiment replicate.
run_identifier = "RUN_"

# The six "primary" logic tasks, in the column order used to build phenotype strings.
primary_traits = ["not","nand","and","ornot","or","andnot"]
even_traits = {"not", "and", "or"}
odd_traits = {"nand", "ornot", "andnot"}

# Target phenotype bit-profiles (one character per primary trait, in primary_traits order).
even_profile = "101010"
odd_profile = "010101"
all_profile = "111111"

max_pop_size = 3600

# "Extra" (non-primary) tasks: nor/xor/equals plus the 3-input logic tasks
# logic_3aa .. logic_3cp. Generated programmatically; the resulting set is
# identical to the original hand-written 71-element literal.
extra_traits = {"nor", "xor", "equals"}
for _first in "abc":
    for _second in "abcdefghijklmnopqrstuvwxyz":
        if _first == "c" and _second > "p":
            break  # the series ends at logic_3cp
        extra_traits.add("logic_3" + _first + _second)

# Organism-count thresholds for calling a task "performed", keyed by the fraction
# of max_pop_size. NOTE: keys are deliberately mixed-type (floats plus the string
# "0" meaning ">= 1 organism"); they are only ever used inside f-strings.
extra_trait_thresholds = {prop: (max_pop_size * prop) for prop in [0.01, 0.05, 0.1] }
extra_trait_thresholds["0"] = 1

# Fields pulled from time.dat into the time-series output.
time_data_time_series_fields = ["average_generation"]

def mkdir_p(path):
    """Create *path* (and any missing parents); equivalent to `mkdir -p path`."""
    # exist_ok=True replaces the old try/except-EEXIST dance with identical
    # semantics (still raises if path exists but is not a directory). Safe here:
    # the file already requires Python >= 3.6 (f-strings).
    os.makedirs(path, exist_ok=True)

def extract_params_cmd_log(path):
    """Parse an Avida cmd.log into a {PARAM_NAME: value} dict.

    The log holds the avida invocation line; each `-set NAME VALUE` pair
    becomes one dictionary entry.
    """
    content = None
    with open(path, "r") as fp:
        content = fp.read().strip()
    content = content.replace("./avida", "")
    params = [param.strip() for param in content.split("-set") if param.strip() != ""]
    cfg = {param.split(" ")[0]:param.split(" ")[1] for param in params}
    return cfg

def read_avida_dat_file(path, backfill_missing_fields=False):
    """Parse an Avida .dat/.spop file into a list of {field: value} dicts.

    Field names are taken from the file's legend block. When
    backfill_missing_fields is True, short data rows are padded with empty
    strings instead of being treated as an error (needed for .spop files).
    """
    content = None
    with open(path, "r") as fp:
        content = fp.read().strip().split("\n")
    legend_start = 0
    legend_end = 0
    # Where does the legend table start?
    for line_i in range(0, len(content)):
        line = content[line_i].strip()
        if line == "# Legend:":
            # Handles analyze mode detail files.
            legend_start = line_i + 1
            break
        if "# 1:" in line:
            # Handles time.dat file.
            legend_start = line_i
            break
    # For each line in legend table, extract field name.
    fields = []
    for line_i in range(legend_start, len(content)):
        line = content[line_i].strip()
        if line == "":
            legend_end = line_i
            break
        # patch 3-input logic tasks because avida file format is nonsense
        if "Logic 3" in line:
            line = line.split("(")[0]
        fields.append( line.split(":")[-1].strip().lower().replace(" ", "_") )
    # NOTE(review): if the legend is not followed by a blank line, legend_end
    # stays 0 and the data loop re-scans from the top -- assumed not to happen
    # for well-formed Avida output; confirm against real files.
    data = []
    for line_i in range(legend_end, len(content)):
        line = content[line_i].strip()
        if line == "":
            continue
        data_line = line.split(" ")
        if len(data_line) > len(fields):
            print("found more items than there are fields!")
            print(fields)
            print(data_line)
            exit(-1)
        elif backfill_missing_fields:
            num_backfill = len(fields) - len(data_line)
            for _ in range(num_backfill):
                data_line.append("")
        elif len(data_line) != len(fields):
            print("data fields mismatch!")
            print(fields)
            print(data_line)
            exit(-1)
        data.append({field:value for field,value in zip(fields, data_line)})
    return data

def read_csv(file_path):
    """Read a CSV file (first row = header) into a list of {column: value} dicts."""
    content = None
    with open(file_path, "r") as fp:
        content = fp.read().strip().split("\n")
    header = content[0].split(",")
    content = content[1:]
    lines = [{header[i]: l[i] for i in range(len(header))} for l in csv.reader(content, quotechar='"', delimiter=',', quoting=csv.QUOTE_ALL, skipinitialspace=True)]
    return lines

def simple_match_coeff(a, b):
    """Return the number of positions where strings *a* and *b* agree.

    The two strings must be equal length; a mismatch terminates the program
    (preserves the original fail-fast behavior).
    """
    if len(a) != len(b):
        print(f"Length mismatch! {a} {b}")
        exit(-1)
    return sum(ai==bi for ai,bi in zip(a,b))

def main():
    """Aggregate per-run Avida output into summary, time-series, and lineage CSVs."""
    parser = argparse.ArgumentParser(description="Run submission script.")
    parser.add_argument("--data_dir", type=str, help="Where is the base output directory for each run?")
    parser.add_argument("--dump", type=str, help="Where to dump this?", default=".")
    parser.add_argument("--update", type=int, help="Update to pull data for?")
    parser.add_argument("--time_series_range", type=int, help="The range (in updates) to collect time series data?", nargs=2)
    args = parser.parse_args()
    data_dir = args.data_dir
    dump_dir = args.dump
    update = args.update
    time_series_range = args.time_series_range

    if not os.path.exists(data_dir):
        print("Unable to find data directory.")
        exit(-1)
    mkdir_p(dump_dir)

    # Aggregate run directories.
    run_dirs = [run_dir for run_dir in os.listdir(data_dir) if run_identifier in run_dir]
    print(f"Found {len(run_dirs)} run directories.")

    # Truncate (create-empty) the incremental output files up front; each run
    # appends to them below.
    time_series_content = []
    time_series_header = None
    time_series_fpath = os.path.join(dump_dir, f"time_series_u{time_series_range[0]}-u{time_series_range[1]}.csv")
    with open(time_series_fpath, "w") as fp:
        fp.write("")
    lineage_series_content = []
    lineage_series_header = None
    lineage_series_fpath = os.path.join(dump_dir, "lineage_series.csv")
    with open(lineage_series_fpath, "w") as fp:
        fp.write("")

    # Only keep lines that fall within specified time series range.
    def keep_line(u):
        return u <= time_series_range[1] and u >= time_series_range[0]

    # For each run directory:
    # - get id, get command line configuration settings
    summary_header = None
    summary_content_lines = []
    progress_counter = 0
    for run_dir in run_dirs:
        progress_counter += 1
        run_path = os.path.join(data_dir, run_dir)
        # Skip over (but make note of) incomplete runs.
        if not os.path.exists(os.path.join(run_path, 'data', 'analysis')):
            print('Skipping: ', run_path)
            continue
        summary_info = {} # Hold summary information about run. (one entry per run)
        time_series_info = {}
        lineage_series_info = []
        print(f"Processing ({progress_counter}/{len(run_dirs)}): {run_path}")

        ############################################################
        # Extract commandline configuration settings (from cmd.log file)
        cmd_log_path = os.path.join(run_path, "cmd.log")
        cmd_params = extract_params_cmd_log(cmd_log_path)
        # Infer environmental change and change rate from events file
        chg_env = "chg" in cmd_params["EVENT_FILE"]
        env_cond = cmd_params["EVENT_FILE"].replace("events_", "").split("_phase")[0].lower()
        phase = "1" if "phase-one" in cmd_params["EVENT_FILE"] else "2"
        extra_task_val = "0.0"
        if "val" in cmd_params["ENVIRONMENT_FILE"]:
            extra_task_val = "0." + cmd_params["ENVIRONMENT_FILE"].replace(".cfg", "").split("_")[-1].replace("val-", "")
        summary_info["chg_env"] = chg_env
        summary_info["environment"] = env_cond
        summary_info["update"] = update
        summary_info["phase"] = phase
        summary_info["extra_task_value"] = extra_task_val
        for field in cmd_params:
            summary_info[field] = cmd_params[field]
        ############################################################

        ############################################################
        # Extract lineage file data (summary row for the requested update).
        lineage_path = os.path.join(run_path, "data", "lineage.csv")
        lineage_data = read_csv(lineage_path)
        lineage_summary_data = [line for line in lineage_data if int(line["update"]) == update][0]
        for field in lineage_summary_data:
            if field == "update":
                continue
            summary_info["lineage_"+field] = lineage_summary_data[field]
        lineage_data = None
        lineage_summary_data = None
        ############################################################

        ############################################################
        # Extract phylodiversity data (summary row for the requested update).
        phylodiversity_path = os.path.join(run_path, "data", "phylodiversity.csv")
        phylodiversity_data = read_csv(phylodiversity_path)
        phylo_summary_data = [line for line in phylodiversity_data if int(line["update"]) == update][0]
        for field in phylo_summary_data:
            if field == "update":
                continue
            summary_info["phylo_"+field] = phylo_summary_data[field]
        phylodiversity_data = None
        phylo_summary_data = None
        ############################################################

        ############################################################
        # Extract information from dominant.csv
        dominant_path = os.path.join(run_path, "data", "dominant.csv")
        dominant_data = read_csv(dominant_path)
        dominant_summary_data = [line for line in dominant_data if int(line["update"]) == update][0]
        summary_info["dominant_lineage_length_taxa"] = dominant_summary_data["dominant_lineage_length"]
        summary_info["dominant_lineage_deleterious_steps"] = dominant_summary_data["dominant_deleterious_steps"]
        summary_info["dominant_lineage_phenotypic_volatility"] = dominant_summary_data["dominant_phenotypic_volatility"]
        summary_info["dominant_lineage_unique_phenotypes"] = dominant_summary_data["dominant_unique_phenotypes"]
        ############################################################

        ############################################################
        # Extract time.dat data
        time_data = read_avida_dat_file(os.path.join(run_path, "data", "time.dat"))
        # Summary information: average generation at the requested update.
        summary_info["time_average_generation"] = [line["average_generation"] for line in time_data if int(line["update"]) == update][0]
        # Time series information.
        time_data_ts = {line["update"]: {field: line[field] for field in time_data_time_series_fields} for line in time_data if keep_line(int(line["update"]))}
        # Grab the set of updates we have for our time series to check against
        # other time series data for consistency.
        time_series_updates = set(time_data_ts.keys())
        for u in time_series_updates:
            time_series_info[u] = {}
        for u in time_series_updates:
            for field in time_data_ts[u]:
                time_series_info[u]["time_" + field] = time_data_ts[u][field]
        time_data = None # release time_data
        time_data_ts = None
        ############################################################

        ############################################################
        # Extract tasks.dat data
        task_data = read_avida_dat_file(os.path.join(run_path, "data", "tasks.dat"))
        final_tasks_data = task_data[-1]
        if (final_tasks_data["update"] != str(update)):
            print(f"Final tasks update {final_tasks_data['update']} does not match requested analysis update {update}")
            exit(-1)
        tasks_found = set()
        for task in extra_traits:
            if task in final_tasks_data:
                tasks_found.add(task)
        print(f" Found {len(tasks_found)} / {len(extra_traits)} possible tasks in tasks.dat")
        # A task counts as "discovered" at threshold T once any row shows >= T performers.
        final_discovered_tasks = {proportion:set() for proportion in extra_trait_thresholds}
        for line in task_data:
            for trait in extra_traits:
                if not trait in line:
                    continue
                for proportion in extra_trait_thresholds:
                    threshold = extra_trait_thresholds[proportion]
                    if int(line[trait]) >= threshold:
                        final_discovered_tasks[proportion].add(trait)
        for proportion in extra_trait_thresholds:
            threshold = extra_trait_thresholds[proportion]
            summary_info[f"final_pop_extra_tasks_{proportion}"] = sum([int(int(final_tasks_data[trait]) > threshold) for trait in extra_traits if trait in final_tasks_data])
            summary_info[f"discovered_extra_tasks_{proportion}"] = len(final_discovered_tasks[proportion])
        # Extract time series information.
        task_data_ts = {line["update"]: {field: line[field] for field in extra_traits} for line in task_data if keep_line(int(line["update"]))}
        for u in time_series_updates:
            task_counts = {proportion: set() for proportion in extra_trait_thresholds}
            for task in task_data_ts[u]:
                for proportion in extra_trait_thresholds:
                    threshold = extra_trait_thresholds[proportion]
                    if ( int(task_data_ts[u][task]) >= threshold):
                        task_counts[proportion].add(task)
            for proportion in extra_trait_thresholds:
                time_series_info[u][f"task_count_{proportion}"] = len(task_counts[proportion])
        task_data = None
        task_data_ts = None
        ############################################################

        ############################################################
        # Extract .spop file info
        # - Unique key shared with analyze mode output: (update born, sequence).
        spop_data = read_avida_dat_file(os.path.join(run_path, "data", f"detail-{update}.spop"), True)
        spop_lookup_table = { tuple([line["update_born"], line["genome_sequence"]]):line for line in spop_data }
        def spop_lookup(update_born, sequence, field):
            return spop_lookup_table[tuple([update_born, sequence])][field]
        spop_data = None
        ############################################################

        ############################################################
        # Extract environment-specific final dominant information.
        dom_env_all = read_avida_dat_file(os.path.join(run_path, "data", "analysis", "env_all", "final_dominant.dat"))
        dom_env_odd = read_avida_dat_file(os.path.join(run_path, "data", "analysis", "env_odd", "final_dominant.dat"))
        dom_env_even = read_avida_dat_file(os.path.join(run_path, "data", "analysis", "env_even", "final_dominant.dat"))
        # (each of these files should only have one genotype in them)
        # BUG FIX: original used `and`, so the check only fired when ALL three
        # files were malformed; any single malformed file must be fatal.
        if len(dom_env_all) != 1 or len(dom_env_even) != 1 or len(dom_env_odd) != 1:
            print("Unexpected number of genotypes in final_dominant data files.")
            exit(-1)
        dom_env_all = dom_env_all[0]
        dom_env_odd = dom_env_odd[0]
        dom_env_even = dom_env_even[0]

        # Collect dominant genotype data.
        summary_info["dominant_genome_length"] = dom_env_all["genome_length"]
        summary_info["dominant_generation_born"] = spop_lookup(
            update_born=dom_env_all["update_born"],
            sequence=dom_env_all["genome_sequence"],
            field="generation_born"
        )
        phenotype_even = "".join([dom_env_even[trait] for trait in primary_traits])
        phenotype_odd = "".join([dom_env_odd[trait] for trait in primary_traits])
        phenotype_all = "".join([dom_env_all[trait] for trait in primary_traits])
        phenotype_task_order = ";".join(primary_traits)
        plastic_odd_even = phenotype_even != phenotype_odd
        match_score_even = simple_match_coeff(phenotype_even, even_profile)
        match_score_odd = simple_match_coeff(phenotype_odd, odd_profile)
        match_score_all = simple_match_coeff(phenotype_all, all_profile)
        match_score_odd_even = match_score_even + match_score_odd
        optimal_plastic = match_score_even == len(even_profile) and match_score_odd == len(odd_profile)
        summary_info["dominant_phenotype_even"] = phenotype_even
        summary_info["dominant_phenotype_odd"] = phenotype_odd
        summary_info["dominant_phenotype_all"] = phenotype_all
        summary_info["dominant_phenotype_task_order"] = phenotype_task_order
        summary_info["dominant_plastic_odd_even"] = plastic_odd_even
        summary_info["dominant_match_score_even"] = match_score_even
        summary_info["dominant_match_score_odd"] = match_score_odd
        summary_info["dominant_match_score_all"] = match_score_all
        summary_info["dominant_match_score_odd_even"] = match_score_odd_even
        summary_info["dominant_optimal_plastic"] = optimal_plastic
        # collect extra task information
        extra_traits_expressed = set()
        for trait in extra_traits:
            even_expressed = int(dom_env_even[trait]) > 0
            odd_expressed = int(dom_env_odd[trait]) > 0
            if even_expressed or odd_expressed:
                extra_traits_expressed.add(trait)
        summary_info["dominant_extra_tasks"] = len(extra_traits_expressed)
        ############################################################

        ############################################################
        # Extract mutation accumulation data from dominant lineage
        # - mutation information will be the same for all lineage data files.
        lineage_env_all = read_avida_dat_file(os.path.join(run_path, "data", "analysis", "env_all", "lineage_tasks.dat"))
        lineage_env_odd = read_avida_dat_file(os.path.join(run_path, "data", "analysis", "env_odd", "lineage_tasks.dat"))
        lineage_env_even = read_avida_dat_file(os.path.join(run_path, "data", "analysis", "env_even", "lineage_tasks.dat"))
        summary_info["dominant_lineage_length_genotypes"] = len(lineage_env_all)
        sub_mut_cnt = 0
        ins_mut_cnt = 0
        dels_mut_cnt = 0
        lineage_tasks_ot = [set() for _ in range(len(lineage_env_all))]
        primary_task_profiles_ot = [{"odd": None, "even": None, "const": None, "aggregate": None} for _ in range(len(lineage_env_all))]
        extra_traits_discovered = set()
        for i in range(len(lineage_env_all)):
            ancestor_info = {}
            ancestor_info["update"] = lineage_env_all[i]["update_born"]
            # collect mutation information for this ancestor
            muts_from_parent = lineage_env_all[i]["mutations_from_parent"].split(",")
            for mut in muts_from_parent:
                if (len(mut) == 0):
                    continue
                if (mut[0] == "M"):
                    sub_mut_cnt += 1
                elif (mut[0] == "I"):
                    ins_mut_cnt += 1
                elif (mut[0] == "D"):
                    dels_mut_cnt += 1
                else:
                    print("Unknown mutation type (" + str(mut) + ")!")
            # collect extra task information for this ancestor
            for trait in extra_traits:
                even_expressed = int(lineage_env_even[i][trait]) > 0
                odd_expressed = int(lineage_env_odd[i][trait]) > 0
                const_expressed = int(lineage_env_all[i][trait]) > 0
                if chg_env and (even_expressed or odd_expressed):
                    lineage_tasks_ot[i].add(trait)
                    extra_traits_discovered.add(trait)
                elif (not chg_env) and (const_expressed):
                    lineage_tasks_ot[i].add(trait)
                    extra_traits_discovered.add(trait)
            ancestor_phenotype_even = "".join([lineage_env_even[i][trait] for trait in primary_traits])
            ancestor_phenotype_odd = "".join([lineage_env_odd[i][trait] for trait in primary_traits])
            ancestor_phenotype_const = "".join([lineage_env_all[i][trait] for trait in primary_traits])
            primary_task_profiles_ot[i]["even"] = ancestor_phenotype_even
            primary_task_profiles_ot[i]["odd"] = ancestor_phenotype_odd
            primary_task_profiles_ot[i]["const"] = ancestor_phenotype_const
            if chg_env:
                primary_task_profiles_ot[i]["aggregate"] = ancestor_phenotype_even + ancestor_phenotype_odd
            else:
                primary_task_profiles_ot[i]["aggregate"] = ancestor_phenotype_const
            ancestor_info["extra_traits"] = len(lineage_tasks_ot[i])
            ancestor_info["match_score_even"] = simple_match_coeff(ancestor_phenotype_even, even_profile)
            ancestor_info["match_score_odd"] = simple_match_coeff(ancestor_phenotype_odd, odd_profile)
            lineage_series_info.append(ancestor_info)
        # save summary mutation info
        total_muts = sub_mut_cnt + ins_mut_cnt + dels_mut_cnt
        summary_info["dominant_lineage_substitution_mut_cnt"] = sub_mut_cnt
        summary_info["dominant_lineage_insertion_mut_cnt"] = ins_mut_cnt
        summary_info["dominant_lineage_deletion_mut_cnt"] = dels_mut_cnt
        summary_info["dominant_lineage_total_mut_cnt"] = total_muts
        # analyze lineage task information
        extra_traits_gained = 0 # total number of times that any trait is gained
        extra_traits_lost = 0 # total number of times that any trait is lost
        extra_traits_lost_linked_to_primary_change = 0
        task_profile_volatility = 0
        for i in range(len(lineage_tasks_ot)):
            ##### Trait gain/loss
            current_traits = lineage_tasks_ot[i]
            if not i:
                extra_traits_gained += len(current_traits)
            else:
                previous_traits = lineage_tasks_ot[i-1]
                # gained traits are traits in current_traits but not in previous traits
                gained_traits = current_traits - previous_traits
                # lost traits are traits in previous traits but not in current traits
                lost_traits = previous_traits - current_traits
                # update gain/lost information
                extra_traits_gained += len(gained_traits)
                extra_traits_lost += len(lost_traits)
                ##### Task profile volatility
                # FIX: original reused the name `previous_traits` for this profile
                # string, clobbering the set above; renamed for clarity (behavior
                # identical -- the set is not read again after this point).
                current_profile = primary_task_profiles_ot[i]["aggregate"]
                previous_profile = primary_task_profiles_ot[i-1]["aggregate"]
                task_profile_volatility += int(current_profile != previous_profile)
                if current_profile != previous_profile:
                    extra_traits_lost_linked_to_primary_change += len(lost_traits)
        summary_info["dominant_lineage_extra_traits_gained"] = extra_traits_gained
        summary_info["dominant_lineage_extra_traits_lost"] = extra_traits_lost
        summary_info["dominant_lineage_extra_traits_lost_linked_to_primary_change"] = extra_traits_lost_linked_to_primary_change
        summary_info["dominant_lineage_extra_traits_discovered"] = len(extra_traits_discovered)
        summary_info["dominant_lineage_trait_volatility"] = task_profile_volatility
        lineage_env_all = None
        lineage_env_odd = None
        lineage_env_even = None
        ############################################################

        ############################################################
        # Output time series data for this run
        # Add extra fields
        for u in time_series_info:
            time_series_info[u]["update"] = u # Make sure that update is a field on every line
            time_series_info[u]["RANDOM_SEED"] = summary_info["RANDOM_SEED"]
            time_series_info[u]["DISABLE_REACTION_SENSORS"] = summary_info["DISABLE_REACTION_SENSORS"]
            time_series_info[u]["chg_env"] = summary_info["chg_env"]
            time_series_info[u]["environment"] = summary_info["environment"]
            time_series_info[u]["extra_task_value"] = summary_info["extra_task_value"]
        # NOTE(review): assumes every run has a row at exactly the start of the
        # requested time series range -- confirm against the event files.
        time_series_fields = list(time_series_info[str(time_series_range[0])].keys())
        time_series_fields.sort()
        write_header = False
        if time_series_header is None:
            write_header = True
            time_series_header = ",".join(time_series_fields)
        elif time_series_header != ",".join(time_series_fields):
            print("Time series header mismatch!")
            exit(-1)
        time_series_content = []
        update_order = list(map(int, time_series_info.keys()))
        update_order.sort()
        for u in update_order:
            time_series_content.append(",".join([str(time_series_info[str(u)][field]) for field in time_series_fields]))
        with open(time_series_fpath, "a") as fp:
            if write_header:
                fp.write(time_series_header)
                fp.write("\n")
            fp.write("\n".join(time_series_content))
            # BUG FIX: terminate this run's rows; without this, the first row of
            # the next run was glued onto the last row of this one.
            fp.write("\n")
        time_series_content = []
        ############################################################

        ############################################################
        # Output lineage series information
        ############################################################
        for i in range(len(lineage_series_info)):
            lineage_series_info[i]["RANDOM_SEED"] = summary_info["RANDOM_SEED"]
            lineage_series_info[i]["DISABLE_REACTION_SENSORS"] = summary_info["DISABLE_REACTION_SENSORS"]
            lineage_series_info[i]["chg_env"] = summary_info["chg_env"]
            lineage_series_info[i]["environment"] = summary_info["environment"]
            lineage_series_info[i]["extra_task_value"] = summary_info["extra_task_value"]
        lineage_series_fields = list(lineage_series_info[0].keys())
        lineage_series_fields.sort()
        write_header = False
        if lineage_series_header is None:
            write_header = True
            lineage_series_header = ",".join(lineage_series_fields)
        elif lineage_series_header != ",".join(lineage_series_fields):
            print("Lineage series header mismatch!")
            exit(-1)
        lineage_series_content = []
        for i in range(len(lineage_series_info)):
            lineage_series_content.append(",".join([str(lineage_series_info[i][field]) for field in lineage_series_fields]))
        with open(lineage_series_fpath, "a") as fp:
            if write_header:
                fp.write(lineage_series_header)
                fp.write("\n")
            fp.write("\n".join(lineage_series_content))
            # BUG FIX: same row-gluing issue as the time series file above.
            fp.write("\n")
        lineage_series_content = []
        ############################################################

        # Add summary_info to aggregate content
        summary_fields = list(summary_info.keys())
        summary_fields.sort()
        if summary_header is None:
            summary_header = summary_fields
        elif summary_header != summary_fields:
            print("Header mismatch!")
            exit(-1)
        summary_line = [str(summary_info[field]) for field in summary_fields]
        summary_content_lines.append(",".join(summary_line))
        ############################################################

    # write out aggregate data
    with open(os.path.join(dump_dir, "aggregate.csv"), "w") as fp:
        out_content = ",".join(summary_header) + "\n" + "\n".join(summary_content_lines)
        fp.write(out_content)

if __name__ == "__main__":
    main()
18,283
a5133b5aece155404cf2824fa784a53650991dbd
""" Author: Luis_C-137 Using a spherical gausian distribution exmaple This is just for practice purposes This is NOT functional code """ from scipy.stats import multivariate_normal as mvn import numpy as np import matplotlib.pyplot as plt cov = np.array([[1,0.8],[0.8,3]]) mu = np.array([0,2]) # r = mvn.rvs(mean=mu, cov=cov,size=1000) # We can use Scipy OR r = np.random.multivariate_normal(mean=mu, cov=cov, size=1000) # Use Numpy plt.scatter(r[:,0],r[:,1]) plt.axis('equal') plt.show()
18,284
0cd8c3f235a49dc0b878b9da7514ac940b885367
print(">> Progeam Change number to Text <<") num = int(input("Enter integer number : ")) print(f"Number : {num}") snum = "" for i in range(len(str(num))) : n = num % 10 if n == 0 : snum = "Zero " + snum elif n == 1 : snum = "One " + snum elif n == 2 : snum = "Two " + snum elif n == 3 : snum = "Three " + snum elif n == 4 : snum = "Four " + snum elif n == 5 : snum = "Five " + snum elif n == 6 : snum = "Six " + snum elif n == 7 : snum = "Seven " + snum elif n == 8 : snum = "Eight " + snum elif n == 9 : snum = "Nine " + snum num = num // 10 print("Text :",snum) print("Exit Program")
18,285
3f8298cbc47ddc3d9bf4d3c736d2ec12512f7090
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Distribute CTR model for test fleet api
"""

import os
import shutil
import tempfile
import time

import ctr_dataset_reader
import numpy as np
from dist_fleet_ctr import TestDistCTR2x2, fake_ctr_reader
from test_dist_fleet_base import runtime_main

import paddle
from paddle import fluid

# Fix seed for test
fluid.default_startup_program().random_seed = 1
fluid.default_main_program().random_seed = 1


class TestDistGpuPsCTR2x2(TestDistCTR2x2):
    """
    For test CTR model, using Fleet api & PS-GPU
    """

    def check_model_right(self, dirname):
        # Sanity-check a saved model directory: the serialized `__model__`
        # program must be parseable; a human-readable dump is written alongside
        # it as `__model__.proto`.
        model_filename = os.path.join(dirname, "__model__")
        with open(model_filename, "rb") as f:
            program_desc_str = f.read()
        program = fluid.Program.parse_from_string(program_desc_str)
        with open(os.path.join(dirname, "__model__.proto"), "w") as wn:
            wn.write(str(program))

    def do_pyreader_training(self, fleet):
        """
        do training using dataset, using fetch handler to catch variable
        Args:
            fleet(Fleet api): the fleet object of Parameter Server, define distribute training role
        """
        # Bind this worker to the GPU assigned via FLAGS_selected_gpus
        # (defaults to device 0).
        device_id = int(os.getenv("FLAGS_selected_gpus", "0"))
        place = fluid.CUDAPlace(device_id)
        exe = fluid.Executor(place)
        exe.run(fleet.startup_program)
        fleet.init_worker()
        batch_size = 4
        # Feed synthetic CTR samples through the py_reader set up by the base class.
        train_reader = paddle.batch(fake_ctr_reader(), batch_size=batch_size)
        self.reader.decorate_sample_list_generator(train_reader)
        for epoch_id in range(1):
            self.reader.start()
            try:
                pass_start = time.time()
                # The reader signals end-of-data by raising EOFException, which
                # terminates this loop via the except clause below.
                while True:
                    loss_val = exe.run(
                        program=fleet.main_program,
                        fetch_list=[self.avg_cost.name],
                    )
                    loss_val = np.mean(loss_val)
                    # Average the loss across all trainers: sum via all_reduce,
                    # divide by the trainer count obtained from all_gather.
                    reduce_output = fleet.util.all_reduce(
                        np.array(loss_val), mode="sum"
                    )
                    loss_all_trainer = fleet.util.all_gather(float(loss_val))
                    loss_val = float(reduce_output) / len(loss_all_trainer)
                    message = "TRAIN ---> pass: {} loss: {}\n".format(
                        epoch_id, loss_val
                    )
                    fleet.util.print_on_rank(message, 0)
                # NOTE(review): unreachable/unused -- the loop above only exits
                # by exception, so pass_time is never consumed here.
                pass_time = time.time() - pass_start
            except fluid.core.EOFException:
                self.reader.reset()

        # Save an inference model and (on worker 0 only) validate it and save
        # the persistable parameters; the temp dir is cleaned up afterwards.
        model_dir = tempfile.mkdtemp()
        fleet.save_inference_model(
            exe, model_dir, [feed.name for feed in self.feeds], self.avg_cost
        )
        if fleet.is_first_worker():
            self.check_model_right(model_dir)
        if fleet.is_first_worker():
            fleet.save_persistables(executor=exe, dirname=model_dir)
        shutil.rmtree(model_dir)

    def do_dataset_training(self, fleet):
        # Same training flow as do_pyreader_training, but driven by a
        # QueueDataset fed from files through a pipe command.
        (
            dnn_input_dim,
            lr_input_dim,
            train_file_path,
        ) = ctr_dataset_reader.prepare_data()

        device_id = int(os.getenv("FLAGS_selected_gpus", "0"))
        place = fluid.CUDAPlace(device_id)
        exe = fluid.Executor(place)
        exe.run(fleet.startup_program)
        fleet.init_worker()

        thread_num = 2
        batch_size = 128
        # One copy of the training file per dataset thread.
        filelist = []
        for _ in range(thread_num):
            filelist.append(train_file_path)

        # config dataset
        dataset = paddle.distributed.QueueDataset()
        dataset._set_batch_size(batch_size)
        dataset._set_use_var(self.feeds)
        pipe_command = 'python ctr_dataset_reader.py'
        dataset._set_pipe_command(pipe_command)
        dataset.set_filelist(filelist)
        dataset._set_thread(thread_num)

        for epoch_id in range(1):
            pass_start = time.time()
            dataset.set_filelist(filelist)
            exe.train_from_dataset(
                program=fleet.main_program,
                dataset=dataset,
                fetch_list=[self.avg_cost],
                fetch_info=["cost"],
                print_period=2,
                debug=int(os.getenv("Debug", "0")),
            )
            # NOTE(review): computed but never used.
            pass_time = time.time() - pass_start

        # Model saving is opt-in for this path, gated on the SAVE_MODEL env var.
        if os.getenv("SAVE_MODEL") == "1":
            model_dir = tempfile.mkdtemp()
            fleet.save_inference_model(
                exe,
                model_dir,
                [feed.name for feed in self.feeds],
                self.avg_cost,
            )
            if fleet.is_first_worker():
                self.check_model_right(model_dir)
            if fleet.is_first_worker():
                fleet.save_persistables(executor=exe, dirname=model_dir)
            shutil.rmtree(model_dir)


if __name__ == "__main__":
    runtime_main(TestDistGpuPsCTR2x2)
18,286
6b042c3fe5c5f307c5d5687bf76bdef4055fba20
'''This is main of Invoice Generator for Translators by E. Turkulainen'''
import job_lister
import invoice_generator
import view_jobs

# Fixed customer roster; menu choices 1/2 below index into this list.
customers = ['Customer 1', 'Customer 2']

print('\nHello there! Welcome to Invoice Generator.\nIs there something specific you\'re looking'
      ' to accomplish today?\n')

# Main menu loop; only option 4 (break) exits.
k = True
while k is True:
    # NOTE(review): int() raises ValueError on non-numeric input -- no validation.
    choice = int(input('\n1. Add a completed job\n'
                       '2. View a month\'s completed jobs and their total\n'
                       '3. Generate an invoice\n'
                       '4. Quit\n'))
    if choice == 1:
        # Record a completed job for the selected customer.
        client = input('\nAight, for whom did you work for?\n'
                       '1. Customer 1\n'
                       '2. Customer 2\n')
        job_lister.add_job(customers[int(client)-1])
    elif choice == 2:
        view_jobs.view_jobs()
    elif choice == 3:
        # Generate one or more invoices until the user answers 'n'.
        another = 'y'
        while another == 'y':
            number = input('\nWho owes ya?\n'
                           '1. Customer 1\n'
                           '2. Customer 2\n')
            customer = customers[int(number)-1]
            invoiceno = input('And your invoice number?\n')
            print('Gotcha!\n')
            invoice_generator.generate(customer, invoiceno)
            print('Ka-ching! Generation ready. Enjoy :)')
            another = input('Create another?\ny/n\n')
            if another == 'n':
                print('Awesome!\n')
            else:
                # NOTE(review): any answer other than 'n'/'y' also falls here
                # but still ends the inner loop (condition checks 'y' only).
                print('Coming right up!\n')
    elif choice == 4:
        break
    print('\nAnything else?\n')
print('\nKay, see ya next time!\n')
exit()
18,287
668ef44359d79cb76df7b7ced2aed63e2da1b75b
import flask
import yfinance as yf

app = flask.Flask(__name__)


@app.route('/ticker/<symbol>')
def ticker(symbol):
    """Return Yahoo Finance metadata for *symbol* as a JSON response.

    The payload is whatever yfinance exposes via Ticker(...).info for the
    requested symbol.
    """
    # Renamed the local (was also `ticker`): it shadowed the view function name.
    quote = yf.Ticker(symbol)
    return flask.jsonify(quote.info)
18,288
ee421ec67aaf2cd95258170ec13511f6d0e8e19d
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email import encoders
import mimetypes
import os
import smtplib
import config as cf

# SECURITY NOTE(review): account credentials are hard-coded in source control.
# Move them to environment variables or a secrets store before shipping.
meu_email = ('chewbaccanoesquilo@gmail.com')
senha = ('oaufutzibwdwgatf')
email_destino = ('marcos_henrique@outlook.com')

# Build the multipart message: body text from config plus one attachment.
msg = MIMEMultipart()
msg['from'] = meu_email
msg['to'] = email_destino
msg['subject'] = 'Entrega TCC.'

corpo = MIMEText(cf.msgDoEmail)
msg.attach(corpo)

# NOTE(review): computed but never used below -- candidate for removal if no
# other module imports it.
pastaEnvio = os.getcwd() + '/ArquivosParaEnvio'

fileName = 'ArquivosParaEnvio/teste.docx'

# Guess the attachment's MIME type from its file name, e.g. "application/..."
# (reference: http://www.freeformatter.com). NOTE(review): guess_type returns
# None for unknown extensions, which would crash the split() below.
mimetypeAnexo = mimetypes.guess_type(fileName)[0].split('/')
part = MIMEBase(mimetypeAnexo[0], mimetypeAnexo[1])
# FIX: read the attachment inside a context manager so the file handle is
# always closed (the original left it open for the process lifetime).
with open(fileName, 'rb') as attachment:
    part.set_payload(attachment.read())
encoders.encode_base64(part)
part.add_header('Content-Disposition', f'attachment; filename={fileName}')
msg.attach(part)

# FIX: `with` guarantees server.quit() even if login/sendmail raises; the
# explicit quit() call at the end is no longer needed.
with smtplib.SMTP(host='smtp.gmail.com', port=587) as server:
    server.ehlo()
    server.starttls()
    server.login(meu_email, senha)
    text = msg.as_string()
    server.sendmail(meu_email, email_destino, text)
    print(text)
18,289
e0c4b523668df5b78fd1f70a4d2df1525b583b4b
from __future__ import print_function import unittest from tweet import DISTURBANCE, DELAY, DETOUR from parsing import extract_lines, extract_ride_type, remove_links, extract_event_type, extract_destination, \ extract_reason class TestParsing(unittest.TestCase): def test_verstoring_druk(self): text = 'Verstoring bus 48 en 65 door extreme drukte. Kijk op https://t.co/VtTXVEd4vF' text = remove_links(text.lower()) self.assertEquals(['48', '65'], extract_lines(text)) self.assertEquals('bus', extract_ride_type(text)) self.assertEquals(DISTURBANCE['nl'][0], extract_event_type(text)) self.assertIsNone(extract_destination(text)) self.assertEqual('extreme drukte', extract_reason(text)) def test_verstoring(self): text ='Verstoring tram 17 (richting Centraal Station) door een technisch defect. Kijk op https://t.co/VtTXVEd4vF' text = remove_links(text.lower()) destination = extract_destination(text) self.assertEqual('centraal station', destination) self.assertEquals(DISTURBANCE['nl'][0], extract_event_type(text)) self.assertEquals(['17'], extract_lines(text)) self.assertEquals('tram', extract_ride_type(text)) def test_delay(self): text = 'Bus 66 rijdt langzaam weer volgens dienstregeling. Houd rekening met vertraging.' text = remove_links(text.lower()) self.assertEquals(DELAY['nl'][0], extract_event_type(text)) self.assertEquals(['66'], extract_lines(text)) self.assertEquals('bus', extract_ride_type(text)) def test_detour(self): text = 'Omleiding tram 7 op last van de brandweer vanwege brand op Hoofdweg' text = remove_links(text.lower()) self.assertEquals(DETOUR['nl'][0], extract_event_type(text)) self.assertEquals(['7'], extract_lines(text)) self.assertEquals('tram', extract_ride_type(text)) def test_delay_tram(self): text = 'Tram 7 rijdt geleidelijk weer volgens dienstregeling. Houd rekening met vertraging.' 
text = remove_links(text.lower()) self.assertTrue(extract_event_type(text) in DELAY['nl']) self.assertEquals(['7'], extract_lines(text)) self.assertEquals('tram', extract_ride_type(text)) def test_verstoring_repairs_metro(self): text = 'Verstoring metro 51, 53 en 54 door herstelwerkzaamheden. Kijk op https://t.co/VtTXVEd4vF' text = remove_links(text.lower()) self.assertEquals(DISTURBANCE['nl'][0], extract_event_type(text)) self.assertEquals(['51', '53', '54'], extract_lines(text)) self.assertEquals('metro', extract_ride_type(text)) self.assertEqual('herstelwerkzaamheden', extract_reason(text)) def test_verstoring_repairs_bus(self): text = 'Verstoring bus 22 (richting Station Sloterdijk) door werkzaamheden. Kijk op https://t.co/VtTXVEd4vF' text = remove_links(text.lower()) self.assertEquals(DISTURBANCE['nl'][0], extract_event_type(text)) self.assertEquals(['22'], extract_lines(text)) self.assertEquals('bus', extract_ride_type(text)) self.assertEqual('werkzaamheden', extract_reason(text)) def test_running_slowly_tram_2_5(self): text = 'Tram 2 en 5 rijdt langzaam weer volgens dienstregeling. Houd rekening met vertraging.' text = remove_links(text.lower()) self.assertEquals(DELAY['nl'][0], extract_event_type(text)) self.assertEquals(['2', '5'], extract_lines(text)) self.assertEquals('tram', extract_ride_type(text)) def test_running_slowly_2_5_cause(self): text = 'Verstoring tram 2 en 5 door een auto op de rails. Kijk op https://t.co/VtTXVEd4vF' text = remove_links(text.lower()) self.assertEquals(DISTURBANCE['nl'][0], extract_event_type(text)) self.assertEquals(['2', '5'], extract_lines(text)) self.assertEquals('tram', extract_ride_type(text)) self.assertEquals('een auto op de rails', extract_reason(text)) def test_running_two_destinations(self): text = 'Verstoring tram 3 (richting Muiderpoortstation) en 12 (richting Amstelstation) door een technisch defect. 
Kijk op https://t.co/VtTXVEd4vF' text = remove_links(text.lower()) self.assertEquals(DISTURBANCE['nl'][0], extract_event_type(text)) self.assertEquals(['3', '12'], extract_lines(text)) self.assertEquals('tram', extract_ride_type(text)) def test_running_slowly_two(self): text = 'Tram 9 en 14 rijden langzaam weer volgens dienstregeling. Houd rekening met vertraging.' text = remove_links(text.lower()) self.assertEquals(DELAY['nl'][0], extract_event_type(text)) self.assertEquals(['9', '14'], extract_lines(text)) self.assertEquals('tram', extract_ride_type(text)) @unittest.skip('todo') def test_running_slowly(self): text = 'Bus 48 en 65 rijden weer volgens dienstregeling.' @unittest.skip('todo') def test_works_finished(self): text = 'Werkzaamheden Prins Hendrikkade eerder klaar dan gepland. Vanaf maandag rijden bus 22, 48 en 248 al weer normale dienstregeling.' @unittest.skip('todo') def test_repair_normal_time(self): text = 'Werkzaamheden Prins Hendrikkade eerder klaar dan gepland. Vanaf maandag rijden bus 22, 48 en 248 al weer normale dienstregeling.' @unittest.skip('todo') def test_normal_schedule(self): text = 'Metro 51, 53 en 54 rijden weer volgens dienstregeling.' @unittest.skip('todo') def test_increased_delay(self): text = 'Oplopende vertraging bus 22 door verkeersdrukte Spaarndammerstraat.' @unittest.skip('todo') def test_stop_station(self): text = 'Bus 22 (richting Station Sloterdijk) halteert weer bij de Oostzaanstraat.' def test_chunk(self): the_list = [1, 2, 3, 4] self.assertEquals([(1, 2), (3, 4)], chunk(the_list, 2)) the_list = [1, 2, 3, 4, 5] print(chunk(the_list, 2)) def chunk(the_list, size): from itertools import izip_longest it = [iter(the_list)] * size return list(izip_longest(*it))
18,290
11ad9a3221722cbe359ca8995da44b4a90a01b31
# -*- coding: utf-8 -*- from dummy import * from miniCurl import Curl curl = Curl() #!/usr/bin/env python # -*- coding: utf-8 -*- #author:小光 #refer:http://www.wooyun.org/bugs/wooyun-2015-0121863 def assign(service, arg): if service == "metinfo": return True, arg def audit(arg): url = arg + 'member/' cookie = 'Cookie: PHPSESSID=9be0lkppmei08qedn56funvje0; CNZZDATA1670348=cnzz_eid%3D24422845-1444377232-%26ntime%3D1444377232' data = 'admin_name=admin&Submit=+%E6%89%BE%E5%9B%9E%E5%AF%86%E7%A0%81+' code, head,res, errcode, _ = curl.curl2(url,cookie = cookie,data = data) if code == 200 and 'index_member.php?lang=cn' in res : security_hole(url + " :任意用户密码修改") if __name__ == '__main__': from dummy import * audit(assign('metinfo', 'http://www.topxon.com/')[1])
18,291
21a06832a50d3dc15cc7581453c4c5bac9d3f1a1
import os,sys import json import psycopg2 import datetime import datetime, time import commands import logging import smtplib import base64 import subprocess from email.mime.multipart import MIMEMultipart from email.mime.application import MIMEApplication from email.MIMEBase import MIMEBase from email.mime.text import MIMEText from email import Encoders import ConfigParser import re #import ebcdic g_test_var = 'test_phase' g_func_var = 'reporting.fileload' ############################################################################################################################# # / /# # / validateEmail : Validates all the email id in the property file to check whether they are active. /# # / /# # / /# # / Retruns a list of valid emails /# # / /# # / /# # / /# # / /# ############################################################################################################################# def validateEmail(SERVER, validLst): host = SERVER emailProp = '/home/phdftpuser/%s/project/config/Email.properties' % (g_test_var) config = ConfigParser.RawConfigParser() config.read(emailProp) emailLst = config.get('Email Header', 'email.id').split(',') server = smtplib.SMTP() server.set_debuglevel(0) server.connect(host) server.helo(host) for i in emailLst: server.mail(i) code, msg = server.rcpt(str(i)) if code == 250: validLst.append(i) server.quit() return validLst ############################################################################################################################# # / /# # / sendErromail : Sends the email to the list of active users with the Error and Debug information. 
/# # / /# # / /# # / Retruns nothing /# # / /# # / /# # / /# # / /# ############################################################################################################################# def sendErromail(errInfo, job, process): print 'Email Started' today = datetime.datetime.now().date() SERVER = "mailhost.waddell.com" BODY = errInfo address_book = [] SUBJECT = '' if process == '': SUBJECT = 'Tasked Failed -- %s in TEST for process on %s' % (job, str(today)) else: SUBJECT = 'Tasked Failed -- %s in TEST @ %s on %s' % (job, process, str(today)) address_book = validateEmail(SERVER, address_book) FROM = "HDFSSupportTeam@waddell.com" TO = ','.join(address_book) msg = MIMEMultipart() msg['Subject'] = SUBJECT msg['From'] = FROM msg['To'] = TO msg.preamble = 'Multipart massage.\n' msg.attach(MIMEText(BODY, 'plain')) try: server = smtplib.SMTP(SERVER) server.sendmail(FROM, address_book, msg.as_string()) print 'Message Sent' except SMTPEXCEPTION: print "Error: unable to send Error email with attachment" ############################################################################################################################# # / /# # / getConn : fetch the property information from the configuration file and returns . /# # / /# # / Retruns host,database,hadoop environment location and network drive environment locations. /# # / /# # / /# ############################################################################################################################# def getConn(): connProp = '/home/phdftpuser/%s/project/config/host.properties' % (g_test_var) config = ConfigParser.RawConfigParser() config.read(connProp) connLst = config.get('Host Property', 'Host.id').split(',') connLst = ','.join(connLst) return connLst.split(',') ############################################################################################################################# # / /# # / readLayout : Reads the json layout for each key and returns the keys and value for the flags. 
/# # / /# # / Retruns keys and value for the flags /# # / sequence /# # / sequence_ext /# # / seq_ident_char /# # / sec_col_Flag /# # / record_flag /# # / /# ############################################################################################################################# def readLayout(load): recordFlush = '' recordSeq = [] follw_Rcd_Chars = '' recordSeq_ext = '' sec_ord = '' for key, value in load.items(): for v in value: if "record_flag" in v.keys(): if v["record_flag"] == 'True' and "sec_col_Flag" not in v.keys(): recordFlush = key if "sec_col_Flag" in v.keys() and v["record_flag"] == 'True': sec_ord = key if "sequence" in v.keys(): if v["sequence"] == 'True': recordSeq.append(key) if "sequence_ext" in v.keys(): if v["sequence_ext"] == 'True': recordSeq_ext = key follw_Rcd_Chars = v["seq_ident_char"] return recordFlush, recordSeq, recordSeq_ext, sec_ord, follw_Rcd_Chars ############################################################################################################################## # / /# # / /# # / populateNulls : populates nulls for the record that doesn't exist in the input file. /# # / /# # / Returns pipe delimited null string /# # / /# ############################################################################################################################## def populateNulls(item, json_load): res = '' for i in range(1, len(json_load["db_prop"][0]["columns"][0][item].split('|')) + 1): res = res + 'null|' return res ############################################################################################################################## # / /# # / /# # / moveToHDFS : copies the processed file from the network wip drive to HDFS. 
/# # / /# # / Returns None /# # / /# ############################################################################################################################## def moveToHDFS(progJobPath, jobName, table, g_hdp_loc, nw_Drive): print 'end time' + str(datetime.datetime.now()) fleExst = "/%s/%s/%s.txt" % (g_hdp_loc, jobName, table) print fleExst res = rmrHdfs(fleExst) print res fle_outLoc = "/%s/%s/wip/%s.txt" % (nw_Drive, progJobPath, table) print fle_outLoc fleToHdp = "%s /%s/%s/" % (fle_outLoc, g_hdp_loc, jobName) print fleToHdp putHdfs(fleToHdp) os.remove(fle_outLoc) def testHdfs(absPth): fleExst = "hadoop fs -test -e %s" % (absPth) return subprocess.call(fleExst, shell=True) def rmrHdfs(absPth): # fleExst="hadoop fs -test -e %s"%(absPth) rtrncd = testHdfs(absPth) if int(rtrncd) == 0: hadoopfsjob = "hadoop fs -rmr %s" % (absPth) os.system(hadoopfsjob) return rtrncd def catHdfs(absPth): hadoopFsJob = "hadoop fs -cat %s" % (absPth) rsltSt = commands.getoutput(hadoopFsJob) return rsltSt def putHdfs(absPth): fleToHdp = "hdfs dfs -put %s" % (absPth) os.system(fleToHdp) def fleCheck(absPth, prcFle, mode): if mode == 'r': if os.path.isfile(absPth): print "%s File Exists, loading the file" % (prcFle) fle2Read = open(absPth, mode) else: print "The %s you loaded doesn't exist in the location please recheck the location" % (prcFle) errMsg = "The %s you loaded doesn't exist in the location please recheck the location" % (prcFle) deMsg = "Please check the if %s exists for the process in the location -- %s" % (prcFle, absPth) fnlStrg = errMsg + '\n' + deMsg sendErromail(fnlStrg, '%s doesnt exsist' % (prcFle), '') sys.exit(1) else: try: if os.path.isfile(absPth) and mode == 'a+': fle2Read = open(absPth, mode) elif os.path.isfile(absPth) and mode == 'w': os.remove(absPth) fle2Read = open(absPth, mode) else: fle2Read = open(absPth, 'w') except IOError as e: print "cannot open file ({0}): {1}".format(e.errno, e.strerror) errMsg = "cannot open file ({0}): {1}".format(e.errno, 
e.strerror) deMsg = "Please Check the filepath: %s" % (absPth) fnlStrg = errMsg + '\n' + deMsg sendErromail(fnlStrg, '%s doesnt exsist' % (prcFle), '') sys.exit(1) return fle2Read def tmeStmpCheck(fleSzeChk, tsOutput, curTSTMp, table): if fleSzeChk: print 'Time log is empty' else: for dte in tsOutput: if curTSTMp == dte.strip(): print 'Process %s' % (table) print 'This file was already processed for timestamp in logs %s,program exiting' % (curTSTMp) errMsg = "This file was already processed for timestamp in logs -%s,program exiting @ %s " % ( curTSTMp, str(datetime.datetime.now())) deMsg = "Please check the input file passed" fnlStrg = errMsg + '\n' + deMsg sendErromail(fnlStrg, 'Input File Parsed Already', table) sys.exit(1) return 'Timestamp Check completed' ############################################################################################################################## # / /# # / flushOutput : flush each of the records in the dictionaries[dict,seqDict] into output file. /# # / /# # / Note: /# # / Whenever there are records in the seqDict ,while loop executes till cntr is less than length of seqDict. /# # / This method will call populateNulls() method to populate nulls for the keys that doesn't exist in dictionaries. /# # / /# # / Returns res_cnt for the count debugging purpose. 
/# # / /# # / /# ############################################################################################################################## def flushOutput(as_of_dt, bus_flag, dict, seqDict, seq_ext, res_cnt, json_load, seq, op, proc_id, insrt_ts, sec_ord, rec_id, rec_id_fl): output = '' if sec_ord in dict.keys(): col_order = json_load["db_prop"][0]["sec_col_order"] else: col_order = json_load["db_prop"][0]["col_order"] cnt, lent = 1, 1 if bool(seqDict): for i in seqDict.keys(): if lent == int(i.split('_')[1]): lent = lent elif lent < int(i.split('_')[1]): lent = int(i.split('_')[1]) else: lent = lent while True: output = '' if cnt > lent: break for cols in col_order: if cols in dict.keys(): output = output + dict[cols] elif cols in seq: if cols + '_' + str(cnt) in seqDict.keys(): output = output + seqDict[cols + '_' + str(cnt)] else: output = output + populateNulls(cols, json_load) else: output = output + populateNulls(cols, json_load) cnt = cnt + 1 rec_id = rec_id + 1 if bus_flag == 'True': if rec_id_fl == 'True': output = str(rec_id) + '|' + as_of_dt + '|' + output + proc_id + '|' + insrt_ts + '\n' else: output = as_of_dt + '|' + output + proc_id + '|' + insrt_ts + '\n' else: output = output + proc_id + '|' + insrt_ts + '\n' op.write(output) res_cnt = res_cnt + 1 return res_cnt, rec_id ############################################################################################################################## # / /# # / parseLine : This method will take the input line and parse with respective to the json layout . /# # / /# # / Note: /# # / This method iterates over the columns per key in the json layout and parses out the positions and data type /# # / of each column and organize the input line into pipe delimited formatted row. /# # / /# # / Returns formatted string of the input line. 
/# # / /# # / v.01 updated the code to handle the different date formats /# # / /# # / /# ############################################################################################################################## def parseLine(item, line, json_load): res_st = '' trim_ck = False fd_nm_fl = False diff_length = 0 expected_length = int(json_load["db_prop"][0]["len_of_lne"]) actual_length = int(len(line)) if expected_length > actual_length: diff_length = expected_length - actual_length else: diff_length = actual_length - expected_length for val in json_load["db_prop"][0]["columns"][0][item].split('|'): st = '' if len(json_load[item][0]["attrib"][val][0].split(',')) > 3: start, end, data_type, t = json_load[item][0]["attrib"][val][0].split(',') if t == 'strip()': trim_ck = True else: start, end, data_type = json_load[item][0]["attrib"][val][0].split(',') if fd_nm_fl == 'true': start = str(int(start) + diff_length) end = str(int(end) + diff_length) if len(line) != expected_length and str(val) == 'fund_nm': end = str(int(end) + diff_length) fd_nm_fl = 'true' line = re.sub(r'\x00', ' ', line) if line[int(start):int(end)].strip() == '': st = 'null' elif data_type in ('%m%d%Y', '%Y%m%d', '%y%m%d', '%Y%j', '%m%d%y', '%m/%d/%Y', '%m/%d/%y'): dyn_Zero_flr = '0' from datetime import datetime if line[int(start):int(end)].strip() != dyn_Zero_flr.zfill(len(line[int(start):int(end)].strip())) and line[ int( start):int( start) + 4].strip() != '0000': # dyn_Zero_flr.zfill(int(end)-int(start)): st = str(datetime.strptime(line[int(start):int(end)], data_type).date()) else: st = 'null' elif data_type == 'int': if line[int(start):int(end)].strip() != '': st = line[int(start):int(end)].strip() else: st = 'null' elif data_type.startswith('decimal'): pre, sca = data_type.split('(')[1].split('|') sca = int(sca.replace(')', '')) if len(line[int(start):int(end)]) < int(pre): st = line[int(start):int(end)].zfill(int(pre)) st = line[int(start):int(end) - sca] + '.' 
+ line[int(end) - sca:int(end)] elif data_type == 'null': st = 'null' elif data_type.startswith('Sdecimal'): pre, sca = data_type.split('(')[1].split('|') sca = int(sca.replace(')', '')) intr_Reslt = line[int(start):int(end)] spcl_Chr = re.findall(r'[^0-9:]', intr_Reslt) for chr in spcl_Chr: indx = intr_Reslt.index(chr) chr_Conv = chr.encode('cp1140').encode("hex") if chr_Conv[0:1] == 'c': intr_Reslt = intr_Reslt.replace(chr, chr_Conv[-1]) intr_Reslt = '+' + intr_Reslt[0:int(pre) - sca] + '.' + intr_Reslt[int(pre) - sca:] elif chr_Conv[0:1] == 'd': intr_Reslt = intr_Reslt.replace(chr, chr_Conv[-1]) intr_Reslt = '-' + intr_Reslt[0:int(pre) - sca] + '.' + intr_Reslt[int(pre) - sca:] else: intr_Reslt = intr_Reslt.replace(chr, chr_Conv[-1]) intr_Reslt = intr_Reslt[0:int(pre) - sca] + '.' + intr_Reslt[int(pre) - sca:] st = intr_Reslt else: st = line[int(start):int(end)].strip().replace('|', '').replace('\\', '\\\\') if trim_ck: exec "st = '%s'.%s" % (st, t) trim_ck = False res_st = res_st + st + '|' return res_st ############################################################################################################################## # / /# # / process : This method is the decision maker and core of this program it will identify the lines that needs to /# # / be ignored and flush the result set processed. /# # / Note: /# # / This method have all the dictioanries initialization to keep the record set that needs to be flushed every time /# # / it sees the flush item (which mean start of the record set [CIA,CIB,CIC],[CIA,CIB] which is "CIA" in this case). /# # / Line#343 : This method has the logic to identify the sequence flag enabled keys and route them /# # / appropriate handling process. /# # / There are many flags that will help the script to navigate the line to parseLine method for further /# # / transformations. /# # / This method will call the parseLine and flush method to transform and the push the line to output file. 
/# # / "mul_seq_Cnt" will store the cntr for the key in case of sequence and update for each occurence of the key /# # / /# # / Returns the count of lines written to the file and timestamp from file for log /# # / /# # / /# # / /# ############################################################################################################################## def process(json_load, inp, op, tsOutput, fleSzeChk, table): print 'entered in to process' flush, seq, seq_ext, sec_ord, follw_Rcd_Chars = readLayout(json_load) cnt, seq_cnt, res_cnt, rec_id = 0, 0, 0, 0 proc_id = json_load["db_prop"][0]["hdfs_proc_id"] insrt_ts = str(datetime.datetime.now()) as_of_dt = '' bus_flag = 'False' if "bus_proc_dt" in json_load["db_prop"][0].keys(): bus_flag = json_load["db_prop"][0]["bus_proc_dt"] rec_id_fl = 'False' mul_seq_Cnt = {} dflt_Key = '' len_of_lne = int(json_load["db_prop"][0]["len_of_lne"]) lsPos, lePos = json_load["db_prop"][0]["line_default_char"][0].split(',') frstDict, seqDict = {}, {} hdrLne, sPos, ePos = json_load["db_prop"][0]["ts_valid"][0].split(',') trLr = json_load["Trailer"][0]["Seq"] print 'start time' + str(datetime.datetime.now()) isPos, iePos = 0, 0 item, recrd_ext = '', '' for line in inp: # if re.search('[\xAE]',line) : # print 'reached at AE' # line = re.sub('[\xAE]',' ',line) # line = re.sub(r'[^\x01-\x7F]','',line) # else: # line = re.sub(r'[^\x01-\x7F]',' ',line) # line = re.sub(r'[^\x00-\x7F]','i',line) offDiff = len(line) - len_of_lne prev_Indx = None uni_Chr = re.findall(r'[^\x01-\x7F]|[\xAE]', line) if len(uni_Chr) != 0: for chr in uni_Chr: chr_Indx = line.index(chr) if "encode_flag" in json_load["db_prop"][0].keys(): if offDiff != 0 and prev_Indx != chr_Indx: line = ''.join(line[0:chr_Indx] + line[chr_Indx + 1:]) else: line = ''.join(line[0:chr_Indx] + ' ' + line[chr_Indx + 1:]) prev_Indx = chr_Indx offDiff -= 1 else: line = ''.join(line[0:chr_Indx] + ' ' + line[chr_Indx + 1:]) if 'no_rec_typ' in json_load["db_prop"][0].keys() and not 
line.startswith(hdrLne) and not line.startswith(trLr): dflt_Key = flush line = line.rstrip().ljust(len_of_lne) + dflt_Key if line.strip().startswith(hdrLne): timeStamp = line[int(sPos):int(ePos)].strip() tsCheck = tmeStmpCheck(fleSzeChk, tsOutput, timeStamp, table) if bus_flag == 'True': lne, strt_dt, end_dt, rec_id_fl = json_load["db_prop"][0]["as_of_dt"][0].split(',') if (line.strip().startswith(lne.strip())): if (int(end_dt) - int(strt_dt)) == 8: line = line.strip() if line.startswith('FH'): as_of_dt = line[int(end_dt) - 4:int(end_dt)] + '-' + line[int(strt_dt):int( strt_dt) + 2] + '-' + line[int(strt_dt) + 2:int(strt_dt) + 4] else: as_of_dt = line[int(strt_dt):int(strt_dt) + 4] + '-' + line[int(strt_dt) + 4:int( strt_dt) + 6] + '-' + line[int(strt_dt) + 6:int(end_dt)] else: as_of_dt = line[int(end_dt) - 4:int(end_dt)] + '-' + line[ int(strt_dt):int(strt_dt) + 2] + '-' + line[ int( strt_dt) + 3:int( strt_dt) + 5] recrd_ext = line[int(lsPos):int(lePos)] + line[int(isPos):int(iePos)] if (((line.startswith(flush) or line[int(lsPos):int(lePos)] == flush or recrd_ext == flush) or ( line.startswith(sec_ord) and sec_ord != '')) or ( line.startswith(trLr) or trLr == 'default')) and cnt != 0: if len(frstDict.keys()) != 0: res_cnt, rec_id = flushOutput(as_of_dt, bus_flag, frstDict, seqDict, seq_ext, res_cnt, json_load, seq, op, proc_id, insrt_ts, sec_ord, rec_id, rec_id_fl) frstDict, seqDict = {}, {} mul_seq_Cnt = {} if line.startswith(trLr): cnt, rec_id = 0, 0 if line[int(lsPos):int(lePos)] in json_load.keys(): item = line[int(lsPos):int(lePos)] if "seq_ident_FLg" in json_load[item][0].keys(): isPos, iePos = json_load[item][0]["seq_ident_pos"][0].split(',') else: isPos, iePos = 0, 0 if line[int(lsPos):int(lePos)] + line[int(isPos):int(iePos)].strip() in json_load.keys(): item = line[int(lsPos):int(lePos)] + line[int(isPos):int(iePos)].strip() else: item = item if len(seq) == 0 and "attrib" in json_load[item][0].keys(): result = parseLine(item, line, json_load) if 
len(seq) != 0 and item not in seq: result = parseLine(item, line, json_load) if item in seq: iChars = json_load[item][0]["seq_ident_char"] if line[int(isPos):int(iePos)].strip() in iChars or 'default' in iChars: if item not in mul_seq_Cnt: mul_seq_Cnt[item] = 0 result = parseLine(item, line, json_load) mul_seq_Cnt[item] = mul_seq_Cnt[item] + 1 if line[int(isPos):int(iePos)].strip() in iChars or 'default' in iChars: seqDict[item + '_' + str(mul_seq_Cnt[item])] = result else: if "attrib" in json_load[item][0].keys(): if "mul_recrd_frmt" in json_load[item][0].keys(): frstDict[item[int(lsPos):int(lePos)]] = result else: frstDict[item] = result if flush == item: cnt = cnt + 1 return res_cnt, timeStamp ############################################################################################################################## # / /# # / pigprocess : This process is called for the input that has single record type and is more than a gigabyte,the input /# # / is copied to HDFS location first and uses pig to process the file. /# # / Note: /# # / This method will create pigscripts in the environment(dev,test..etc) project/<project name>/pigscript/folder with /# # / the appropriate script with necessary parameters like mapping will be written to this scripts. /# # / /# # / There is pyudf(parse.py) developed for this process under (/<environment>/project/framework/) that takes mapping /# # / as parameter from the script and pass it to the udf to return a formatted string. /# # / /# # / There are 2 pigscripts involved in this process ,the first will fetch the as_of_dt from the file and return to a /# # / file.The later script will send unformatted lines to pyudf to do data cleansing and returns a formatted line /# # / with date from the previous pig script result appended to it. 
/# # / /# # / Returns the count of lines written to the file and timestamp from file for log /# # / /# # / /# ############################################################################################################################## def pigprocess(json_load, inp, jobName, fleNme, fle_nm_Splt, fleSzeChk, tsOutput, table, g_hdp_loc): flush, seq, seq_ext, sec_ord, follw_Rcd_Chars = readLayout(json_load) mapLst = '' for i in json_load['db_prop'][0]['columns'][0][flush].split('|'): mapLst += str(json_load[flush][0]['attrib'][i]) + '||' mapLst = mapLst.replace("'", '').replace('u', '') print 'mapLst string is', mapLst len_of_lne = int(json_load['db_prop'][0]["len_of_lne"]) procId = json_load["db_prop"][0]["hdfs_proc_id"] insrt_ts = str(datetime.datetime.now()) if json_load["db_prop"][0]["bus_proc_dt"] == 'True': as_of_dt = str(json_load["db_prop"][0]["as_of_dt"]).replace("'", '').replace('u', '') else: as_of_dt = str(datetime.date.today()) print 'as_of_dt before transformation', as_of_dt hLne = as_of_dt.replace('[', '').replace(']', '').replace('u', '').split(',')[0] tLne = json_load["Trailer"][0]["Seq"] tsStmp = str(json_load["db_prop"][0]["ts_valid"]).replace("'", '').replace('u', '') # print 'as_of_dt after transformation',as_of_dt.replace('[','').replace(']','').split(',') # print 'tsstamp after transformations',tsStmp.replace('[','').replace(']','').split(',') fleTsExst = "/%s/%s/datetime/%s_validation" % (g_hdp_loc, jobName, fle_nm_Splt) tsExstres = rmrHdfs(fleTsExst) fleinHdp = "/%s/%s/inbound/%s" % (g_hdp_loc, jobName, fleNme) inHdpres = rmrHdfs(fleinHdp) fleToHdp = "%s /%s/%s/inbound/" % (inp, g_hdp_loc, jobName) print fleToHdp putHdfs(fleToHdp) ###################################################################################### # / /# # / /# # / Pig script->1 fetch the as_of_dt and valid timestamp on the file. 
/# # / /# ###################################################################################### pigReg = "register /home/phdftpuser/%s/project/framework/udf/parse.py using jython as my_special_udfs; \n" % ( g_test_var) pigLine1 = "A = LOAD '/%s/%s/inbound/%s'" % (g_hdp_loc, jobName, fleNme) + "AS (line:chararray);\n" pigLine2 = "B = FILTER A BY SUBSTRING(line,0,3)=='%s';\n" % (hLne) pigLine3 = "C = FOREACH B GENERATE my_special_udfs.fetchAs_of_dt(line,'%s','%s');\n" % (as_of_dt, tsStmp) pigLine4 = "STORE C INTO '/%s/%s/datetime/%s_validation';" % (g_hdp_loc, jobName, fle_nm_Splt) tmStmpFile = '/home/phdftpuser/%s/project/%s/pigscript/%s_getdate.pig' % (g_test_var, jobName, fle_nm_Splt) prcFle = 'pig timestamp script' mode = 'w' pigJobFile = fleCheck(tmStmpFile, prcFle, mode) pigJobFile.write(pigReg + pigLine1 + pigLine2 + pigLine3 + pigLine4) pigJobFile.close() pigjob = "pig %s" % (tmStmpFile) os.system(pigjob) hadoopFsJob = "/%s/%s/datetime/%s_validation/part*" % (g_hdp_loc, jobName, fle_nm_Splt) rsltSt = catHdfs(hadoopFsJob) print rsltSt.split('|') curTSTMp = rsltSt.split('|')[1] tsCheck = tmeStmpCheck(fleSzeChk, tsOutput, curTSTMp, table) fleTblExst = "/%s/%s/%s.txt" % (g_hdp_loc, jobName, table) tblExstres = rmrHdfs(fleTblExst) fleCntExst = "/%s/%s/count/%s_count" % (g_hdp_loc, jobName, table) cntExstres = rmrHdfs(fleCntExst) ###################################################################################### # / /# # / /# # / Pig script->2 Process the input lines and stores the formatted lines and count. 
/# # / /# ###################################################################################### pigprcReg = "register /home/phdftpuser/%s/project/framework/udf/parse.py using jython as my_special_udfs; \n" % ( g_test_var) pigprcLine1 = "A = LOAD '/%s/%s/inbound/%s'" % (g_hdp_loc, jobName, fleNme) + " AS (line:chararray);\n" pigprcLine2 = "B = FILTER A BY SUBSTRING(line,0,3)!='%s' AND SUBSTRING(line,0,3)!='%s';\n" % (hLne, tLne) pigprcLine3 = "C = FOREACH B GENERATE my_special_udfs.lneProcess(line,'%s','%s','%s','%s','%s');\n" % ( mapLst, procId, len_of_lne, insrt_ts, rsltSt.split('|')[0]) pigprcLine4 = "STORE C INTO '/%s/%s/%s.txt' USING PigStorage();\n" % (g_hdp_loc, jobName, table) pigprcLine5 = "D = FOREACH (GROUP C ALL) GENERATE COUNT(C);\n" pigprcLine6 = "STORE D INTO '/%s/%s/count/%s_count' USING PigStorage();\n" % (g_hdp_loc, jobName, table) pigScrpFile = '/home/phdftpuser/%s/project/%s/pigscript/%s_prcfile.pig' % (g_test_var, jobName, fle_nm_Splt) prcFle = 'pig prasing script' mode = 'w' pigprcFile = fleCheck(pigScrpFile, prcFle, mode) pigprcFile.write(pigprcReg + pigprcLine1 + pigprcLine2 + pigprcLine3 + pigprcLine4 + pigprcLine5 + pigprcLine6) pigprcFile.close() pigprcjob = "pig -M %s" % (pigScrpFile) os.system(pigprcjob) fleTblExst = "/%s/%s/%s.txt" % (g_hdp_loc, jobName, table) rtrnCd = testHdfs(fleTblExst) print rtrnCd if int(rtrnCd) == 1: fnlStrg = 'Pig Process failed please check the pig log' sendErromail(fnlStrg, 'Pig Process Failed', '') sys.exit(1) hadoopFscntJob = "/%s/%s/count/%s_count/part*" % (g_hdp_loc, jobName, table) rsltCnt = catHdfs(hadoopFscntJob) print 'result count is', rsltCnt return int(rsltCnt), rsltSt.split('|')[1] ############################################################################################################################## # / /# # / validate : This method validates between the counts returned from the process and external table and inserts /# # / rows into the internal table. 
/# # / Note: /# # / This method has the flag ts_tl_Flg which will set to "Y" when it is TL and "N" when it is TS base on the /# # / operation variable and will passed as an argument to the hawq function /# # / /# # / Returns Nothing /# # / /# ############################################################################################################################## def validate(conn, cur, schema, tablename, fCount, operation, func): cnt_Chk = 'select count(*) from %s.%s_ext' % (schema, tablename) if operation == 'TS': ts_tl = 'ts' ts_tl_Flg = 'N' insrt = "select %s('%s.%s_ext','%s.%s_%s','%s');" % ( func, schema, tablename, schema, tablename, ts_tl, ts_tl_Flg) else: ts_tl_Flg = 'Y' insrt = "select %s('%s.%s_ext','%s.%s','%s');" % (func, schema, tablename, schema, tablename, ts_tl_Flg) try: cur.execute(cnt_Chk) ext_Cnt = cur.fetchone()[0] print ext_Cnt if ext_Cnt == fCount: cur.execute(insrt) resultSet = cur.fetchone() conn.commit() else: print 'counts doesnt match please check the log tables' errMsg = "The Counts between the program counter - %s and external table - %s count doesnt match" % ( str(fCount), str(ext_Cnt)) deMsg = "Please check the logs.%s table for more info" % (tablename) fnlStrg = errMsg + '\n' + deMsg sendErromail(fnlStrg, 'Counts Doesnt match', tablename) sys.exit(1) print 'resultSet is ', resultSet print 'insert complete' except psycopg2.Error, e: print 'Error %s' % e errMsg = 'Error: Database operational issue' deMsg = 'DEBUG:Please check the query you provided %s' % (e) fnlStrg = errMsg + '\n' + deMsg sendErromail(fnlStrg, 'Database Operational issue', '') sys.exit(1) ############################################################################################################################## # / /# # / main : This is the start of the script where all the arguments are read and passed to each of the methods /# # / process and validate. 
/# # / Note: /# # / This methode has the "fle_Rmvl_Flg" flag which will tell the script to not to remove the input file as it has /# # / mulitple layouts to be processed. /# # / /# # / /# ############################################################################################################################## def main(arguments): print arguments srcPrp = arguments[0] input_file = arguments[1] trnSfrm = int(arguments[2]) inu_Flnme = input_file.split('/')[-1] if '_' in input_file.split('/')[-1]: fle_nm_Splt = input_file.split('/')[-1].split('_')[0] else: fle_nm_Splt = input_file.split('/')[-1].split('.dat')[0] outPath = input_file.split('/') print str(datetime.datetime.now()) fleSzeChk = False reRun = 0 execLst = [] connLst = [] if len(arguments) > 3: if arguments[3].split('#')[0] == 'rerun': reRun = int(arguments[3].split('#')[-1]) else: connLst = [int(x) for x in arguments[3].split('#')[-1].split(',')] print reRun, connLst flePth = '/'.join(srcPrp.split('/')[0:-2]) + '/json/' print 'absolute file path', flePth # Json pointer Property file check prcFle = 'pointer to Json property file' mode = 'r' jsonSrcPntr = fleCheck(srcPrp, prcFle, mode) for prp in jsonSrcPntr: flePnter = prp.split('|')[1].strip() if prp.split('|')[0] != 'SR': srNo = int(prp.split('|')[0].strip()) if reRun != 0: if srNo >= reRun: execLst.append(flePnter) elif len(connLst) != 0: if srNo in connLst: print 'came here' execLst.append(flePnter) else: execLst.append(flePnter) print execLst g_gp_host, g_gp_db, g_hdp_loc, nw_Drive = getConn() try: conn = psycopg2.connect("hostaddr=%s dbname=%s" % (g_gp_host, g_gp_db)) conn.autocommit = True cur = conn.cursor() except psycopg2.Error, e: print 'Unable to connect to the database %s' % e errMsg = 'Error: Database connecitivity issues -%s' % e deMsg = 'DEBUG:Please check the database connections' fnlStrg = errMsg + '\n' + deMsg sendErromail(fnlStrg, 'Issue in Database Connection String', '') sys.exit(1) if trnSfrm == 0: # Input file Check prcFle = 'input 
file on nw_drive' mode = 'r' inp = fleCheck(input_file, prcFle, mode) for fle in execLst: layout = flePth + fle print layout # Json layout check prcFle = 'Json Layout file' mode = 'r' layout_File = fleCheck(layout, prcFle, mode) json_load = json.load(layout_File) schema = json_load["db_prop"][0]["schema"] table = json_load["db_prop"][0]["table"] operation = json_load["db_prop"][0]["operation"] func = g_func_var if "mul_fle_Flg" in json_load["db_prop"][0].keys(): fle_nm_Splt = table cnt, tmStmp = 0, '' m, strt, stp = 0, 0, 0 flePath = [] while m < (len(outPath) - 1): if outPath[m] == '%s' % nw_Drive: strt = m if outPath[m] == 'inbound': stp = m m += 1 for n in range(strt + 1, stp): flePath.append(outPath[n]) progJobPath = '/'.join(flePath) jobFileName = outPath[len(outPath) - 1].split('.') jobName = progJobPath.split('/')[-1].lower() print progJobPath, jobName tsValid = '/home/phdftpuser/%s/%s/logs/%s_tstmp.txt' % (g_test_var, jobName, fle_nm_Splt) print tsValid prcFle = 'timestamp log file' mode = 'a+' tsOutput = fleCheck(tsValid, prcFle, mode) if os.path.getsize(tsValid) == 0: print 'Gets here' fleSzeChk = True if trnSfrm == 0: # output file location on nw_drive location check fOutput = '/%s/%s/wip/%s.txt' % (nw_Drive, progJobPath, table) prcFle = 'Output file on nw_drive' mode = 'w' output = fleCheck(fOutput, prcFle, mode) cnt, tmStmp = process(json_load, inp, output, tsOutput, fleSzeChk, table) output.close() print cnt moveToHDFS(progJobPath, jobName, table, g_hdp_loc, nw_Drive) else: cnt, tmStmp = pigprocess(json_load, input_file, jobName, inu_Flnme, fle_nm_Splt, fleSzeChk, tsOutput, table, g_hdp_loc) validate(conn, cur, schema, table, cnt, operation, func) tsOutput.write(tmStmp + '\n') tsOutput.close() if trnSfrm == 0: inp.seek(0) os.remove(input_file) print 'file removed successfully' conn.commit() cur.close() conn.close() if trnSfrm == 0: inp.close() print 'completed' print str(datetime.datetime.now()) if __name__ == "__main__": if len(sys.argv) in (2, 3, 4, 
5): main(sys.argv[1:]) else: print "Needs an layout, input file argument and flip code either0 for mutliple record type files and use 1 for input file with single recordtype and more than a gigabyte for the program to proceed and optional rerun and list argument when program exited abnormally" errMsg = 'Error: Arugments error' deMsg = 'DEBUG:Needs an layout, input file argument for the program to proceed' fnlStrg = errMsg + '\n' + deMsg sendErromail(fnlStrg, 'Intial Arugument Error', '') sys.exit(1)
18,292
b8400c033f6392c5a77c31a25a12a8c809c2c326
def submission_helper(pred): p = np.zeros(600) pred.resize(p.shape) p[0] = pred[0] for j in range(1, 600): a = p[j - 1] b = pred[j] if b < a: p[j] = a else: p[j] = b return p def calc_all_areas(images): images = np.array(images) (num_images, times, _, _) = images.shape obj_acc = np.zeros(1200) for j in range(num_images): # print 'Calculating area for time %d and slice %d...' % (i, j) img = np.array(images[j]) img = diff_images(img) #30 frames to 29 diff frames #print img.shape net.blobs['data'].reshape(1, *img.shape) net.blobs['data'].data[...] = img #print 'net.forward() stars' net.forward() #print 'net.forward() finished' prob = net.blobs['prob'].data # 1,1,1,1200 obj = prob[0][1][0] obj_acc = obj_acc + obj # dset.sys_str = (',0'* 50) + (',1.0'*(550)) # dset.dia_str = (',0'* 50) + (',1.0'*(550)) obj_avg = obj_acc / num_images obj_round = np.around(obj_avg, decimals=6) obj_round = np.where(obj_round<0.45, 0.0 , obj_round) obj_round = np.where(obj_round>0.55, 1.0 , obj_round) dset.sys_str = obj_round[:600] dset.dia_str = obj_round[600:] dset.sys_str = submission_helper(dset.sys_str) dset.dia_str = submission_helper(dset.dia_str) dset.sys_str = ','.join(map(str, dset.sys_str)) dset.dia_str = ','.join(map(str, dset.dia_str)) def get_hostogram_dataset(dataset): # shape: num slices, num snapshots, rows, columns print 'Calculating volumes...' 
calc_all_areas(dataset.images) # area_totals = [calc_total_volume(a, dataset.area_multiplier, dataset.dist) # for a in all_areas] # dataset.dia_str = '' # dataset.sys_str = '' ############################################################################### DATA_PATH = '/home/moon/kaggle/data/' caffe.set_mode_gpu() net = caffe.Net('fcn2_deploy.prototxt', './model_logs/fcn_iter_3000000.caffemodel', caffe.TEST) valid_dir = os.path.join(DATA_PATH, 'validate') studies = next(os.walk(valid_dir))[1] valid_csv = open('submission_v2.csv', 'w') header = 'Id,P0,P1,P2,P3,P4,P5,P6,P7,P8,P9,P10,P11,P12,P13,P14,P15,P16,P17,P18,P19,P20,P21,P22,P23,P24,P25,P26,P27,P28,P29,P30,P31,P32,P33,P34,P35,P36,P37,P38,P39,P40,P41,P42,P43,P44,P45,P46,P47,P48,P49,P50,P51,P52,P53,P54,P55,P56,P57,P58,P59,P60,P61,P62,P63,P64,P65,P66,P67,P68,P69,P70,P71,P72,P73,P74,P75,P76,P77,P78,P79,P80,P81,P82,P83,P84,P85,P86,P87,P88,P89,P90,P91,P92,P93,P94,P95,P96,P97,P98,P99,P100,P101,P102,P103,P104,P105,P106,P107,P108,P109,P110,P111,P112,P113,P114,P115,P116,P117,P118,P119,P120,P121,P122,P123,P124,P125,P126,P127,P128,P129,P130,P131,P132,P133,P134,P135,P136,P137,P138,P139,P140,P141,P142,P143,P144,P145,P146,P147,P148,P149,P150,P151,P152,P153,P154,P155,P156,P157,P158,P159,P160,P161,P162,P163,P164,P165,P166,P167,P168,P169,P170,P171,P172,P173,P174,P175,P176,P177,P178,P179,P180,P181,P182,P183,P184,P185,P186,P187,P188,P189,P190,P191,P192,P193,P194,P195,P196,P197,P198,P199,P200,P201,P202,P203,P204,P205,P206,P207,P208,P209,P210,P211,P212,P213,P214,P215,P216,P217,P218,P219,P220,P221,P222,P223,P224,P225,P226,P227,P228,P229,P230,P231,P232,P233,P234,P235,P236,P237,P238,P239,P240,P241,P242,P243,P244,P245,P246,P247,P248,P249,P250,P251,P252,P253,P254,P255,P256,P257,P258,P259,P260,P261,P262,P263,P264,P265,P266,P267,P268,P269,P270,P271,P272,P273,P274,P275,P276,P277,P278,P279,P280,P281,P282,P283,P284,P285,P286,P287,P288,P289,P290,P291,P292,P293,P294,P295,P296,P297,P298,P299,P300,P301,P302,P303,P304,P305,P306,P307,P308,P309,P310,P3
11,P312,P313,P314,P315,P316,P317,P318,P319,P320,P321,P322,P323,P324,P325,P326,P327,P328,P329,P330,P331,P332,P333,P334,P335,P336,P337,P338,P339,P340,P341,P342,P343,P344,P345,P346,P347,P348,P349,P350,P351,P352,P353,P354,P355,P356,P357,P358,P359,P360,P361,P362,P363,P364,P365,P366,P367,P368,P369,P370,P371,P372,P373,P374,P375,P376,P377,P378,P379,P380,P381,P382,P383,P384,P385,P386,P387,P388,P389,P390,P391,P392,P393,P394,P395,P396,P397,P398,P399,P400,P401,P402,P403,P404,P405,P406,P407,P408,P409,P410,P411,P412,P413,P414,P415,P416,P417,P418,P419,P420,P421,P422,P423,P424,P425,P426,P427,P428,P429,P430,P431,P432,P433,P434,P435,P436,P437,P438,P439,P440,P441,P442,P443,P444,P445,P446,P447,P448,P449,P450,P451,P452,P453,P454,P455,P456,P457,P458,P459,P460,P461,P462,P463,P464,P465,P466,P467,P468,P469,P470,P471,P472,P473,P474,P475,P476,P477,P478,P479,P480,P481,P482,P483,P484,P485,P486,P487,P488,P489,P490,P491,P492,P493,P494,P495,P496,P497,P498,P499,P500,P501,P502,P503,P504,P505,P506,P507,P508,P509,P510,P511,P512,P513,P514,P515,P516,P517,P518,P519,P520,P521,P522,P523,P524,P525,P526,P527,P528,P529,P530,P531,P532,P533,P534,P535,P536,P537,P538,P539,P540,P541,P542,P543,P544,P545,P546,P547,P548,P549,P550,P551,P552,P553,P554,P555,P556,P557,P558,P559,P560,P561,P562,P563,P564,P565,P566,P567,P568,P569,P570,P571,P572,P573,P574,P575,P576,P577,P578,P579,P580,P581,P582,P583,P584,P585,P586,P587,P588,P589,P590,P591,P592,P593,P594,P595,P596,P597,P598,P599' valid_csv.write(header) valid_csv.write('\n') studies.sort() for s in studies: dset = Dataset(os.path.join(valid_dir, s), s) print 'Processing dataset %s...' 
% dset.name try: dset.load() get_hostogram_dataset(dset) valid_csv.write('%s_Diastole,%s\n' % (dset.name, dset.dia_str)) valid_csv.write('%s_Systole,%s\n' % (dset.name, dset.sys_str)) except Exception as e: print '***ERROR***: Exception %s thrown by dataset %s' % (str(e), dset.name) # dset.sys_str = (',0'* 50) + (',1.0'*(550)) # dset.dia_str = (',0'* 50) + (',1.0'*(550)) # valid_csv.write('%s_Diastole%s\n' % (dset.name, dset.dia_str)) # valid_csv.write('%s_Systole%s\n' % (dset.name, dset.sys_str)) valid_csv.close()
18,293
4a12ede431445dfbec7ed09752ba8ad85a7a93a5
# Generated by Django 3.1.4 on 2020-12-09 12:11 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('portfolio', '0007_portfolio_about'), ] operations = [ migrations.AlterField( model_name='portfolio', name='about', field=models.TextField(max_length=255), ), ]
18,294
2257f375b8d507e3812dd872da77c22518a9bec9
import discord from discord.ext import commands class Mute(commands.Cog): def init(self, bot): self.bot = bot self._last_member = None @commands.command() @commands.has_permissions(kick_members=True) async def mute(self, ctx, member: discord.Member, *, reason=None): guild = ctx.guild mutedRole = discord.utils.get(guild.roles, name="Muted") if not mutedRole: mutedRole = await guild.create_role(name="Muted") for channel in guild.channels: await channel.set_permissions(mutedRole, speak=False, send_messages=False, read_message_history=True, read_messages=False) await member.add_roles(mutedRole, reason=reason) await member.send(f"You have been muted from: {guild.name}") await ctx.send(f'`{member}` is has been muted.') @commands.command() @commands.has_permissions(kick_members=True) async def unmute(self, ctx, member: discord.Member, *, reason=None): guild = ctx.guild mutedRole = discord.utils.get(ctx.guild.roles, name="Muted") if mutedRole in member.roles: await member.remove_roles(mutedRole, reason=reason) await member.send(f"You have been unmuted from: {guild.name}") await ctx.send(f'`{member}` is has been unmuted.') else: await ctx.send(f'`{member}` is not currently muted.') def setup(bot): bot.add_cog(Mute(bot)) #embed = discord.Embed(title="muted", description=f"{member.mention} was muted ", colour=discord.Colour.light_gray()) #embed.add_field(name="reason:", value=reason, inline=False) #await ctx.send(embed=embed)
18,295
5661ae13ce27efb3050e0c9d91a19864e989694d
import time import math import sys sys.setrecursionlimit(2 * 10 ** 5) def findMultiplesOf3and5(number): sum = 0 for i in range(1, number): if i % 3 == 0 or i % 5 == 0: sum = sum + i print(sum) def fibonacci(n): a, b = 0, 1 sumValue = 0 while b < n: a, b = b, a + b if b % 2 == 0: sumValue = sumValue + b return sumValue ''' # This solution used sqrt function which takes addition logN time. The second solution to iterate over factor seems more efficient. def find_greatest_prime_factor(num): #print(num) # base condition - if num < 3: return num # find the sqrt num_sqrt = math.ceil(math.sqrt(num)) for i in xrange(int(num_sqrt), 1, -1): if num%i == 0: #check if i is prime if find_greatest_prime_factor(i) == i: return i #num is prime - return num return num ''' def largest_prime_factor(num, div=2): while div < num: if num % div == 0 and num / div > 1: num = num / div div = 2 else: div = div + 1 return num start_time = time.time() # findMultiplesOf3and5(1000) #print(fibonacci(4000000)) print(largest_prime_factor(600851475143)) print("This took", time.time() - start_time, "seconds to run.") #start_time = time.time() #print("This took", time.time() - start_time, "seconds to run.")
18,296
d3fcba6dc85e4e1fa011d12d191af0692ef2d7a3
def get_image(file_name): with open(file_name) as f: image = [int(i) for i in f.readline().rstrip().split()[0]] return image def get_row(image, start_index, width): row = [image[start_index:][pixel] for pixel in range(width)] return row def get_layer(image, start_index, width, height): layer = [[] for i in range(height)] for i in range(height): layer[i] = get_row(image, start_index, width) start_index += width return layer, start_index def get_all_layers(image, start_index, width, height): num_layers = int(len(image) / (width * height)) all_layers = [[] for i in range(num_layers)] for layer in range(num_layers): all_layers[layer], start_index = get_layer(image, start_index, width, height) return all_layers def count_digit(layer, digit): flattened = [] for row in layer: flattened += row return flattened.count(digit) def merge_layers(all_layers): final_image = all_layers[0].copy() for l, layer in enumerate(all_layers): for r, row in enumerate(layer): for p, pixel in enumerate(row): if pixel == 0 or pixel == 1: final_image[r][p] = pixel return final_image image = get_image("08.txt") all_layers = get_all_layers(image, 0, 25, 6) num_zeros = [count_digit(layer, 0) for layer in all_layers] num_ones = [count_digit(layer, 1) for layer in all_layers] num_twos = [count_digit(layer, 2) for layer in all_layers] position = num_zeros.index(min(num_zeros)) total = num_ones[position] * num_twos[position] print(total) all_layers.reverse() for i in merge_layers(all_layers): print(i)
18,297
3388a8e28bdacdedfa250b490e3f422a48501157
import tkinter as tk from tkinter import ttk root = tk.Tk() label = ttk.Label(root) label.config(text='Hi, there') label.pack() root.mainloop()
18,298
f6114ff257ced066c8f3f1a294cf0f80da798bf1
import unittest from leetcode.longest_palindrome import longest_palindromic_substring_dp as longest_palindrome import leetcode class TestLongestPalindrome(unittest.TestCase): def test1(self): s = "babad" if longest_palindrome(s) in ["aba","bab"]: self.assertEqual(1,1) else: self.assertEqual(1,2) def test_null_str(self): s = None if longest_palindrome(s) in [""]: self.assertEqual(1,1) else: self.assertEqual(1,2) self.assertEqual(longest_palindrome(s), "") def test_empty_str(self): s = "" if longest_palindrome(s) in [""]: self.assertEqual(1,1) else: self.assertEqual(1,2) self.assertEqual(longest_palindrome(s), "") def test_nonpalindrome(self): s = "abc" if longest_palindrome(s) in ["a", "b", "c"]: self.assertEqual(1,1) else: self.assertEqual(1,2) def test2(self): s = "aaaaaab" if longest_palindrome(s) in ["aaaaaa"]: self.assertEqual(1,1) else: self.assertEqual(1,2) def test3(self): s = "abcdbb" if longest_palindrome(s) in ["bb"]: self.assertEqual(1,1) else: self.assertEqual(1,2) def test4(self): s = "aaabaaaa" if longest_palindrome(s) in ["aaabaaa"]: self.assertEqual(1,1) else: self.assertEqual(1,2) def test5(self): s = 
"abababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababa" self.assertEqual(leetcode.longest_palindrome.longest_palindrome_original(s), leetcode.longest_palindrome.longest_palindromic_substring_dp(s)) if __name__ == "__main__": unittest.main()
18,299
5cbf1a9bef7cd44bf422a84917b9981892aa3ac1
from torch import nn from torch.autograd import Variable from torch.nn import functional as f import torch import numpy COVERAGE = 50 class Encoder(nn.Module): def __init__(self, input_size): super(Encoder, self).__init__() self.kernel_size_vertical = (3,1) self.kernel_size_horizontal = (1,3) self.leakyrelu = nn.LeakyReLU() self.n_channels_0_vertical = input_size self.n_channels_1_vertical = input_size*2 self.n_channels_2_vertical = input_size*4 self.n_channels_3_vertical = input_size*8 # self.n_channels_4_vertical = input_size*16 self.n_channels_0_horizontal = input_size self.n_channels_1_horizontal = input_size*4 self.n_channels_2_horizontal = input_size*8 self.n_channels_3_horizontal = input_size*16 print(self.n_channels_0_vertical) print(self.n_channels_1_vertical) print(self.n_channels_2_vertical) print(self.n_channels_3_vertical) # print(self.n_channels_4_vertical) print(self.n_channels_0_horizontal) print(self.n_channels_1_horizontal) print(self.n_channels_2_horizontal) print(self.n_channels_3_horizontal) self.conv2d_1_vertical = nn.Conv2d(in_channels=self.n_channels_0_vertical, out_channels=self.n_channels_1_vertical, kernel_size=self.kernel_size_vertical, padding=(1,0), groups=input_size) self.conv2d_2_vertical = nn.Conv2d(in_channels=self.n_channels_1_vertical, out_channels=self.n_channels_2_vertical, kernel_size=self.kernel_size_vertical, padding=(1,0)) self.conv2d_3_vertical = nn.Conv2d(in_channels=self.n_channels_2_vertical, out_channels=self.n_channels_3_vertical, kernel_size=self.kernel_size_vertical, padding=(1,0)) # self.conv2d_4_vertical = nn.Conv2d(in_channels=self.n_channels_3_vertical, # out_channels=self.n_channels_4_vertical, # kernel_size=self.kernel_size_vertical, # padding=(1,0)) self.conv2d_1_horizontal = nn.Conv2d(in_channels=self.n_channels_0_horizontal, out_channels=self.n_channels_1_horizontal, kernel_size=self.kernel_size_horizontal, padding=(0,1), groups=input_size) self.conv2d_2_horizontal = 
nn.Conv2d(in_channels=self.n_channels_1_horizontal, out_channels=self.n_channels_2_horizontal, kernel_size=self.kernel_size_horizontal, padding=(0,1)) self.conv2d_3_horizontal = nn.Conv2d(in_channels=self.n_channels_2_horizontal, out_channels=self.n_channels_3_horizontal, kernel_size=self.kernel_size_horizontal, padding=(0,1)) def vertical_convolution(self, x): # expected convolution input shape = (batch, channel, H, W) # [1, 1, 50, 30] x = self.conv2d_1_vertical(x) x = self.leakyrelu(x) # [1, 3, 50, 30] x = self.conv2d_2_vertical(x) x = self.leakyrelu(x) x = self.conv2d_3_vertical(x) x = self.leakyrelu(x) # x = self.conv2d_4_vertical(x) # x = self.leakyrelu(x) return x def horizontal_convolution(self, x): # expected convolution input shape = (batch, channel, H, W) # [1, 1, 50, 30] x = self.conv2d_1_horizontal(x) x = self.leakyrelu(x) # [1, 3, 50, 30] x = self.conv2d_2_horizontal(x) x = self.leakyrelu(x) x = self.conv2d_3_horizontal(x) x = self.leakyrelu(x) return x def forward(self, x): # expected convolution input shape = (N, C, H, W) # [1, 1, 50, 30] x_vertical = self.vertical_convolution(x) x_horizontal = self.horizontal_convolution(x) # [1, 6, 50, 30] n, c, h, w = x_vertical.shape x_vertical = x_vertical.view([n,h*c,w]) # [1, 6, 50, 30] n, c, h, w = x_horizontal.shape x_horizontal = x_horizontal.view([n,h*c,w]) # print(x_vertical.shape) # print(x_horizontal.shape) x = torch.cat([x_vertical, x_horizontal], dim=1) # [1, 300, 30] # x = x.permute([0,2,1]) return x class Decoder(nn.Module): def __init__(self, input_size, hidden_size, n_layers, output_size, dropout_rate): super(Decoder, self).__init__() self.rnn_input_size = input_size self.rnn_hidden_size = hidden_size # aka output size self.rnn_n_layers = n_layers self.bidirectional = True self.n_directions = int(self.bidirectional)+1 self.gru = nn.GRU(input_size=self.rnn_input_size, hidden_size=self.rnn_hidden_size, num_layers=self.rnn_n_layers, batch_first=True, dropout=dropout_rate, 
bidirectional=self.bidirectional) self.output_linear = torch.nn.Linear(hidden_size*self.n_directions, output_size) self.leakyrelu = torch.nn.LeakyReLU() def output_fcn(self, x): x = self.output_linear(x) return x def output_function(self, x): # print("output", x.shape) # shape = (N, H, L) = (batch_size, hidden, length) # (1,16,32) batch_size, hidden, length = x.shape outputs = list() for l in range(length): x_i = x[:,:,l] x_i = x_i.view(batch_size, hidden, 1) x_i = x_i.permute([0,2,1]) x_i = self.output_linear(x_i) outputs.append(x_i) x = torch.cat(outputs, dim=1) x = x.permute([0,2,1]) return x def forward(self, x): # input: (batch, seq_len, input_size) # hidden: (num_layers * num_directions, batch, hidden_size) x = x.permute([0,2,1]) output, h_n = self.gru(x) # [1, 30, 5] x = output.permute([0,2,1]) # [1, 5, 30] x = self.output_function(x) return x class EncoderDecoder(nn.Module): def __init__(self, input_size, output_size, hidden_size, n_layers, dropout_rate): super(EncoderDecoder, self).__init__() self.encoder = Encoder(input_size=input_size) self.decoder = Decoder(input_size=8400, output_size=output_size, hidden_size=hidden_size, n_layers=n_layers, dropout_rate=dropout_rate) def forward(self, x): x = self.encoder(x) # print(x.shape) x = self.decoder(x) return x