text stringlengths 38 1.54M |
|---|
from rest_framework.test import APITestCase
from rest_framework import status
from django.contrib.auth.models import User
from rest_framework_jwt.settings import api_settings
import json
JWT_DECODE_HANDLER = api_settings.JWT_DECODE_HANDLER
class AuthTests(APITestCase):
    """Exercise the /auth/signup/ and /auth/signin/ endpoints end to end."""

    def tearDown(self):
        # Drop every user a test created so the cases stay independent.
        User.objects.all().delete()

    def test_signup(self):
        # A fresh username plus password creates exactly one user.
        response = self.client.post("/auth/signup/", {'username': 'John', 'password': 'complex_password'})
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(User.objects.count(), 1)

    def test_signup_exist_user(self):
        # Signing up the same username twice is rejected the second time.
        first = self.client.post("/auth/signup/", {'username': 'John', 'password': 'complex_password'})
        self.assertEqual(first.status_code, status.HTTP_201_CREATED)
        second = self.client.post("/auth/signup/", {'username': 'John', 'password': 'complex_password'})
        self.assertEqual(second.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertEqual(User.objects.count(), 1)

    def test_signup_bad_request(self):
        # A signup without a password is a bad request.
        response = self.client.post("/auth/signup/", {'username': 'John'})
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_signin(self):
        # A registered user gets a JWT whose decoded payload carries the username.
        self.client.post("/auth/signup/", {'username': 'John', 'password': 'complex_password'})
        response = self.client.post("/auth/signin/", {'username': 'John', 'password': 'complex_password'})
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        payload = JWT_DECODE_HANDLER(json.loads(response.content)['token'])
        self.assertEqual(payload['username'], "John")

    def test_signin_login_failed(self):
        # A wrong password yields the 'Failed' message.
        self.client.post("/auth/signup/", {'username': 'John', 'password': 'complex_password'})
        response = self.client.post("/auth/signin/", {'username': 'John', 'password': 'no password'})
        self.assertEqual(json.loads(response.content)['Message'], "Failed")
def validate_num(n):
    """Return True if *n* (a string of decimal digits) passes the Luhn check.

    From the right: the last digit is the check digit; every second digit of
    the remainder is doubled (subtracting 9 when the double exceeds 9), and
    the total including the check digit must be divisible by 10.

    Fixes vs the original: no more mutating `rest` while iterating it via
    `rest.index(i)` (fragile with duplicate values); each digit is adjusted
    locally instead.
    """
    digits = [int(ch) for ch in str(n)][::-1]
    check, rest = digits[0], digits[1:]
    total = check
    for idx, d in enumerate(rest):
        if idx % 2 == 0:
            d *= 2
            if d > 9:
                d -= 9
        total += d
    return total % 10 == 0


if __name__ == "__main__":
    # Guarded so importing this module (e.g. for tests) does not block on stdin.
    num = input("Enter a number: ")
    print(f"Is your credit card number valid? :{validate_num(num)}")
|
# coding: utf-8
from flask import Flask
from flask import render_template
from flask import request,redirect,flash,get_flashed_messages,url_for,escape
from sqlalchemy import *
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
import datetime
app = Flask(__name__)
app.config['DEBUG'] = True
# NOTE(review): hard-coded secret key — acceptable for a toy app, but move to
# an environment variable before any real deployment.
app.config["SECRET_KEY"] = "KOZHEDED"
app.config["REMEMBER_COOKIE_DURATION"] = datetime.timedelta(minutes=30)
# SQLite file in the working directory; recycle pooled connections hourly.
engine = create_engine('sqlite:///kozheded.db',pool_recycle=3600, encoding="utf-8")
# Thread-local session registry (classic SQLAlchemy-with-Flask pattern).
db_session = scoped_session(sessionmaker(autocommit=False,
                                         autoflush=False,
                                         bind=engine))
Base = declarative_base()
# Expose `Model.query` on every declarative class.
Base.query = db_session.query_property()
@app.teardown_appcontext
def shutdown_session(exception=None):
    """Return the scoped session to the registry when the app context ends."""
    db_session.remove()
class Joke(Base):
    """A single joke row: text plus its creation timestamp."""
    __tablename__ = 'jokes'
    id = Column(Integer, primary_key=True)
    text = Column(Text)
    timestamp = Column(DateTime)

    def __init__(self, text):
        self.text = text
        # Creation time is stamped when the object is instantiated.
        self.timestamp = datetime.datetime.now()
def init_db():
    """Create all tables and insert one seed joke.

    NOTE(review): presumably a one-off setup helper — calling it again
    inserts the seed joke a second time.
    """
    Base.metadata.create_all(bind=engine)
    joke = Joke(u'Когда я умер, не было никого, кто бы не засмеялся!')
    db_session.add(joke)
    db_session.commit()
@app.route('/')
def index():
    """Render every joke, newest first.

    Fix: the textual ``order_by('timestamp desc')`` form is deprecated and
    rejected by SQLAlchemy >= 1.4; order by the column element instead.
    """
    jokes = Joke.query.order_by(Joke.timestamp.desc())
    return render_template('index.html', jokes=jokes)
@app.route('/add', methods=['GET', 'POST'])
def add():
    """GET: show the add-joke form. POST: store the joke, then redirect home."""
    if request.method == 'GET':
        return render_template('add.html')
    if request.method == 'POST':
        # Escape the submitted text before storing it.
        text = escape(request.form['text'])
        timestamp = datetime.datetime.now()  # NOTE(review): unused — Joke stamps its own time
        if len(text) > 0:
            joke = Joke(text)
            db_session.add(joke)
            db_session.commit()
        return redirect(url_for('index'))
# Start the development server only when run directly (not on import).
if __name__ == '__main__':
    app.run(debug=True)
|
# -*- coding: utf-8 -*-
import os
from flask import Flask, url_for, request, redirect, render_template, make_response, jsonify
from flask.views import View
from werkzeug.wrappers import Response
from flask_sqlalchemy import SQLAlchemy
import settings
from application import user
project_dir = os.path.dirname(os.path.abspath(__file__))
database_file = "sqlite:///{}".format(os.path.join(project_dir, "database.db"))
"""
创建实例app,接收包或模块的名字作为参数,一般都传递__name__;
可以让flask.helpers.get_root_path函数通过传入这个名字确定程序的根目录,
以便获得静态文件和模板文件的目录。
Flask类实现了一个wsgi应用
"""
app = Flask(__name__, template_folder='templates')
app.register_blueprint(user.user_bp)
"""
配置管理
app.config['DEBUG'] = True
app.config.update(DEBUG=True, SECRET_KEY='...')
app.config.from_object('settings') # 通过字符串的模块名字
# 默认配置文件不存在时会抛异常,使用silent=True只返回False,但不抛异常
app.config.from_pyfile('settings.py', silent=True)
# > export SETTINGS='settings.py'
app.config.from_envvar('SETTINGS') # 通过环境变量加载,获取路径后调用from_pyfile方式加载
"""
app.config.from_object(settings) # 引用模块,然后导入模块对象
app.config["SQLALCHEMY_DATABASE_URI"] = database_file # 配置数据库
"""
建立数据库链接,初始化db变量,通过db访问数据库
"""
db = SQLAlchemy(app)
# Model definition
class Book(db.Model):
    # The title doubles as the primary key, so titles must be unique.
    title = db.Column(db.String(80), unique=True, nullable=False, primary_key=True)

    def __repr__(self):
        return "<Title: {}>".format(self.title)
"""
app.route装饰器会將URL和执行的视图函数的关系保存到app.url_map属性上;
处理URL和视图函数的关系的程序就是路由;
wesmart就是视图函数;
"""
@app.route('/')
def wesmart():
    """Root view: returns a dict that the custom JSONResponse serializes."""
    greeting = {'message': "Hello Flask !"}
    return greeting
# 响应response: (response, status, headers)
@app.route('/custom_headers')
def headers():
    """Response tuple demo: (body, status, extra headers)."""
    body = {'headers': [1, 2, 3]}
    return body, 201, [('X-Request-Id', '100')]
# 动态URL规则
@app.route('/article/<iid>')
def article(iid):
    """Dynamic URL rule: echo the item id captured from the path."""
    return f"Item:{iid}"
# /article?page_name=1 or /blog?page_name=1
# /article?page_name=1 or /blog?page_name=1
@app.route('/<any(article, blog):page_name>')
def item(page_name):
    """Echo which of the two allowed top-level pages was requested.

    Fix: the route captures the variable as ``page_name`` but the view
    parameter was named ``url_path``, so Flask raised a TypeError on every
    request; the parameter must match the rule's variable name.
    """
    return page_name
@app.route('/people/', methods=['POST', 'GET'])
def people():
    """Show the caller's name and user agent; redirect to login when no name."""
    name = request.args.get('name')
    if name:
        user_agent = request.headers.get('User-Agent')
        return 'Name: {0}; UA: {1}'.format(name, user_agent)
    return redirect(url_for('login'))
@app.route('/login/', methods=['GET', 'POST'])
def login():
    """POST: acknowledge the submitted user id. GET: show a placeholder page."""
    if request.method != 'POST':
        return 'Open Login page'
    user_id = request.form.get('user_id')
    return 'User: {} login'.format(user_id)
@app.route('/book', methods=['GET', 'POST'])
def home():
    """List all books; on POST, add a book titled by the submitted form field."""
    try:
        if request.form:
            # print(request.form)
            book = Book(title=request.form.get("title"))
            db.session.add(book)
            db.session.commit()
    except Exception as e:
        # Best-effort insert: e.g. a duplicate title (primary key) lands here.
        print("Failed to add book")
        print(e)
    books = Book.query.all()
    return render_template('home.html', books=books)
@app.route('/update_book', methods=['POST'])
def update():
    """Rename the book matching `oldtitle` to `newtitle`, then redirect."""
    try:
        newtitle = request.form.get("newtitle")
        oldtitle = request.form.get("oldtitle")
        book = Book.query.filter_by(title=oldtitle).first()
        # NOTE(review): when no book matches, `book` is None and the
        # AttributeError below is swallowed by the except — silent no-op.
        book.title = newtitle
        db.session.commit()
    except Exception as e:
        print("Couldn't update book title")
        print(e)
    return redirect("/book")
@app.route("/delete_book", methods=["POST"])
def delete():
title = request.form.get("title")
book = Book.query.filter_by(title=title).first()
db.session.delete(book)
db.session.commit()
return redirect("/book")
@app.errorhandler(404)
def not_found(error):
    """Render the custom error page with an explicit 404 status."""
    return make_response(render_template('error.html'), 404)
# Demonstration: build a URL for the `article` view outside a real request.
with app.test_request_context():
    print(url_for('article', iid='1'))
class JSONResponse(Response):
    """
    Custom Response class (Response > BaseResponse).
    default_mimetype='text/plain' (default: plain text)
    default_mimetype='text/html' (HTML documents)
    default_mimetype='application/json' (JSON objects)
    """
    # Responses default to HTML pages.
    default_mimetype = 'text/html'

    @classmethod
    def force_type(cls, response, environ=None):
        # Views may return a plain dict; convert it to a JSON response before
        # delegating to the base implementation.
        if isinstance(response, dict):
            response = jsonify(response)
        return super(JSONResponse, cls).force_type(response, environ)
class BaseView(View):
    """Template-rendering pluggable view.

    Subclasses must implement ``get_template_name`` and ``get_users``.
    """

    def get_template_name(self):
        raise NotImplementedError()

    def get_users(self):
        # Fix: dispatch_request relied on this undeclared method, so an
        # incomplete subclass raised AttributeError instead of a clear
        # NotImplementedError. Declaring it makes the contract explicit.
        raise NotImplementedError()

    def render_template(self, context):
        return render_template(self.get_template_name(), **context)

    def dispatch_request(self):
        # Only GET is supported by this simple read-only view.
        if request.method != 'GET':
            return 'UNSUPPORTED!'
        context = {'users': self.get_users()}
        return self.render_template(context)
class UserView(BaseView):
    """Concrete BaseView listing a single hard-coded user."""

    def get_template_name(self):
        return 'users.html'

    def get_users(self):
        fake_user = {
            'username': 'fake',
            'avatar': 'http://lorempixel.com/100/100/nature/'
        }
        return [fake_user]
app.add_url_rule('/users', view_func=UserView.as_view('userview'))
"""
if 语句可以保证当其他文件引用此文件时不会执行这个判断内的代码;
比如from app_server import app,不会执行app.run函数;
总之,在导入manager.py脚本时不会启动flask应用程序。
"""
if __name__ == '__main__':
    """
    app.run(debug=True) enables debug mode at run time, or set
    app.debug = True. Never use debug mode in production.
    """
    app.debug = app.config.get('DEBUG', False)  # enable debug mode
    app.response_class = JSONResponse
    """
    app.run starts the server.
    By default Flask listens only on 127.0.0.1:5000; to serve port 8788 the
    host and port parameters must be given; 0.0.0.0 listens on all addresses.
    After startup werkzeug.serving.run_simple polls for requests, by default
    with the single-process single-threaded werkzeug BaseWSGIServer, which
    wraps the stdlib HTTPServer in a 0.5-second select()-based 'while True'
    loop. A request to 'http://0.0.0.0:8788/' is matched against app.url_map;
    the decorated function is the view function; unmatched paths yield 404.
    The default app.run startup is only suitable for debugging — use
    Gunicorn or uWSGI in production.
    """
    app.run(host='0.0.0.0', port=8788)
|
from django.contrib import admin
# Register your models here.
from .models import contact, advice
# Expose both models in the Django admin with the default ModelAdmin options.
admin.site.register(contact)
admin.site.register(advice)
|
#!/usr/bin/python
__author__ = 'thovo'
import sys


def ibm1():
    """Print the command-line argument count and each argument in turn.

    Returns the argument count so callers and tests can verify it.

    Fix: the original used Python 2 ``print`` statements (a SyntaxError on
    Python 3); the manual while-loop counter is replaced with enumerate.
    """
    args_length = len(sys.argv)
    print("The number of arguments: " + str(args_length))
    for i, arg in enumerate(sys.argv):
        print("The argument number " + str(i) + " is " + str(arg))
    return args_length


ibm1()
from PIL import Image
from matplotlib import pyplot as plt

# Histograms of the original, darkened and equalized images.
histo = [0] * 256
histo2 = [0] * 256
histo3 = [0] * 256
cdf = [0 for i in range(256)]

image = Image.open('lena.bmp')  # assumes a single-channel grayscale bitmap — TODO confirm
dark_image = image.copy()
result = dark_image.copy()
# NOTE(review): Image.size is (width, height), so these names are swapped;
# harmless for the square lena image, kept for parity with the original.
(h, w) = image.size

# Darken to 2/3 intensity and collect histograms of both images.
# Fix: `//` preserves the original Python 2 integer-division semantics of `/`.
for i in range(h):
    for j in range(w):
        dark_image.putpixel((i, j), (image.getpixel((i, j)) // 3) * 2)
        histo[image.getpixel((i, j))] += 1
        histo2[dark_image.getpixel((i, j))] += 1

# Cumulative distribution of the darkened histogram.
tmp = 0
min_num = 0
for i in range(256):
    tmp += histo2[i]
    cdf[i] = tmp
# First non-empty bin: the CDF minimum used by the equalization formula.
for i in range(256):
    if cdf[i] != 0:
        min_num = i
        break

# Histogram equalization of the darkened image.
for i in range(w):
    for j in range(h):
        hv = round((float(cdf[dark_image.getpixel((i, j))] - cdf[min_num]) /
                    float((h * w) - cdf[min_num])) * 255.0)
        result.putpixel((i, j), int(hv))
        histo3[result.getpixel((i, j))] += 1

dark_image.show()
# Fix: raw strings — the original 'C:\Users\...' literals are a SyntaxError
# on Python 3 because \U starts a unicode escape.
dark_image.save(r'C:\Users\user\Documents\computer_vision\dark_image.bmp')
result.show()
result.save(r'C:\Users\user\Documents\computer_vision\hito_equal.bmp')

plt.bar(range(0, 256), histo)
plt.show()
plt.bar(range(0, 256), histo2)
plt.show()
plt.bar(range(0, 256), histo3)
plt.show()
|
#===============================================================================
# 31564: Cancel adding a subject in new message/multimedia message
#
# Procedure:
# 1. Open Messaging/multimedia message app
# 2. Create a new message/multimedia message
# 3.Tap on the top-right icon to show the options menu (ER1)
# 4. Tap on Cancel (ER2)
#
# Expected result:
# ER1. It appears a screen giving options to Add subject and Cancel
# ER2. User is taken back to the compose message view from which the
# Add subject screen was launched
#===============================================================================
from gaiatest import GaiaTestCase
from OWDTestToolkit import DOM
from OWDTestToolkit.utils.utils import UTILS
from OWDTestToolkit.apps.messages import Messages
class test_main(GaiaTestCase):
    """Test 31564: cancel adding a subject in a new message/MMS.

    ER1: the options screen offers Add subject and Cancel.
    ER2: Cancel returns to the compose-message view.
    """

    # Fixture data typed into the message composer.
    test_msg = "Hello World"
    test_subject = "My Subject"

    def setUp(self):
        # Set up child objects...
        GaiaTestCase.setUp(self)
        self.UTILS = UTILS(self)
        self.messages = Messages(self)
        # Establish which phone number to use.
        self.phone_number = self.UTILS.general.get_config_variable("phone_number", "custom")

    def tearDown(self):
        self.UTILS.reporting.reportResults()
        GaiaTestCase.tearDown(self)

    def test_run(self):
        # Launch messages app.
        self.messages.launch()
        # Create a new SMS
        self.messages.startNewSMS()
        # Insert the phone number in the To field
        self.messages.addNumbersInToField([self.phone_number])
        # Create MMS.
        self.messages.enterSMSMsg(self.test_msg)
        # Add subject
        self.messages.addSubject(self.test_subject)
        # Press cancel options button
        self.messages.cancelSettings()
        # Review settings options button
        self.UTILS.reporting.logResult("info", "Cliking on messages options button")
        options_btn = self.UTILS.element.getElement(DOM.Messages.messages_options_btn,
                                                    "Messages option button is displayed")
        self.UTILS.test.test(options_btn, "Settings options.", True)
|
import inputs_fixed_len
import tensorflow as tf
import argparse
import os
import model
import re
import time
import numpy as np
import utils
import matplotlib.pyplot as plt
import sklearn.ensemble
import sklearn.metrics
import pickle
import itertools
# CLI: classifier choice, train/test split ratio, and which time-series
# columns to use as features.
parser = argparse.ArgumentParser()
parser.add_argument('-model', type=str)
parser.add_argument('-train_test_split_ratio', type=float, default=0.8)
parser.add_argument('-timeseries_cols', type=str, nargs='+')
args = parser.parse_args()
# Select the classifier.
# NOTE(review): any -model value other than 'rf'/'svm' leaves `model` bound
# to the imported `model` module, so model.fit(...) below would fail.
if args.model == 'rf':
    model = sklearn.ensemble.RandomForestClassifier(n_estimators=1000, max_depth=5,
                                                    min_samples_leaf=6,
                                                    class_weight='balanced')
elif args.model == 'svm':
    import sklearn.svm
    model = sklearn.svm.SVC(decision_function_shape='ovo')
# Resolve requested column names with a case-insensitive substring match, or
# fall back to the default biomechanics feature set.
if args.timeseries_cols:
    timeseries_cols = [x for x in utils.columns if any(y.lower() in x.lower() for y in
                                                       args.timeseries_cols)]
else:
    timeseries_cols = ['Knee adduction moment_l',
                       'Knee adduction moment_r',
                       'Knee Flexion Angle_l',
                       'Knee Flexion Angle_r',
                       'Hip abduction moment_l',
                       'Hip abduction moment_r',
                       'Hip extension moment_l',
                       'Hip extension moment_r']
print(timeseries_cols)
# Strip the _l/_r leg suffix, then abbreviate each column name to its word
# initials to build a short key for the cache path.
timeseries_cols_wo_leg = sorted(set([x.split('_')[0] for x in timeseries_cols]))
timeseries_cols_to_str = '_'.join([
    ''.join([y[0] for y in x.split()])
    for x in timeseries_cols_wo_leg])
timeseries_cols = sorted(timeseries_cols)
# Pickle cache location for a given split name ('train' / 'test').
npz_dir = lambda x: 'data/tfrecord_data/{}/{}.pkl'.format(timeseries_cols_to_str, x)
if not os.path.exists(npz_dir('train')):
    # First run: pull one oversized batch per split out of the TF input
    # pipeline, then cache both splits as pickles.
    with tf.Graph().as_default():
        with tf.variable_scope('inputs'):
            next_batch, trn_init_op, test_init_op = inputs_fixed_len.inputs(
                9999,
                args.train_test_split_ratio,
                timeseries_cols,
                timeseries_cols_to_str)
        with tf.Session() as sess:
            sess.run(trn_init_op)
            train_data = sess.run(next_batch)
            sess.run(test_init_op)
            test_data = sess.run(next_batch)
    with open(npz_dir('train'), 'wb') as f:
        pickle.dump(train_data, f)
    with open(npz_dir('test'), 'wb') as f:
        pickle.dump(test_data, f)
else:
    # Subsequent runs: reuse the cached pickles.
    with open(npz_dir('train'), 'rb') as f:
        train_data = pickle.load(f)
    with open(npz_dir('test'), 'rb') as f:
        test_data = pickle.load(f)
# Features = aggregated time-series statistics + static features, concatenated.
train_x = np.concatenate((train_data['x_t_agg'], train_data['x_s']), axis=1)
train_y = train_data['y']
test_x = np.concatenate((test_data['x_t_agg'], test_data['x_s']), axis=1)
test_y = test_data['y']
model.fit(train_x, train_y)
train_pred = model.predict(train_x)
test_pred = model.predict(test_x)
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    print(cm)
    # Heatmap of the (possibly row-normalized) counts.
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    ticks = np.arange(len(classes))
    plt.xticks(ticks, classes, rotation=45)
    plt.yticks(ticks, classes)
    # Overlay each cell's value, switching text color for contrast.
    cell_fmt = '.2f' if normalize else 'd'
    cutoff = cm.max() / 2.
    for row in range(cm.shape[0]):
        for col in range(cm.shape[1]):
            plt.text(col, row, format(cm[row, col], cell_fmt),
                     horizontalalignment="center",
                     color="white" if cm[row, col] > cutoff else "black")
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.tight_layout()
def get_accuracy(y, pred):
    """Per-leg accuracy and confusion-matrix plot (column 0 = 'r', 1 = 'l')."""
    for col, leg in enumerate('rl'):
        leg_y = y[:, col]
        leg_pred = pred[:, col]
        acc = sklearn.metrics.accuracy_score(leg_y, leg_pred)
        cnf_matrix = sklearn.metrics.confusion_matrix(leg_y, leg_pred)
        plt.figure()
        plot_confusion_matrix(cnf_matrix, ['0', '1', '2', '3', '4'], True,
                              timeseries_cols_to_str + '_' + leg)
        plt.show()
        print(leg, acc)
# Report accuracy and confusion matrices for both splits.
get_accuracy(train_y, train_pred)
get_accuracy(test_y, test_pred)
if args.model == 'rf':
    # Random forests additionally expose per-feature importances.
    print(model.feature_importances_)
# model.predict(test_data['x_t_agg'])
#
#
#
#
#
# for _ in range(args.num_epochs):
# print('epoch num', epoch_num, 'batch iteration', global_step)
# prev = time.time()
# sess.run(trn_init_op)
# sess.run(tf.local_variables_initializer())
#
# trn_feed = {model.is_training: True}
#
# a = sess.run(model.get)
#
# try:
# while True:
# if global_step % args.save_interval == 0:
# _, global_step, trn_loss_summary, _ = sess.run([model.train_op,
# model.global_step,
# model.trn_running_summary,
# model.summary_update_ops
# ],
# trn_feed
# )
#
# summary_writer.add_summary(trn_loss_summary, epoch_num)
# else:
# _, global_step, loss, _ = sess.run([model.train_op,
# model.global_step,
# model.loss,
# model.summary_update_ops
# ],
# trn_feed
# )
#
# except tf.errors.OutOfRangeError:
# sess.run(model.increment_epoch_op)
# epoch_num = sess.run(model.epoch)
# print('out of range', 'epoch', epoch_num, 'iter', global_step)
# now = time.time()
# summary_value, trn_acc = sess.run([model.summary_trn,
# model.accuracy],
# {model.is_training: False})
# summary_writer.add_summary(summary_value, global_step=epoch_num)
#
# sess.run(test_init_op)
# sess.run(tf.local_variables_initializer()) # metrics value init to 0
#
# try:
# print('test_start')
# tmp_step = 0
#
# while True:
# if tmp_step % args.save_interval == 0:
# _, test_loss_summary = sess.run([model.summary_update_ops,
# model.test_running_summary],
# {model.is_training: False})
# summary_writer.add_summary(test_loss_summary,
# global_step=epoch_num)
# else:
# sess.run(model.summary_update_ops, {model.is_training: False})
#
# tmp_step += 1
#
# except tf.errors.OutOfRangeError:
# print('test_start end')
# summary_value, test_acc = sess.run([model.summary_test,
# model.accuracy],
# {
# model.is_training: False})
# summary_writer.add_summary(summary_value, global_step=epoch_num)
#
# minutes = (now - prev) / 60
# result = 'num iter: {} | trn_acc : {} test acc : {}'.format(
# global_step, trn_acc, test_acc)
#
# message = 'took {} min'.format(minutes)
# print(model_dir)
# print(result)
# print(message)
#
# saver.save(sess, os.path.join(model_dir, 'model.ckpt'),
# global_step=epoch_num)
#
|
from resources import database
from .helpers import login, signup
async def resolve_login(_, info, **kwargs):
    """GraphQL resolver: authenticate and return a success/token payload."""
    username = kwargs.get('username')
    password = kwargs.get('password')
    try:
        token = login(username, password)
    except Exception as error:
        return {"success": False, "errors": [str(error)]}
    if token:
        return {"success": True, "token": token}
    return {"success": False, "errors": ["Incorrect username or password"]}
async def resolve_signup(_, info, **kwargs):
    """GraphQL resolver: create an account and return a success/token payload."""
    username = kwargs.get('username')
    password = kwargs.get('password')
    try:
        token = signup(username, password)
    except Exception as error:
        return {"success": False, "errors": [str(error)]}
    if token:
        return {"success": True, "token": token}
    return {"success": False, "errors": ["Couldn't create user."]}
# coding=utf-8
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.ext.declarative import declarative_base
# Shared SQLAlchemy handle; presumably bound to the Flask app elsewhere via
# db.init_app(app) — not visible in this module.
db = SQLAlchemy()
class names(db.Model):
    """Name rows. `epithet_id` presumably references epithets.id — no
    ForeignKey is declared, so confirm against the schema."""
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.UnicodeText)
    epithet_id = db.Column(db.Integer)
class epithets(db.Model):
    """Epithet rows keyed by integer id."""
    id = db.Column(db.Integer, primary_key=True)
    epithet = db.Column(db.UnicodeText)
from dataProcessor import ImageFileHandler
from Classifiers.SVM.lsvc import LinearSupportVectorClassifier
import logging
logging.basicConfig(filename="svm.log",level=logging.INFO)
def logging_wrapper(func):
    """Decorator that logs any exception raised by *func* instead of
    propagating it.

    Fixes: the wrapper now returns *func*'s result (it was silently dropped
    before, so decorated functions always returned None), and uses
    functools.wraps so the wrapped function keeps its name/docstring.
    On exception the wrapper logs and returns None.
    """
    from functools import wraps

    @wraps(func)
    def inner(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            logging.exception("There was an exception {} in function {}".format(str(e),str(func)))
    return inner
@logging_wrapper
def main():
    """Tune, train and evaluate a linear SVC, then write a submissions file.

    Fixes: the report file is managed by a `with` block (it previously leaked
    if any call raised, since the decorator swallows exceptions before
    f.close() ran), and the test-set feature attribute is `xMatrix`, matching
    the attribute used for the training handler above (`XMatrix` was
    presumably a typo — TODO confirm against ImageFileHandler).
    """
    DataPath = "Data/Processed/"
    with open("LinearSVCDetails.txt", "w") as f:
        imf = ImageFileHandler(DataPath + "train_m50_p5_a0.npy", y_index=0)
        lsvc = LinearSupportVectorClassifier(imf.xMatrix, imf.yVector)
        f.write("Data Loaded and Classifier initialized\n")
        f.write("Starting hyper-parameter tuning\n")
        best_params, best_score, results = lsvc.find_best_params()
        f.write("The best hyper-parameters are as follows: \n")
        f.write("C: {}\t| tol: {} with an F1-Measure of {}\n\n".format(
            best_params['C'], best_params['tol'], best_score
        ))
        f.write("\nPerformance metrics for the first 100 hyper-parameters_tested:\n\n")
        for index in range(min(100, len(results['params']))):
            f.write("C: {}\t| tol: {} --> {}\n".format(
                results['params'][index]['C'],
                results['params'][index]['tol'],
                results['mean_test_score'][index]
            ))
        f.write("\n\nInitializing and training a Linear Support Vector Classifier with C={} and tol={} \n".format(
            best_params['C'], best_params['tol']))
        best_C = float(best_params['C'])
        best_tol = float(best_params['tol'])
        # Retrain from scratch with the winning hyper-parameters.
        lsvc = LinearSupportVectorClassifier(imf.xMatrix, imf.yVector)
        lsvc.initialize_classifier(tol=best_tol, C=best_C)
        lsvc.train()
        imf_test = ImageFileHandler(DataPath + "test_m50_p5_a0.npy")
        predictions = lsvc(imf_test.xMatrix)
        f.write("Creating Submissions file\n")
        import csv
        # Map class index -> category label from the first CSV column.
        with open("Data/Raw/categories.csv", mode='r') as infile:
            reader = csv.reader(infile)
            categories = {i: row[0] for i, row in enumerate(reader)}
        with open("submissions.txt", 'w') as file:
            file.write('Id,Category\n')
            for i, prediction in enumerate(predictions):
                file.write(str(i) + ',' + categories[prediction])
                file.write('\n')
        f.write("Done!\n")
if __name__ == "__main__":
main() |
import math
from math import sqrt
import argparse
from pathlib import Path
# torch
import torch
from torch.optim import Adam
from torch.optim.lr_scheduler import ExponentialLR
# vision imports
from torchvision import transforms as T
from torch.utils.data import DataLoader
from torchvision.datasets import ImageFolder
from torchvision.utils import make_grid, save_image
# dalle classes and utils
from dalle_pytorch import distributed_utils
from dalle_pytorch import DiscreteVAE
# argument parsing
parser = argparse.ArgumentParser()
parser.add_argument('--image_folder', type = str, required = True,
                    help='path to your folder of images for learning the discrete VAE and its codebook')
parser.add_argument('--image_size', type = int, required = False, default = 128,
                    help='image size')
# Let the distributed backend (DeepSpeed / Horovod) register its own flags.
parser = distributed_utils.wrap_arg_parser(parser)
train_group = parser.add_argument_group('Training settings')
train_group.add_argument('--epochs', type = int, default = 20, help = 'number of epochs')
train_group.add_argument('--batch_size', type = int, default = 8, help = 'batch size')
train_group.add_argument('--learning_rate', type = float, default = 1e-3, help = 'learning rate')
train_group.add_argument('--lr_decay_rate', type = float, default = 0.98, help = 'learning rate decay')
train_group.add_argument('--starting_temp', type = float, default = 1., help = 'starting temperature')
train_group.add_argument('--temp_min', type = float, default = 0.5, help = 'minimum temperature to anneal to')
train_group.add_argument('--anneal_rate', type = float, default = 1e-6, help = 'temperature annealing rate')
train_group.add_argument('--num_images_save', type = int, default = 4, help = 'number of images to save')
model_group = parser.add_argument_group('Model settings')
model_group.add_argument('--num_tokens', type = int, default = 8192, help = 'number of image tokens')
model_group.add_argument('--num_layers', type = int, default = 3, help = 'number of layers (should be 3 or above)')
model_group.add_argument('--num_resnet_blocks', type = int, default = 2, help = 'number of residual net blocks')
model_group.add_argument('--smooth_l1_loss', dest = 'smooth_l1_loss', action = 'store_true')
model_group.add_argument('--emb_dim', type = int, default = 512, help = 'embedding dimension')
model_group.add_argument('--hidden_dim', type = int, default = 256, help = 'hidden dimension')
model_group.add_argument('--kl_loss_weight', type = float, default = 0., help = 'KL loss weight')
args = parser.parse_args()
# constants — mirror the CLI arguments for readability below
IMAGE_SIZE = args.image_size
IMAGE_PATH = args.image_folder
EPOCHS = args.epochs
BATCH_SIZE = args.batch_size
LEARNING_RATE = args.learning_rate
LR_DECAY_RATE = args.lr_decay_rate
NUM_TOKENS = args.num_tokens
NUM_LAYERS = args.num_layers
NUM_RESNET_BLOCKS = args.num_resnet_blocks
SMOOTH_L1_LOSS = args.smooth_l1_loss
EMB_DIM = args.emb_dim
HIDDEN_DIM = args.hidden_dim
KL_LOSS_WEIGHT = args.kl_loss_weight
STARTING_TEMP = args.starting_temp
TEMP_MIN = args.temp_min
ANNEAL_RATE = args.anneal_rate
NUM_IMAGES_SAVE = args.num_images_save
# initialize distributed backend
distr_backend = distributed_utils.set_backend_from_args(args)
distr_backend.initialize()
using_deepspeed = \
    distributed_utils.using_backend(distributed_utils.DeepSpeedBackend)
# data: force RGB, resize, center-crop and tensorize every image
ds = ImageFolder(
    IMAGE_PATH,
    T.Compose([
        T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img),
        T.Resize(IMAGE_SIZE),
        T.CenterCrop(IMAGE_SIZE),
        T.ToTensor()
    ])
)
# Horovod shards the dataset per worker via a DistributedSampler.
if distributed_utils.using_backend(distributed_utils.HorovodBackend):
    data_sampler = torch.utils.data.distributed.DistributedSampler(
        ds, num_replicas=distr_backend.get_world_size(),
        rank=distr_backend.get_rank())
else:
    data_sampler = None
# Shuffle only when no sampler is set (a sampler already defines ordering).
dl = DataLoader(ds, BATCH_SIZE, shuffle = not data_sampler, sampler=data_sampler)
vae_params = dict(
    image_size = IMAGE_SIZE,
    num_layers = NUM_LAYERS,
    num_tokens = NUM_TOKENS,
    codebook_dim = EMB_DIM,
    hidden_dim = HIDDEN_DIM,
    num_resnet_blocks = NUM_RESNET_BLOCKS
)
vae = DiscreteVAE(
    **vae_params,
    smooth_l1_loss = SMOOTH_L1_LOSS,
    kl_div_loss_weight = KL_LOSS_WEIGHT
)
# DeepSpeed handles device placement itself.
# NOTE(review): assumes a CUDA device is available — no CPU fallback.
if not using_deepspeed:
    vae = vae.cuda()
assert len(ds) > 0, 'folder does not contain any images'
if distr_backend.is_root_worker():
    print(f'{len(ds)} images found for training')
# optimizer
opt = Adam(vae.parameters(), lr = LEARNING_RATE)
sched = ExponentialLR(optimizer = opt, gamma = LR_DECAY_RATE)
if distr_backend.is_root_worker():
    # weights & biases experiment tracking — only on the root worker
    import wandb
    model_config = dict(
        num_tokens = NUM_TOKENS,
        smooth_l1_loss = SMOOTH_L1_LOSS,
        num_resnet_blocks = NUM_RESNET_BLOCKS,
        kl_loss_weight = KL_LOSS_WEIGHT
    )
    run = wandb.init(
        project = 'dalle_train_vae',
        job_type = 'train_model',
        config = model_config
    )
# distribute
distr_backend.check_batch_size(BATCH_SIZE)
deepspeed_config = {'train_batch_size': BATCH_SIZE}
(distr_vae, distr_opt, distr_dl, distr_sched) = distr_backend.distribute(
    args=args,
    model=vae,
    optimizer=opt,
    model_parameters=vae.parameters(),
    training_data=ds if using_deepspeed else dl,
    lr_scheduler=sched if not using_deepspeed else None,
    config_params=deepspeed_config,
)
using_deepspeed_sched = False
# Prefer scheduler in `deepspeed_config`.
if distr_sched is None:
    distr_sched = sched
elif using_deepspeed:
    # We are using a DeepSpeed LR scheduler and want to let DeepSpeed
    # handle its scheduling.
    using_deepspeed_sched = True
def save_model(path):
    """Persist the VAE hyper-params (and, on the root worker, weights) to *path*.

    Under DeepSpeed, a sharded checkpoint directory '<stem>-ds-cp' is written
    by every worker first; a plain torch checkpoint is still written below so
    a "normal" file exists to refer to.
    """
    save_obj = {
        'hparams': vae_params,
    }
    if using_deepspeed:
        cp_path = Path(path)
        path_sans_extension = cp_path.parent / cp_path.stem
        cp_dir = str(path_sans_extension) + '-ds-cp'
        distr_vae.save_checkpoint(cp_dir, client_state=save_obj)
        # We do not return so we do get a "normal" checkpoint to refer to.
    # Only the root worker writes the plain torch checkpoint with weights.
    if not distr_backend.is_root_worker():
        return
    save_obj = {
        **save_obj,
        'weights': vae.state_dict()
    }
    torch.save(save_obj, path)
# starting temperature
global_step = 0
temp = STARTING_TEMP
for epoch in range(EPOCHS):
    for i, (images, _) in enumerate(distr_dl):
        images = images.cuda()
        # Forward pass: loss for optimization plus reconstructions for logging.
        loss, recons = distr_vae(
            images,
            return_loss = True,
            return_recons = True,
            temp = temp
        )
        if using_deepspeed:
            # Gradients are automatically zeroed after the step
            distr_vae.backward(loss)
            distr_vae.step()
        else:
            distr_opt.zero_grad()
            loss.backward()
            distr_opt.step()
        logs = {}
        # Every 100 iterations: sample reconstructions, save a checkpoint,
        # anneal the gumbel temperature and step the LR scheduler.
        if i % 100 == 0:
            if distr_backend.is_root_worker():
                k = NUM_IMAGES_SAVE
                with torch.no_grad():
                    codes = vae.get_codebook_indices(images[:k])
                    hard_recons = vae.decode(codes)
                images, recons = map(lambda t: t[:k], (images, recons))
                images, recons, hard_recons, codes = map(lambda t: t.detach().cpu(), (images, recons, hard_recons, codes))
                images, recons, hard_recons = map(lambda t: make_grid(t.float(), nrow = int(sqrt(k)), normalize = True, range = (-1, 1)), (images, recons, hard_recons))
                logs = {
                    **logs,
                    'sample images': wandb.Image(images, caption = 'original images'),
                    'reconstructions': wandb.Image(recons, caption = 'reconstructions'),
                    'hard reconstructions': wandb.Image(hard_recons, caption = 'hard reconstructions'),
                    'codebook_indices': wandb.Histogram(codes),
                    'temperature': temp
                }
                wandb.save('./vae.pt')
            save_model(f'./vae.pt')
            # temperature anneal
            temp = max(temp * math.exp(-ANNEAL_RATE * global_step), TEMP_MIN)
            # lr decay
            # Do not advance schedulers from `deepspeed_config`.
            if not using_deepspeed_sched:
                distr_sched.step()
        # Collective loss, averaged
        avg_loss = distr_backend.average_all(loss)
        if distr_backend.is_root_worker():
            if i % 10 == 0:
                lr = distr_sched.get_last_lr()[0]
                print(epoch, i, f'lr - {lr:6f} loss - {avg_loss.item()}')
                logs = {
                    **logs,
                    'epoch': epoch,
                    'iter': i,
                    'loss': avg_loss.item(),
                    'lr': lr
                }
            wandb.log(logs)
        global_step += 1
    if distr_backend.is_root_worker():
        # save trained model to wandb as an artifact every epoch's end
        model_artifact = wandb.Artifact('trained-vae', type = 'model', metadata = dict(model_config))
        model_artifact.add_file('vae.pt')
        run.log_artifact(model_artifact)
if distr_backend.is_root_worker():
    # save final vae and cleanup
    save_model('./vae-final.pt')
    wandb.save('./vae-final.pt')
    model_artifact = wandb.Artifact('trained-vae', type = 'model', metadata = dict(model_config))
    model_artifact.add_file('vae-final.pt')
    run.log_artifact(model_artifact)
    wandb.finish()
|
import os
import csv
import multiprocessing
import pdf2image
# For some reason pytype doesn't like pdftotext
import pdftotext # type: ignore
from typing import Tuple, List
def _get_image_tag(image_filename: str) -> str:
return "<img src='" + image_filename + "'>"
class PDFToAnkiCardsConverter:
    """Turn a PDF into Anki-style cards: the page title (built from chosen
    text lines) pairs with the rendered page image."""

    def __init__(
        self,
        pdf_file_path: str,
        skip_first: int = 0,
        skip_last: int = 0,
        merge_consecutive_cards_with_same_title: bool = True,
        password: str = "",
        get_title_from_lines: Tuple[int, ...] = (0,),
        title_line_seperator: str = "\n",
    ):
        # `get_title_from_lines` selects which text lines of each page are
        # joined (with `title_line_seperator`) to form its title.
        self._pdf_file_path = pdf_file_path
        self._skip_first = skip_first
        self._skip_last = skip_last
        self._merge_consecutive_cards_with_same_title = (
            merge_consecutive_cards_with_same_title
        )
        self._password = password
        self._get_title_from_lines = get_title_from_lines
        self._title_line_seperator = title_line_seperator
        # Lazily-populated cache used by get_page_titles().
        self._titles = None  # type: List[str]

    def get_image_filename(self, page_index: int) -> str:
        """Deterministic image filename for the 0-based index of a kept page
        (the embedded page number is 1-based and includes skipped pages)."""
        return (
            self._pdf_file_path.replace(os.sep, "_").replace(".", "_")
            + "_page_"
            + str(page_index + self._skip_first + 1)
            + ".jpg"
        )

    def output_images_to_directory(
        self,
        output_directory: str,
        dpi: int = 200,
        thread_count: int = multiprocessing.cpu_count(),
    ) -> None:
        """Render every kept page to a JPEG in *output_directory*."""
        page_count = pdf2image.pdfinfo_from_path(
            self._pdf_file_path, userpw=self._password
        )["Pages"]
        first_page = self._skip_first
        last_page = page_count - self._skip_last
        images = pdf2image.convert_from_path(
            self._pdf_file_path,
            dpi=dpi,
            first_page=first_page,
            last_page=last_page,
            thread_count=thread_count,
            userpw=self._password,
        )
        for image_number, image in enumerate(images):
            image.save(
                os.path.join(output_directory, self.get_image_filename(image_number))
            )

    def _get_title_for_page(self, pdf_page: str) -> str:
        """Join the configured lines of *pdf_page* into the page title."""
        lines_of_page = pdf_page.split("\n")
        lines_for_title = (
            lines_of_page[line].strip() for line in self._get_title_from_lines
        )
        return self._title_line_seperator.join(lines_for_title)

    def get_page_titles(self) -> List[str]:
        """Extract (and cache) the title of every kept page."""
        if self._titles is None:
            self._titles = []
            with open(self._pdf_file_path, "rb") as f:
                pdf = pdftotext.PDF(f, self._password)
                # Slicing not supported here
                for page in range(self._skip_first, len(pdf) - self._skip_last):
                    title = self._get_title_for_page(pdf[page])
                    self._titles.append(title)
        return self._titles

    def _should_merge_cards(self, card_title_1: str, card_title_2: str) -> bool:
        """True when merging is enabled and both titles are equal."""
        return (
            self._merge_consecutive_cards_with_same_title
            and card_title_1 == card_title_2
        )

    def output_cards_to_csv_file(self, csv_file) -> None:
        """Write (title, image-tags) rows to *csv_file* (a csv.writer),
        merging consecutive pages with the same title into one row."""
        titles = self.get_page_titles()
        # Seeding with titles[0] makes the first iteration "merge" with
        # itself, so no empty leading row is emitted.
        previous_title = titles[0]
        current_image_xml = ""
        num_titles = len(titles)
        for title_number, title in enumerate(titles):
            image_filename = self.get_image_filename(title_number)
            # NOTE(review): title_number never reaches num_titles (enumerate
            # stops at num_titles - 1), so is_last_card is always False; the
            # final group is flushed by the writerow after the loop instead.
            is_last_card = title_number == num_titles
            if is_last_card or not self._should_merge_cards(title, previous_title):
                # Note: writing previous card here
                csv_file.writerow((previous_title, current_image_xml))
                current_image_xml = ""
            current_image_xml += _get_image_tag(image_filename)
            previous_title = title
        # Flush the final (possibly merged) card.
        csv_file.writerow((title, current_image_xml))
|
import argparse
import copy
import json
import os
import pickle
import re
import sys
import traceback
from collections import Counter, defaultdict
import glob
import itertools
import shutil
import difflib
from nltk import word_tokenize, pos_tag, bigrams, ngrams
from canonical_relations import canonical_relations as canonical_relations_dict
from canonical_functions import canonical_functions as canonical_functions_dict
import pdb
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
from matplotlib.pyplot import cm
import seaborn as sns
sns.set(font_scale=1.15)
def is_annotatable_markable(markable):
    """Return True when a markable carries none of the attributes or
    relations that exclude it from referent annotation."""
    excluded = (
        markable["generic"]
        or markable["no-referent"]
        or markable["all-referents"]
        or markable["anaphora"] is not None
        or markable["cataphora"] is not None
        or markable["predicative"] is not None
    )
    return not excluded
class Tags:
    """Tag constants and helpers used to serialize dialogue sections."""

    @classmethod
    def Input(cls, x):
        """Wrap a token list with <input> ... </input> tags.

        Bug fix: the original was declared `def Input(x)` under
        @classmethod, so the class object was bound to `x` and any real
        call `Tags.Input(tokens)` raised TypeError.
        """
        return ["<input>"] + x + ["</input>"]

    Context = "input"
    Dialogue = "dialogue"
    Output = "output"
    PartnerContext = "partner_input"
class Markable:
    """A text span (mention) in a dialogue, plus its annotation state."""
    def __init__(self, markable_id, label, text, start, end):
        """Create a markable for the span [start, end) labelled *label*.

        All annotation attributes start in their "unset" state and are
        presumably filled in later by annotation-loading code — not
        visible from this class.
        """
        # identity and character span within the dialogue text
        self.markable_id = markable_id
        self.label = label
        self.text = text
        self.start = start
        self.end = end
        # attributes
        self.no_referent = False
        self.all_referents = False
        self.generic = False
        # relations
        self.anaphora = None
        self.cataphora = None
        self.predicative = None
        # corrected surface text and speaker id (None until assigned)
        self.fixed_text = None
        self.speaker = None
def read_json(path):
    """Load and return the JSON content of *path*.

    Raises
    ------
    Exception
        If the file cannot be opened or does not contain valid JSON.
    """
    try:
        # Context manager closes the handle even on error; the original
        # `json.load(open(path))` leaked the file object.
        with open(path) as fin:
            return json.load(fin)
    except (OSError, ValueError) as err:
        # json.JSONDecodeError subclasses ValueError; chain the cause so
        # the underlying error is not swallowed by the bare `except`.
        raise Exception('Error reading JSON from %s' % path) from err
def dump_json(file, path):
    """Serialize *file* (a JSON-compatible object) to *path* as indented JSON.

    Raises
    ------
    Exception
        If the destination cannot be written or the object is not
        JSON-serializable.
    """
    try:
        with open(path, "w") as fout:
            json.dump(file, fout, indent=4, sort_keys=True)
    except (OSError, TypeError, ValueError) as err:
        # Narrowed from a bare `except`; chain the cause for debuggability.
        raise Exception('Error writing JSON to %s' % path) from err
def span_agreement(args, dialogue_corpus, annotators):
    """
    Compute span agreement based on Cohen's Kappa.
    Agreement is calculated at the token level judgements.

    Reads brat .ann files from span_detection/<annotator>/batch_05/ and
    markable_annotation.json from the working directory; prints results
    to stdout.
    """
    def _add_span(annotation, start_idxs, span_start_idx, span_end_idx, add_zero=False):
        # Set (or clear, when add_zero=True) the token-level flags covered
        # by the character span [span_start_idx, span_end_idx]. `start_idxs`
        # holds the starting character offset of each token. Mutates and
        # returns `annotation`.
        assert len(annotation) == len(start_idxs)
        assert span_start_idx <= span_end_idx
        for i in range(len(start_idxs)):
            if i == len(start_idxs) - 1:
                # span starts within the final token
                if start_idxs[i] <= span_start_idx:
                    if add_zero:
                        annotation[i] = 0
                    else:
                        annotation[i] = 1
                break
            if start_idxs[i] <= span_start_idx and span_start_idx < start_idxs[i+1]:
                # token i is where the span starts; flag tokens up to span end
                for j in range(i, len(start_idxs)):
                    if span_end_idx < start_idxs[j]:
                        return annotation
                    if add_zero:
                        annotation[j] = 0
                    else:
                        annotation[j] = 1
        return annotation

    def _compute_agreement(chat_ids, annotator_1, annotator_2, valid_mask):
        # Token-level Cohen's Kappa between two annotators, restricted to
        # positions where valid_mask is 1.
        total_agreed = 0
        total_valid = 0
        total_annotator_1_positive = 0
        total_annotator_2_positive = 0
        for chat_id in chat_ids:
            assert len(annotator_1[chat_id]) == len(annotator_2[chat_id])
            assert len(annotator_1[chat_id]) == len(valid_mask[chat_id])
            num_agreed = 0
            num_valid = sum(valid_mask[chat_id])
            num_annotator_1_positive = 0
            num_annotator_2_positive = 0
            for i in range(len(annotator_1[chat_id])):
                if valid_mask[chat_id][i]:
                    if annotator_1[chat_id][i] == annotator_2[chat_id][i]:
                        num_agreed += 1
                    if annotator_1[chat_id][i]:
                        num_annotator_1_positive += 1
                    if annotator_2[chat_id][i]:
                        num_annotator_2_positive += 1
            total_agreed += num_agreed
            total_valid += num_valid
            total_annotator_1_positive += num_annotator_1_positive
            total_annotator_2_positive += num_annotator_2_positive
        observed_agreement = total_agreed / total_valid
        annotator_1_positive_prob = total_annotator_1_positive / total_valid
        annotator_2_positive_prob = total_annotator_2_positive / total_valid
        expected_agreement = annotator_1_positive_prob * annotator_2_positive_prob + \
            (1 - annotator_1_positive_prob) * (1 - annotator_2_positive_prob)
        cohens_kappa = (observed_agreement - expected_agreement) / (1 - expected_agreement)
        print("total chats: {}".format(len(chat_ids)))
        print("total judgements: {}".format(total_valid))
        print("observed agreement: {}".format(observed_agreement))
        print("expected agreement: {}".format(expected_agreement))
        print("Cohen's Kappa: {}".format(cohens_kappa))

    markable_annotation = read_json("markable_annotation.json")

    relation_span = {}
    attribute_span = {}
    modifier_span = {}
    relation_start = {}
    attribute_start = {}
    modifier_start = {}
    utterance_mask = {}
    outside_markable_mask = {}

    # the set of chats is taken from annotator_1's directory
    chat_ids = set()
    for filename in glob.glob('span_detection/annotator_1/batch_05/*.ann'):
        chat_id = filename.split("/")[3].split(".")[0]
        chat_ids.add(chat_id)

    for annotator in annotators:
        relation_span[annotator] = {}
        attribute_span[annotator] = {}
        modifier_span[annotator] = {}
        relation_start[annotator] = {}
        attribute_start[annotator] = {}
        modifier_start[annotator] = {}
        for filename in glob.glob('span_detection/' + annotator + '/batch_05/*.ann'):
            chat_id = filename.split("/")[3].split(".")[0]
            tokens = []
            utterance_mask[chat_id] = []
            outside_markable_mask[chat_id] = []
            start_idxs = []

            # compute candidate_tokens, utterance_mask, start_idxs
            text = markable_annotation[chat_id]["text"]
            start_idx = 0
            for line in text.split("\n"):
                utterance_tokens = line.split(" ")
                tokens += utterance_tokens
                utterance_mask[chat_id].append(0)
                utterance_mask[chat_id] += [1] * (len(utterance_tokens) - 1)
                for tok in line.split(" "):
                    start_idxs.append(start_idx)
                    start_idx += len(tok) + 1

            # compute outside_markable_mask
            outside_markable_mask[chat_id] = copy.copy(utterance_mask[chat_id])
            for markable in markable_annotation[chat_id]["markables"]:
                if not markable["generic"] and (markable["predicative"] is None):
                    span_start_idx = markable["start"]
                    span_end_idx = markable["end"]
                    outside_markable_mask[chat_id] = _add_span(outside_markable_mask[chat_id], start_idxs, span_start_idx, span_end_idx, add_zero=True)

            # compute brat_ids of split relations
            split_brat_id = set()
            with open(filename, "r") as fin:
                ann = fin.read()
                for line in ann.split("\n"):
                    brat_id = line.split("\t")[0]
                    if brat_id.startswith("R"):
                        label = line.split("\t")[1].split(" ")[0]
                        if label == "Split":
                            arg1 = line.split("\t")[1].split(" ")[1].split(":")[1]
                            arg2 = line.split("\t")[1].split(" ")[2].split(":")[1]
                            split_brat_id.add(arg1)

            # compute relation, attribute, modifier spans
            relation_span[annotator][chat_id] = [0] * len(tokens)
            attribute_span[annotator][chat_id] = [0] * len(tokens)
            modifier_span[annotator][chat_id] = [0] * len(tokens)
            relation_start[annotator][chat_id] = [0] * len(tokens)
            attribute_start[annotator][chat_id] = [0] * len(tokens)
            modifier_start[annotator][chat_id] = [0] * len(tokens)
            with open(filename, "r") as fin:
                ann = fin.read()
                for line in ann.split("\n"):
                    brat_id = line.split("\t")[0]
                    if brat_id.startswith("T"):
                        label = line.split("\t")[1].split(" ")[0]
                        # compute relation span
                        if label in ["Spatial-Relation", "Spatial-Relation-Markable"]:
                            span_start_idx = int(line.split("\t")[1].split(" ")[1])
                            span_end_idx = int(line.split("\t")[1].split(" ")[2])
                            relation_span[annotator][chat_id] = _add_span(relation_span[annotator][chat_id],
                                start_idxs, span_start_idx, span_end_idx)
                            if not brat_id in split_brat_id:
                                relation_start[annotator][chat_id] = _add_span(relation_start[annotator][chat_id],
                                    start_idxs, span_start_idx, span_start_idx)
                        if label == "Spatial-Attribute":
                            span_start_idx = int(line.split("\t")[1].split(" ")[1])
                            span_end_idx = int(line.split("\t")[1].split(" ")[2])
                            attribute_span[annotator][chat_id] = _add_span(attribute_span[annotator][chat_id],
                                start_idxs, span_start_idx, span_end_idx)
                            # Bug fix: the original passed attribute_span here,
                            # which both read the wrong array and aliased
                            # attribute_start to the attribute_span list.
                            attribute_start[annotator][chat_id] = _add_span(attribute_start[annotator][chat_id],
                                start_idxs, span_start_idx, span_start_idx)
                        if label == "Modifier":
                            span_start_idx = int(line.split("\t")[1].split(" ")[1])
                            span_end_idx = int(line.split("\t")[1].split(" ")[2])
                            modifier_span[annotator][chat_id] = _add_span(modifier_span[annotator][chat_id],
                                start_idxs, span_start_idx, span_end_idx)
                            # Bug fix: the original passed attribute_span here
                            # instead of modifier_start (copy-paste error).
                            modifier_start[annotator][chat_id] = _add_span(modifier_start[annotator][chat_id],
                                start_idxs, span_start_idx, span_start_idx)

    # compute relation agreement
    print("relation span agreement")
    _compute_agreement(chat_ids, relation_span["annotator_1"], relation_span["annotator_2"], utterance_mask)
    print("")

    # compute attribute agreement
    print("attribute span agreement")
    _compute_agreement(chat_ids, attribute_span["annotator_1"], attribute_span["annotator_2"], outside_markable_mask)
    print("")

    # compute modifier agreement
    print("modifier span agreement")
    _compute_agreement(chat_ids, modifier_span["annotator_1"], modifier_span["annotator_2"], utterance_mask)
    print("")

    # compute relation start agreement
    print("relation start agreement")
    _compute_agreement(chat_ids, relation_start["annotator_1"], relation_start["annotator_2"], utterance_mask)
    print("")

    # compute attribute start agreement
    print("attribute start agreement")
    _compute_agreement(chat_ids, attribute_start["annotator_1"], attribute_start["annotator_2"], outside_markable_mask)
    print("")

    # compute modifier start agreement
    print("modifier start agreement")
    _compute_agreement(chat_ids, modifier_start["annotator_1"], modifier_start["annotator_2"], utterance_mask)
    print("")
def argument_agreement(args, dialogue_corpus, annotators):
    """
    1. Compute exact match agreement (based on Cohen's Kappa)
    2. Compute essential agreement rate (referents match)

    Each (spatial expression, candidate markable) pair counts as one
    binary judgement per annotator. A disagreement is still "essentially"
    an agreement when both annotators' argument sets resolve to the same
    referents. Reads the annotation JSON files from the working directory
    and prints results to stdout.
    """
    def _compute_agreement(total_pairwise_judgements, total_pairwise_disagreement, total_annotator_1_arguments, total_annotator_2_arguments):
        # Cohen's Kappa over binary pairwise judgements.
        observed_agreement = (total_pairwise_judgements - total_pairwise_disagreement) / total_pairwise_judgements
        annotator_1_argument_prob = total_annotator_1_arguments / total_pairwise_judgements
        annotator_2_argument_prob = total_annotator_2_arguments / total_pairwise_judgements
        expected_agreement = annotator_1_argument_prob * annotator_2_argument_prob + (1 - annotator_1_argument_prob) * (1 - annotator_2_argument_prob)
        cohens_kappa = (observed_agreement - expected_agreement) / (1 - expected_agreement)
        print("total judgements: {}".format(total_pairwise_judgements))
        print("observed agreement: {}".format(observed_agreement))
        print("expected agreement: {}".format(expected_agreement))
        print("Cohen's Kappa: {}".format(cohens_kappa))

    markable_annotation = read_json("markable_annotation.json")
    referent_annotation = read_json("aggregated_referent_annotation.json")
    annotator_1 = read_json("annotator_1.json")
    annotator_2 = read_json("annotator_2.json")
    chat_ids = list(annotator_1.keys())
    assert chat_ids == list(annotator_2.keys())

    # compute utterance_end_idxs
    # (end-of-utterance character offsets; +1 accounts for the newline)
    utterance_end_idxs = {}
    for chat_id in chat_ids:
        utterance_end_idxs[chat_id] = []
        text = markable_annotation[chat_id]["text"]
        current_len = 0
        for line in text.split("\n"):
            current_len += len(line) + 1
            utterance_end_idxs[chat_id].append(current_len)
        assert current_len - 1 == len(text)

    # Step 1: Compute subject argument agreement
    total_pairwise_judgements = 0
    total_pairwise_disagreement = 0
    total_annotator_1_arguments = 0
    total_annotator_2_arguments = 0
    essential_agreement = 0
    essential_disagreement = 0
    for chat_id in chat_ids:
        # compute number of candidate markables in first k utterances
        num_candidates = []
        for k in range(len(utterance_end_idxs[chat_id])):
            num_canditates_k = 0
            for markable in markable_annotation[chat_id]["markables"]:
                if not markable["generic"] and (markable["predicative"] is None) and markable["start"] < utterance_end_idxs[chat_id][k]:
                    num_canditates_k += 1
            num_candidates.append(num_canditates_k)

        # compute num_pairwise_judgements, num_pairwise_agreement, num_annotator_arguments
        num_pairwise_judgements = 0
        num_pairwise_disagreement = 0
        num_annotator_1_arguments = 0
        num_annotator_2_arguments = 0
        # NOTE(review): unlike the object (Step 2) and modifier (Step 3)
        # sections, there is no assert here that both annotators have the
        # same number of relations -- presumably guaranteed upstream; confirm.
        for i in range(len(annotator_1[chat_id]["relations"])):
            annotator_1_relation = annotator_1[chat_id]["relations"][i]
            annotator_2_relation = annotator_2[chat_id]["relations"][i]
            if "is_split" in annotator_1_relation["tags"]:
                continue
            # all markables up to the relation's utterance are candidates
            for k in range(len(utterance_end_idxs[chat_id])):
                if annotator_1_relation["start"] < utterance_end_idxs[chat_id][k]:
                    num_pairwise_judgements += num_candidates[k]
                    break
            is_disagreement = False
            for subject in annotator_1_relation["subjects"]:
                if subject not in annotator_2_relation["subjects"]:
                    num_pairwise_disagreement += 1
                    is_disagreement = True
                num_annotator_1_arguments += 1
            for subject in annotator_2_relation["subjects"]:
                if subject not in annotator_1_relation["subjects"]:
                    num_pairwise_disagreement += 1
                    is_disagreement = True
                num_annotator_2_arguments += 1
            # on disagreement, test whether the argument sets still resolve
            # to the same referents (essential agreement)
            if is_disagreement:
                annotator_1_referents = set()
                for subject in annotator_1_relation["subjects"]:
                    for referent in referent_annotation[chat_id][subject]["referents"]:
                        referent_id = int(referent.split('_')[-1])
                        annotator_1_referents.add(referent_id)
                annotator_2_referents = set()
                for subject in annotator_2_relation["subjects"]:
                    for referent in referent_annotation[chat_id][subject]["referents"]:
                        referent_id = int(referent.split('_')[-1])
                        annotator_2_referents.add(referent_id)
                if annotator_1_referents == annotator_2_referents:
                    essential_agreement += 1
                else:
                    essential_disagreement += 1

        assert len(annotator_1[chat_id]["attributes"]) == len(annotator_2[chat_id]["attributes"])
        for i in range(len(annotator_1[chat_id]["attributes"])):
            annotator_1_attribute = annotator_1[chat_id]["attributes"][i]
            annotator_2_attribute = annotator_2[chat_id]["attributes"][i]
            for k in range(len(utterance_end_idxs[chat_id])):
                if annotator_1_attribute["start"] < utterance_end_idxs[chat_id][k]:
                    num_pairwise_judgements += num_candidates[k]
                    break
            for subject in annotator_1_attribute["subjects"]:
                if subject not in annotator_2_attribute["subjects"]:
                    num_pairwise_disagreement += 1
                num_annotator_1_arguments += 1
            for subject in annotator_2_attribute["subjects"]:
                if subject not in annotator_1_attribute["subjects"]:
                    num_pairwise_disagreement += 1
                num_annotator_2_arguments += 1

        total_pairwise_judgements += num_pairwise_judgements
        total_pairwise_disagreement += num_pairwise_disagreement
        total_annotator_1_arguments += num_annotator_1_arguments
        total_annotator_2_arguments += num_annotator_2_arguments

    # print subject agreement
    print("===subject agreement===")
    _compute_agreement(total_pairwise_judgements, total_pairwise_disagreement, total_annotator_1_arguments, total_annotator_2_arguments)
    if (essential_agreement + essential_disagreement) > 0:
        print("essential agreement: {:.2f}% ({} out of {})".format(100.0 * essential_agreement / (essential_agreement + essential_disagreement),
            essential_agreement, (essential_agreement + essential_disagreement)))

    # Step 2: Compute object argument agreement
    total_pairwise_judgements = 0
    total_pairwise_disagreement = 0
    total_annotator_1_arguments = 0
    total_annotator_2_arguments = 0
    essential_agreement = 0
    essential_disagreement = 0
    for chat_id in chat_ids:
        # compute number of candidate markables in first k utterances
        num_candidates = []
        for k in range(len(utterance_end_idxs[chat_id])):
            num_canditates_k = 0
            for markable in markable_annotation[chat_id]["markables"]:
                if not markable["generic"] and (markable["predicative"] is None) and markable["start"] < utterance_end_idxs[chat_id][k]:
                    num_canditates_k += 1
            num_candidates.append(num_canditates_k)

        # compute num_pairwise_judgements, num_pairwise_agreement, num_annotator_arguments
        num_pairwise_judgements = 0
        num_pairwise_disagreement = 0
        num_annotator_1_arguments = 0
        num_annotator_2_arguments = 0
        assert len(annotator_1[chat_id]["relations"]) == len(annotator_2[chat_id]["relations"])
        for i in range(len(annotator_1[chat_id]["relations"])):
            annotator_1_relation = annotator_1[chat_id]["relations"][i]
            annotator_2_relation = annotator_2[chat_id]["relations"][i]
            if "is_split" in annotator_1_relation["tags"]:
                continue
            for k in range(len(utterance_end_idxs[chat_id])):
                if annotator_1_relation["start"] < utterance_end_idxs[chat_id][k]:
                    num_pairwise_judgements += num_candidates[k]
                    break
            is_disagreement = False
            for obj in annotator_1_relation["objects"]:
                if obj not in annotator_2_relation["objects"]:
                    num_pairwise_disagreement += 1
                    is_disagreement = True
                num_annotator_1_arguments += 1
            for obj in annotator_2_relation["objects"]:
                if obj not in annotator_1_relation["objects"]:
                    num_pairwise_disagreement += 1
                    is_disagreement = True
                num_annotator_2_arguments += 1
            # essential agreement for objects: compare referent sets with
            # the subjects' referents removed
            if is_disagreement:
                annotator_1_referents = set()
                for obj in annotator_1_relation["objects"]:
                    for referent in referent_annotation[chat_id][obj]["referents"]:
                        referent_id = int(referent.split('_')[-1])
                        annotator_1_referents.add(referent_id)
                for subject in annotator_1_relation["subjects"]:
                    for referent in referent_annotation[chat_id][subject]["referents"]:
                        referent_id = int(referent.split('_')[-1])
                        if referent_id in annotator_1_referents:
                            annotator_1_referents.remove(referent_id)
                annotator_2_referents = set()
                for obj in annotator_2_relation["objects"]:
                    for referent in referent_annotation[chat_id][obj]["referents"]:
                        referent_id = int(referent.split('_')[-1])
                        annotator_2_referents.add(referent_id)
                for subject in annotator_2_relation["subjects"]:
                    for referent in referent_annotation[chat_id][subject]["referents"]:
                        referent_id = int(referent.split('_')[-1])
                        if referent_id in annotator_2_referents:
                            annotator_2_referents.remove(referent_id)
                if annotator_1_referents == annotator_2_referents:
                    essential_agreement += 1
                else:
                    essential_disagreement += 1

        total_pairwise_judgements += num_pairwise_judgements
        total_pairwise_disagreement += num_pairwise_disagreement
        total_annotator_1_arguments += num_annotator_1_arguments
        total_annotator_2_arguments += num_annotator_2_arguments

    # compute object agreement
    print("===object agreement===")
    _compute_agreement(total_pairwise_judgements, total_pairwise_disagreement, total_annotator_1_arguments, total_annotator_2_arguments)
    if (essential_agreement + essential_disagreement) > 0:
        print("essential agreement: {:.2f}% ({} out of {})".format(100.0 * essential_agreement / (essential_agreement + essential_disagreement),
            essential_agreement, (essential_agreement + essential_disagreement)))

    # Step 3: Compute modificant agreement
    total_pairwise_judgements = 0
    total_pairwise_disagreement = 0
    total_annotator_1_modificants = 0
    total_annotator_2_modificants = 0
    for chat_id in chat_ids:
        # compute number of candidate spatial expressions in first k utterances
        num_candidates = []
        for k in range(len(utterance_end_idxs[chat_id])):
            num_canditates_k = 0
            for relation in annotator_1[chat_id]["relations"]:
                if relation["start"] < utterance_end_idxs[chat_id][k]:
                    num_canditates_k += 1
            for attribute in annotator_1[chat_id]["attributes"]:
                if attribute["start"] < utterance_end_idxs[chat_id][k]:
                    num_canditates_k += 1
            num_candidates.append(num_canditates_k)

        # collect modifier id --> modificants
        annotator_1_modificants = {}
        annotator_2_modificants = {}
        # collect modifier id --> modifier start
        modifier_id2start = {}
        assert len(annotator_1[chat_id]["relations"]) == len(annotator_2[chat_id]["relations"])
        for i in range(len(annotator_1[chat_id]["relations"])):
            annotator_1_relation = annotator_1[chat_id]["relations"][i]
            for modifier in annotator_1_relation["modifiers"]:
                annotator_1_modificants[modifier["id"]] = "relation_{}".format(i)
                modifier_id2start[modifier["id"]] = modifier["start"]
            annotator_2_relation = annotator_2[chat_id]["relations"][i]
            for modifier in annotator_2_relation["modifiers"]:
                annotator_2_modificants[modifier["id"]] = "relation_{}".format(i)
                modifier_id2start[modifier["id"]] = modifier["start"]
        assert len(annotator_1[chat_id]["attributes"]) == len(annotator_2[chat_id]["attributes"])
        for i in range(len(annotator_1[chat_id]["attributes"])):
            annotator_1_attribute = annotator_1[chat_id]["attributes"][i]
            for modifier in annotator_1_attribute["modifiers"]:
                annotator_1_modificants[modifier["id"]] = "attribute_{}".format(i)
                modifier_id2start[modifier["id"]] = modifier["start"]
            annotator_2_attribute = annotator_2[chat_id]["attributes"][i]
            for modifier in annotator_2_attribute["modifiers"]:
                annotator_2_modificants[modifier["id"]] = "attribute_{}".format(i)
                modifier_id2start[modifier["id"]] = modifier["start"]

        # compute num_pairwise_judgements, num_pairwise_agreement, num_annotator_arguments
        num_pairwise_judgements = 0
        num_pairwise_disagreement = 0
        num_annotator_1_modificants = len(annotator_1_modificants)
        num_annotator_2_modificants = len(annotator_2_modificants)
        assert num_annotator_1_modificants == num_annotator_2_modificants
        for modifier_id in annotator_1_modificants.keys():
            for k in range(len(utterance_end_idxs[chat_id])):
                if modifier_id2start[modifier_id] < utterance_end_idxs[chat_id][k]:
                    num_pairwise_judgements += num_candidates[k]
                    break
            # a mismatched modificant is a disagreement for both annotators
            if annotator_1_modificants[modifier_id] != annotator_2_modificants[modifier_id]:
                num_pairwise_disagreement += 2

        total_pairwise_judgements += num_pairwise_judgements
        total_pairwise_disagreement += num_pairwise_disagreement
        total_annotator_1_modificants += num_annotator_1_modificants
        total_annotator_2_modificants += num_annotator_2_modificants

    # compute modifier agreement
    print("===modifier agreement===")
    _compute_agreement(total_pairwise_judgements, total_pairwise_disagreement, total_annotator_1_modificants, total_annotator_2_modificants)
def canonical_agreement(args, dialogue_corpus, annotators):
    """
    1. Compute Cohen's Kappa for canonical relation
    2. Compute Cohen's Kappa for canonical function

    Reads the annotator JSON files from the working directory and prints
    results to stdout.
    """
    # NOTE: loaded for parity with the other agreement functions; not used
    # below, but the reads also fail fast if the files are missing.
    markable_annotation = read_json("markable_annotation.json")
    referent_annotation = read_json("aggregated_referent_annotation.json")
    annotator_1 = read_json("annotator_1.json")
    annotator_2 = read_json("annotator_2.json")
    chat_ids = list(annotator_1.keys())
    assert chat_ids == list(annotator_2.keys())

    # Step 1. Compute canonical relation agreement
    canonical_relations = []
    for canonical_relation_category in canonical_relations_dict.keys():
        canonical_relations += list(canonical_relations_dict[canonical_relation_category].keys())
    annotator_1_canonical_count = Counter()
    annotator_2_canonical_count = Counter()
    total_relations = 0
    total_disagreement = 0
    for chat_id in chat_ids:
        assert len(annotator_1[chat_id]["relations"]) == len(annotator_2[chat_id]["relations"])
        for i in range(len(annotator_1[chat_id]["relations"])):
            annotator_1_relation = annotator_1[chat_id]["relations"][i]
            annotator_2_relation = annotator_2[chat_id]["relations"][i]
            for canonical_relation in annotator_1_relation["canonical-relations"]:
                if canonical_relation == "undefined":
                    continue
                annotator_1_canonical_count[canonical_relation] += 1
                if canonical_relation not in annotator_2_relation["canonical-relations"]:
                    total_disagreement += 1
            for canonical_relation in annotator_2_relation["canonical-relations"]:
                if canonical_relation == "undefined":
                    continue
                annotator_2_canonical_count[canonical_relation] += 1
                if canonical_relation not in annotator_1_relation["canonical-relations"]:
                    total_disagreement += 1
            total_relations += 1
    # each relation yields one binary judgement per canonical label
    total_judgements = total_relations * len(canonical_relations)
    observed_agreement = (total_judgements - total_disagreement) / total_judgements
    expected_agreements = []
    for canonical_relation in canonical_relations:
        annotator_1_prob = annotator_1_canonical_count[canonical_relation] / total_relations
        annotator_2_prob = annotator_2_canonical_count[canonical_relation] / total_relations
        expected_agreements.append(annotator_1_prob * annotator_2_prob + (1 - annotator_1_prob) * (1 - annotator_2_prob))
    expected_agreement = np.mean(expected_agreements)
    cohens_kappa = (observed_agreement - expected_agreement) / (1 - expected_agreement)
    print("total judgements: {}".format(total_judgements))
    print("observed agreement: {}".format(observed_agreement))
    print("expected agreement: {}".format(expected_agreement))
    print("Cohen's Kappa: {}".format(cohens_kappa))

    # Step 2. Compute canonical function agreement
    canonical_functions = []
    for canonical_function in canonical_functions_dict.keys():
        canonical_functions.append(canonical_function)
    annotator_1_canonical_count = Counter()
    annotator_2_canonical_count = Counter()
    total_modifiers = 0
    total_agreement = 0
    for chat_id in chat_ids:
        assert len(annotator_1[chat_id]["relations"]) == len(annotator_2[chat_id]["relations"])
        for i in range(len(annotator_1[chat_id]["relations"])):
            annotator_1_relation = annotator_1[chat_id]["relations"][i]
            annotator_1_id2canonical = {}
            for modifier in annotator_1_relation["modifiers"]:
                annotator_1_id2canonical[modifier["id"]] = modifier["canonical-function"]
            annotator_2_relation = annotator_2[chat_id]["relations"][i]
            annotator_2_id2canonical = {}
            for modifier in annotator_2_relation["modifiers"]:
                annotator_2_id2canonical[modifier["id"]] = modifier["canonical-function"]
            assert annotator_1_id2canonical.keys() == annotator_2_id2canonical.keys()
            for modifier_id in annotator_1_id2canonical.keys():
                if annotator_1_id2canonical[modifier_id] == annotator_2_id2canonical[modifier_id]:
                    total_agreement += 1
                annotator_1_canonical_count[annotator_1_id2canonical[modifier_id]] += 1
                annotator_2_canonical_count[annotator_2_id2canonical[modifier_id]] += 1
                total_modifiers += 1
    total_judgements = total_modifiers
    observed_agreement = total_agreement / total_judgements
    expected_agreement = 0
    for canonical_function in canonical_functions:
        expected_agreement += (annotator_1_canonical_count[canonical_function] / total_modifiers) * (annotator_2_canonical_count[canonical_function] / total_modifiers)
    cohens_kappa = (observed_agreement - expected_agreement) / (1 - expected_agreement)
    print("total judgements: {}".format(total_judgements))
    print("observed agreement: {}".format(observed_agreement))
    print("expected agreement: {}".format(expected_agreement))
    print("Cohen's Kappa: {}".format(cohens_kappa))
    # Bug fix: removed a leftover `pdb.set_trace()` debugging breakpoint
    # that halted the script here after printing the results.
if __name__ == "__main__":
    # Command-line entry point: load the corpus/scenarios, then run the
    # requested agreement analyses.
    parser = argparse.ArgumentParser()
    parser.add_argument('--seed', type=int, default=1, help='random seed')
    parser.add_argument('--scenario_file', type=str, default="aaai_train_scenarios.json")
    parser.add_argument('--scenario_file_2', type=str, default="aaai_train_scenarios_2.json")
    parser.add_argument('--transcript_file', type=str, default="final_transcripts.json")
    parser.add_argument('--span_agreement', action='store_true', default=False)
    parser.add_argument('--argument_agreement', action='store_true', default=False)
    parser.add_argument('--canonical_agreement', action='store_true', default=False)
    args = parser.parse_args()

    np.random.seed(args.seed)

    dialogue_corpus = read_json(args.transcript_file)
    scenario_list = read_json(args.scenario_file)
    scenario_list += read_json(args.scenario_file_2)

    annotator_names = ["annotator_1", "annotator_2"]
    if args.span_agreement:
        span_agreement(args, dialogue_corpus, annotators=annotator_names)
    if args.argument_agreement:
        argument_agreement(args, dialogue_corpus, annotators=annotator_names)
    if args.canonical_agreement:
        canonical_agreement(args, dialogue_corpus, annotators=annotator_names)
|
"""NNPS utility functions to work with Zoltan lists"""
import numpy
from pyzoltan.core.zoltan import get_zoltan_id_type_max
from pysph.base.particle_array import ParticleArray
UINT_MAX = get_zoltan_id_type_max()
def invert_export_lists(comm, exportProcs, recv_count):
    """Invert a given set of export indices.

    Parameters:
    ------------

    comm : mpi4py.MPI.Comm
        A valid MPI communicator

    exportProcs : IntArray
        A list of processors to send objects to

    recv_count : np.ndarray (out)
        Return array of length size which upon output, gives the number of
        objects to be received from a given processor.

    Given a list of objects that need to be exported to remote processors,
    the job of invert lists is to inform each processor the number of
    objects it will receive from other processors. This situation arises
    for example in the cell based partitioning in PySPH. From the cell
    export lists, we have a list of particle indices that need to be
    exported to remote neighbors.
    """
    # reset the recv_counts to 0
    recv_count[:] = 0

    # get the rank and size for the communicator
    size = comm.Get_size()
    rank = comm.Get_rank()

    # count the number of objects we need to send to each processor.
    # Bug fix: this module imports `numpy`, not `np`; the original used the
    # undefined alias `np`, raising NameError at call time.
    send_count = numpy.zeros(shape=size, dtype=numpy.uint32)
    numExport = exportProcs.length
    for i in range(numExport):
        pid = exportProcs[i]
        send_count[pid] += 1

    # receive buffer for all gather
    recvbuf = numpy.zeros(shape=size*size, dtype=numpy.uint32)

    # do an all gather to receive the data
    comm.Allgather(sendbuf=send_count, recvbuf=recvbuf)

    # store the number of objects to be received from each processor
    for i in range(size):
        proc_send_count = recvbuf[i*size:(i+1)*size]
        recv_count[i] = proc_send_count[rank]
def count_recv_data(
    comm, recv, numImport, importProcs):
    """Count the data to be received from different processors.

    Parameters:
    -----------

    comm : mpi.Comm
        MPI communicator

    recv : dict
        Upon output, will contain keys corresponding to processors and
        values indicating number of objects to receive from that proc.

    numImport : int
        Zoltan generated total number of objects to be imported
        to the calling proc

    importProcs : DoubleArray
        Zoltan generated list for processors from where objects are
        to be received.
    """
    rank = comm.Get_rank()
    size = comm.Get_size()

    recv.clear()
    for processor in range(size):
        recv[processor] = 0

    for i in range(numImport):
        processor = importProcs[i]
        recv[processor] += 1

    # Iterate over a snapshot of the keys: deleting from a dict while
    # iterating its live keys view raises RuntimeError on Python 3
    # (the original relied on Python 2's list-returning dict.keys()).
    for processor in list(recv.keys()):
        if recv[processor] == 0:
            del recv[processor]
def get_send_data(
    comm, pa, lb_props, _exportIndices, _exportProcs):
    """Collect the data to send in a dictionary.

    Parameters:
    -----------

    comm : mpi.Comm
        MPI communicator

    pa : ParticleArray
        Reference to the particle array from where send data is gathered

    lb_props : list
        A list of prop names to collect data

    _exportIndices : UIntArray
        Zoltan generated list of local indices to export

    _exportProcs : IntArray
        Zoltan generated list of processors to export to

    Returns a dictionary of dictionaries 'send' which is keyed on
    processor id and with values a dictionary of prop names and
    corresponding particle data.
    """
    rank = comm.Get_rank()
    size = comm.Get_size()

    procs = _exportProcs.get_npy_array()
    exportIndices = _exportIndices.get_npy_array()

    props = {}
    for prop in lb_props:
        props[prop] = pa.get_carray(prop).get_npy_array()

    send = {}
    for pid in range(size):
        # an entry is created for every processor, even with no indices
        indices = numpy.where( procs == pid )[0]

        send[pid] = {}
        # Bug fix: dict.iteritems() is Python 2 only and raises
        # AttributeError on Python 3; .items() behaves the same here.
        for prop, prop_array in props.items():
            send[pid][prop] = prop_array[ exportIndices[indices] ]

        # save the local ids exported to each processor
        send[pid]['lid'] = exportIndices[indices]
        send[pid]['msglength'] = exportIndices[indices].size

    return send
def Recv(comm, localbuf, recvbuf, source, localbufsize=0, tag=0):
    """MPI Receive operation

    Parameters:
    -----------

    comm : mpi.Comm
        The mpi communcator

    localbuf : CArray
        The local buffer to which the data is received in

    recvbuf : CArray
        the buffer in which to receive data from comm.Recv

    source : int
        processor from where the data originates

    localbufsize : int
        Current length index for the local buffer. Defaults to 0

    tag : int
        optional message tag

    When data from several processors is accumulated into one array
    (localbuf), each message is first received into the staging buffer
    (recvbuf) and then copied to the slice of localbuf starting at the
    running offset (localbufsize), using the known message length
    (recvbuf.length).
    """
    # the message length is assumed to be known before the receive
    msglength = recvbuf.length

    # numpy views over the C-arrays
    target = localbuf.get_npy_array()
    staging = recvbuf.get_npy_array()

    # receive into the staging buffer from the source processor
    comm.Recv( buf=staging, source=source, tag=tag )

    # copy into the local buffer at the current offset; with
    # localbufsize == 0 the two regions coincide.
    target[localbufsize:localbufsize + msglength] = staging[:]
def get_particle_array(name="", **props):
    """Return a ParticleArray built from the given property arrays.

    Parameters:
    -----------
    name : str
        Name for the returned particle array.
    props : dict
        Mapping from property name to array-like data.

    Integer identifier properties ('pid', 'type', 'tag') are stored as
    ints, 'gid' as unsigned int and everything else as double.  Any of
    the standard properties missing from ``props`` is added with a zero
    default ('gid' is filled with UINT_MAX instead).

    Bug fix: the ParticleArray is now created with the caller-supplied
    ``name`` instead of the hard-coded "" the original passed.
    """
    np = 0
    prop_dict = {}
    for prop, value in props.items():
        data = numpy.asarray(value)
        # remember the particle count for sizing defaulted arrays below
        np = data.size
        if prop in ['pid', 'type', 'tag']:
            prop_dict[prop] = {'data': data,
                               'type': 'int',
                               'name': prop}
        elif prop in ['gid']:
            prop_dict[prop] = {'data': data.astype(numpy.uint32),
                               'type': 'unsigned int',
                               'name': prop}
        else:
            prop_dict[prop] = {'data': data,
                               'type': 'double',
                               'name': prop}
    # ensure the standard properties always exist
    default_props = ['x', 'y', 'z', 'h', 'rho', 'gid', 'tag', 'type', 'pid']
    for prop in default_props:
        if prop not in prop_dict:
            if prop in ["type", "tag", "pid"]:
                prop_dict[prop] = {'name': prop, 'type': 'int',
                                   'default': 0}
            elif prop in ['gid']:
                # unset global ids are marked with UINT_MAX
                data = numpy.ones(shape=np, dtype=numpy.uint32)
                data[:] = UINT_MAX
                prop_dict[prop] = {'name': prop, 'type': 'unsigned int',
                                   'data': data}
            else:
                prop_dict[prop] = {'name': prop, 'type': 'double',
                                   'default': 0}
    # create the particle array with the requested name
    pa = ParticleArray(name=name, **prop_dict)
    return pa
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
from odoo import fields, models, _
_logger = logging.getLogger(__name__)
class BusinessBorrower(models.Model):
    '''Extend res.partner with fields describing a business entity that applies for a loan.'''
    _inherit = 'res.partner'

    # currency for the monetary fields below; defaults to the active company's currency
    company_currency_id = fields.Many2one(
        comodel_name='res.currency',
        string="Company Currency",default=lambda self: self.env.company.currency_id)
    # marks this partner as a loan borrower
    is_borrower = fields.Boolean('Is partner is borrower',default=False)
    # legal form of the business (free text)
    entity = fields.Char('Type of Entity')
    contact_person = fields.Char('Contact Person',help='Contact Person')
    # NOTE(review): label reads 'Business Section' — likely meant 'Business Sector';
    # confirm before changing, translations depend on the exact string
    business_sector = fields.Char('Business Section')
    no_employees = fields.Integer('Number of employees',default=0)
    # decimal precision comes from the 'Business Year' decimal-precision setting
    business_year = fields.Float('Years in business',default=0.0,digits='Business Year')
    # NOTE(review): label typo? probably 'Business Premises' — confirm before changing
    business_premises = fields.Char('Business Premised')
    company_reg_no = fields.Char('Company Registration Number')
    member_number = fields.Integer('Member Number')
    main_branch = fields.Text('Main branch')
    accountant_details = fields.Text('Accountant Details')
    tax_confirmation = fields.Char('Tax Confirmation Received')
    solicitor_details = fields.Text('Solicitor Details')
    # NOTE(review): 'Bussiness' typo in the label — confirm before changing (translations)
    business_drawing = fields.Monetary(string="Bussiness Drawing",currency_field='company_currency_id')
|
import operator
import json
from collections import Counter
import Tweet_processing_func as tp
from nltk.corpus import stopwords
import string
from collections import defaultdict
# com[x][y] contains the number of times the term x has been seen in the same tweet as the term y
com = defaultdict(lambda: defaultdict(int))  # dict whose entry for each key is itself a dict
punctuation = list(string.punctuation)
# tokens to ignore: stopwords, punctuation and common Twitter noise
stop = stopwords.words('english') + punctuation + ['RT', 'via', '...', '.', '*']
fname = 'python.json'
with open(fname, 'r') as f:
    count_all = Counter()
    for line in f:
        # keep the try minimal: only json.loads can raise ValueError here
        try:
            tweet = json.loads(line)
        except ValueError:
            # skip malformed lines instead of silently assigning a junk variable
            continue
        # all terms except stopwords, mentions and hashtags
        terms_only = [term for term in tp.preprocess(tweet['text']) if
                      term not in stop and not term.startswith(('@', '#'))]
        count_all.update(terms_only)  # update the frequency counter
        # Build co-occurrence matrix for terms within this tweet
        for i in range(len(terms_only) - 1):
            for j in range(i + 1, len(terms_only)):
                w1, w2 = sorted([terms_only[i], terms_only[j]])
                if w1 != w2:
                    com[w1][w2] += 1
com_max = []
# For each term, look for the most common co-occurrent terms
for t1 in com:
    t1_max_terms = sorted(com[t1].items(), key=operator.itemgetter(1), reverse=True)[:5]
    for t2, t2_count in t1_max_terms:
        com_max.append(((t1, t2), t2_count))
# Get the most frequent co-occurrences
terms_max = sorted(com_max, key=operator.itemgetter(1), reverse=True)
print(terms_max[:30])
# Print the first 30 most frequent words
# print(count_all.most_common(30))
|
#!/usr/bin/env python
# Author: oscar.kene@klarna.com
#
# Manages zones on fortigates in fortimanager
from ansible.module_utils.basic import AnsibleModule
import requests
import json
from Forti import FortiMgr
def main():
    """Ansible module entry point: ensure a zone exists on (state=present)
    or is removed from (state=absent) a FortiManager ADOM.

    Exits via module.exit_json on success and module.fail_json on error.
    Result codes tolerated as 'no change needed': -2 on create (already
    exists), -3 on delete (does not exist).
    """
    module = AnsibleModule(
        argument_spec=dict(
            username=dict(type='str', required=True),
            password=dict(type='str', required=True, no_log=True),
            endpoint=dict(type='str', required=True),
            adom=dict(type='str', required=True),
            state=dict(type='str', required=True, choices=['present', 'absent']),
            zone=dict(type='str', required=True)
        ),
    )
    # required attributes
    adom = module.params['adom']
    username = module.params['username']
    password = module.params['password']
    state = module.params['state']
    zone = module.params['zone']
    changed = False
    result = []
    f = FortiMgr(module.params['endpoint'])
    r = f.login(username, password=password)
    result.append(r)
    if not r[0]:
        module.fail_json(msg="Failed to log in %s." % username)
    if state == "present":
        try:
            r = f.create_zone(adom, zone)
            result.append(r)
            if f.is_success(r[1], [0]):
                changed = True
            elif not f.is_success(r[1], [-2]):
                # a result code other than 0 (created) or -2 (already exists) is an error;
                # the original used a bare `raise` with no active exception, which only
                # "worked" because the accidental RuntimeError was swallowed by `except:`
                raise RuntimeError("unexpected result code while creating zone")
        except Exception:
            module.fail_json(msg="Failed to create zone %s." % zone, result=result)
    elif state == "absent":
        try:
            r = f.delete_zone(adom, zone)
            result.append(r)
            if f.is_success(r[1], [0]):
                changed = True
            elif not f.is_success(r[1], [-3]):
                # a result code other than 0 (deleted) or -3 (does not exist) is an error
                raise RuntimeError("unexpected result code while deleting zone")
        except Exception:
            module.fail_json(msg="Failed to delete zone %s." % zone, result=result)
    f.logout()
    module.exit_json(changed=changed, result=result)


if __name__ == "__main__":
    main()
|
# Read a Celsius temperature from the user and report its Fahrenheit equivalent.
temp_c = float(input('Write a temperature in ºC:'))
temp_f = (temp_c * 9/5) + 32
print('This temperature ºC {} in Fahrenheit is ºF {}'.format(temp_c, temp_f))
|
import distutils
import itertools
import os
import re
import shutil
import subprocess
import sys
from typing import Dict, List
from distutils.command.bdist import bdist as _bdist
from distutils.command.install_data import install_data as _install_data
from distutils import log
# requires setuptools >= 64.0.0
import setuptools.command.build # for SubCommand
from setuptools.command.build import build as _build
import setuptools
from setuptools import setup, find_packages, Extension
from setuptools.command.build_ext import build_ext as _build_ext
from setuptools.command.develop import develop as _develop
from setuptools.command.install import install as _install
from setuptools.command.sdist import sdist as _sdist
import warnings
from wheel.bdist_wheel import bdist_wheel as _bdist_wheel
# Directory containing this setup.py; all relative paths are resolved against it.
SETUP_DIR = os.path.abspath(os.path.dirname(__file__))
# Presence of this file marks a build from an sdist rather than the source tree.
PKG_INFO = 'PKG-INFO'
# Directory inside an sdist holding the bundled CatBoost sources.
EXT_SRC = 'catboost_all_src'
def get_topsrc_dir():
    """Return the CatBoost source root.

    When building from an sdist (PKG-INFO is present next to setup.py) the
    sources were bundled under EXT_SRC; otherwise the source root is two
    levels above SETUP_DIR in the repository checkout.
    """
    sdist_marker = os.path.join(SETUP_DIR, PKG_INFO)
    if os.path.exists(sdist_marker):
        # sdist build: use the bundled copy of the sources
        return os.path.join(SETUP_DIR, EXT_SRC)
    return os.path.abspath(os.path.join(SETUP_DIR, '..', '..'))
class ExtensionWithSrcAndDstSubPath(Extension):
    """setuptools Extension that additionally records where CMake builds the
    artifact (cmake_build_sub_path) and where it must be placed inside the
    Python package tree (dst_sub_path).  No sources are given because the
    actual compilation is delegated to CMake (see build_ext.run)."""
    def __init__(self, name, cmake_build_sub_path, dst_sub_path):
        super().__init__(name, sources=[])
        # sub-path inside the CMake build tree where the built library appears
        self.cmake_build_sub_path = cmake_build_sub_path
        # sub-path inside the package where the extension module must end up
        self.dst_sub_path = dst_sub_path
def setup_hnsw_submodule(argv, extensions):
    """
    Create or remove the catboost.hnsw submodule directory depending on
    whether --with-hnsw is present in argv, and register the '_hnsw'
    extension when enabled.

    Does not respect --dry-run because main setup.py commands won't work
    correctly without this submodule setup.
    """
    cmake_build_sub_path = os.path.join('library', 'python', 'hnsw', 'hnsw')
    dst_sub_path = os.path.join('catboost', 'hnsw')
    hnsw_submodule_dir = os.path.join(SETUP_DIR, dst_sub_path)
    verbose = '--verbose' in argv
    if '--with-hnsw' in argv:
        extensions.append(ExtensionWithSrcAndDstSubPath('_hnsw', cmake_build_sub_path, dst_sub_path))
        if not os.path.exists(hnsw_submodule_dir):
            log.info('Creating hnsw submodule')
            hnsw_original_dir = os.path.join(get_topsrc_dir(), cmake_build_sub_path)
            if verbose:
                log.info(f'create symlink from {hnsw_original_dir} to {hnsw_submodule_dir}')
            # there can be issues on Windows when creating symbolic and hard links,
            # so fall back: symlink -> hardlink -> plain copy
            try:
                os.symlink(hnsw_original_dir, hnsw_submodule_dir, target_is_directory=True)
                return
            except Exception as exception:
                log.error(f'Encountered an error ({str(exception)}) when creating symlink, try to create hardlink instead')
            if verbose:
                log.info(f'create hardlink from {hnsw_original_dir} to {hnsw_submodule_dir}')
            try:
                os.link(hnsw_original_dir, hnsw_submodule_dir)
                return
            except Exception as exception:
                log.error(f'Encountered an error ({str(exception)}) when creating hardlink, just copy instead')
            if verbose:
                log.info(f'copy from {hnsw_original_dir} to {hnsw_submodule_dir}')
            shutil.copytree(hnsw_original_dir, hnsw_submodule_dir, dirs_exist_ok=True)
    elif os.path.exists(hnsw_submodule_dir):
        # hnsw not requested: clean up whatever a previous build left behind
        if verbose:
            log.info('remove previously used catboost.hnsw submodule')
        if os.path.islink(hnsw_submodule_dir):
            os.remove(hnsw_submodule_dir)
        elif sys.version_info >= (3, 8):
            shutil.rmtree(hnsw_submodule_dir)
        else:
            # on Python < 3.8 rmtree cannot safely handle directory junctions (Windows)
            raise RuntimeError("Cannot correctly remove previously used 'hnsw' submodule because it might be a directory junction")
def get_setup_requires(argv):
    """Return the build-time requirements implied by the command line *argv*.

    'wheel' is always needed; 'jupyterlab' is added when the widget is
    built explicitly (build_widget) or not disabled (--no-widget absent).
    """
    requires = ['wheel']
    widget_requested = 'build_widget' in argv
    widget_enabled = '--no-widget' not in argv
    if widget_requested or widget_enabled:
        requires.append('jupyterlab')
    return requires
def get_all_cmake_lists(topdir, sub_path):
    """Return paths (relative to *topdir*) of every file directly inside
    topdir/sub_path whose name starts with 'CMakeLists'."""
    found = []
    for entry in os.listdir(os.path.join(topdir, sub_path)):
        if entry.startswith('CMakeLists'):
            found.append(os.path.join(sub_path, entry))
    return found
def get_all_files_wo_built_artifacts(topdir, sub_path, exclude_regexp_str, verbose):
    """Walk topdir/sub_path and return all file paths (relative to *topdir*)
    whose path does not match *exclude_regexp_str*.

    Matching directories are pruned from the walk entirely; the walk runs
    with the CWD temporarily changed to *topdir* so the collected paths are
    relative, and the CWD is restored to SETUP_DIR afterwards.
    """
    exclude_regexp = re.compile(exclude_regexp_str)
    collected = []
    os.chdir(topdir)  # so os.walk yields paths relative to topdir
    try:
        for dirpath, dirnames, filenames in os.walk(sub_path, followlinks=True, topdown=True):
            # prune excluded directories in place so os.walk skips them
            kept_dirs = []
            for dirname in dirnames:
                candidate = os.path.join(dirpath, dirname)
                if exclude_regexp.match(candidate):
                    if verbose:
                        log.info(f'excluded {candidate} from copying')
                else:
                    kept_dirs.append(dirname)
            dirnames[:] = kept_dirs
            for filename in filenames:
                candidate = os.path.join(dirpath, filename)
                if exclude_regexp.match(candidate):
                    if verbose:
                        log.info(f'excluded {candidate} from copying')
                else:
                    collected.append(candidate)
    finally:
        # always restore the working directory expected by the rest of setup.py
        os.chdir(SETUP_DIR)
    return collected
def copy_catboost_sources(topdir, pkgdir, verbose, dry_run):
    """Copy the subset of the CatBoost repository needed to build the Python
    package from *topdir* into *pkgdir* (used when preparing an sdist).

    Parameters:
        topdir : repository source root
        pkgdir : destination directory (EXT_SRC inside the sdist tree)
        verbose, dry_run : forwarded to the distutils copy helpers
    """
    # fixed list of top-level files/directories that must be bundled
    topnames = [
        'AUTHORS', 'LICENSE', 'CONTRIBUTING.md', 'README.md', 'RELEASE.md',
        'conanfile.txt',
        'build',
        os.path.join('catboost', 'base_defs.pxd'),
        os.path.join('catboost', 'cuda'),
        os.path.join('catboost', 'idl'),
        os.path.join('catboost', 'libs'),
        os.path.join('catboost', 'private'),
        os.path.join('catboost', 'tools'),
        'cmake',
        os.path.join('contrib', 'deprecated'),
        os.path.join('contrib', 'libs'),
        os.path.join('contrib', 'python'),  # TODO: remove it, only numpy headers are used from there
        os.path.join('contrib', 'restricted'),
        os.path.join('contrib', 'tools', 'cython'),
        os.path.join('contrib', 'tools', 'flatc'),
        os.path.join('contrib', 'tools', 'protoc'),
        'tools',
        'util',
    ]
    # plus every CMakeLists* at the relevant levels of the tree
    topnames += get_all_cmake_lists(topdir, '')
    topnames += get_all_cmake_lists(topdir, 'catboost')
    topnames += get_all_cmake_lists(topdir, 'contrib')
    topnames += get_all_cmake_lists(topdir, os.path.join('contrib', 'tools'))
    # we have to include them all (not only python-package) to avoid CMake configuration errors
    for sub_dir in ['R-package', 'app', 'jvm-packages', 'python-package', 'spark']:
        topnames += get_all_cmake_lists(topdir, os.path.join('catboost', sub_dir))
    # if there were editable installs the source tree can contain __pycache__ and built shared libraries
    exclude_regexp = '.*(__pycache__|\\.(so|dylib|dll|pyd))$'
    topnames += get_all_files_wo_built_artifacts(topdir, 'library', exclude_regexp, verbose)
    topnames += get_all_files_wo_built_artifacts(
        topdir,
        os.path.join('catboost', 'python-package', 'catboost'),
        exclude_regexp,
        verbose
    )
    # copy directories recursively, single files with their parent dirs created
    for name in topnames:
        src = os.path.join(topdir, name)
        dst = os.path.join(pkgdir, name)
        if os.path.isdir(src):
            distutils.dir_util.copy_tree(src, dst, verbose=verbose, dry_run=dry_run)
        else:
            distutils.dir_util.mkpath(os.path.dirname(dst))
            distutils.file_util.copy_file(src, dst, update=1, verbose=verbose, dry_run=dry_run)
def emph(s):
    """Wrap *s* in ANSI escape codes so it prints green (used to highlight option help)."""
    return '\x1b[32m' + s + '\x1b[0m'
def get_catboost_version():
    """Read VERSION from catboost/version.py without importing the package.

    The file is executed in an isolated namespace: the original relied on
    ``exec`` mutating the function's ``locals()``, which is undefined
    behavior in Python 3, and it also leaked the open file handle.
    """
    version_py = os.path.join('catboost', 'version.py')
    namespace = {}
    with open(version_py) as f:
        exec(compile(f.read(), version_py, 'exec'), namespace)
    return namespace['VERSION']
class OptionsHelper(object):
    """Helpers for mixing extra option groups (HNSWOptions, WidgetOptions, ...)
    into distutils/setuptools command classes."""

    @staticmethod
    def get_user_options(extra_options_classes):
        """Concatenate the ``options`` lists of all given option classes."""
        collected = []
        for options_class in extra_options_classes:
            collected.extend(options_class.options)
        return collected

    @staticmethod
    def initialize_options(command):
        """Let every registered option class set its defaults on *command*."""
        for options_class in command.__class__.extra_options_classes:
            options_class.initialize_options(command)

    @staticmethod
    def finalize_options(command):
        """Let every registered option class validate its options on *command*."""
        for options_class in command.__class__.extra_options_classes:
            options_class.finalize_options(command)

    @staticmethod
    def propagate(command, subcommand_name, options):
        """Copy the named option attributes from *command* onto a freshly
        reinitialized sub-command so they take effect there."""
        sub_cmd = command.reinitialize_command(subcommand_name, reinit_subcommands=True)
        for opt_name in options:
            setattr(sub_cmd, opt_name, getattr(command, opt_name))
class HNSWOptions(object):
    """Option group controlling whether hnsw is built as a catboost submodule."""
    options = [
        ('with-hnsw', None, emph('Build with hnsw as catboost submodule')),
    ]

    @staticmethod
    def initialize_options(command):
        # hnsw disabled by default
        command.with_hnsw = False

    @staticmethod
    def finalize_options(command):
        pass

    @staticmethod
    def get_options_attribute_names():
        # attribute names propagated between commands
        return ['with_hnsw']
class WidgetOptions(object):
    """Option group controlling the Jupyter visualization widget build."""
    options = [
        ('no-widget', None, emph('Disable Jupyter visualization widget support that is enabled by default')),
        ('prebuilt-widget', None, emph('Do not rebuild already built widget in "build-generated" directory'))
    ]

    @staticmethod
    def initialize_options(command):
        # defaults: widget enabled and always rebuilt
        command.no_widget = False
        command.prebuilt_widget = False

    @staticmethod
    def finalize_options(command):
        pass

    # fix: @staticmethod was missing here, inconsistent with HNSWOptions;
    # behavior is unchanged because all call sites go through the class
    @staticmethod
    def get_options_attribute_names():
        # attribute names propagated between commands
        return ['no_widget', 'prebuilt_widget']
class BuildExtOptions(object):
    """Option group controlling the native extension build (CUDA, parallelism,
    prebuilt artifacts, macOS universal binaries)."""
    options = [
        ('with-cuda=',
         None,
         emph(
             'Path to CUDA root dir '
             + '(useful if CUDA_ROOT or CUDA_PATH is not specified or a particular CUDA version is needed)'
         )
        ),
        ('no-cuda', None, emph('Build without CUDA support even if CUDA is available')),
        ('parallel=', 'j', emph('Number of parallel build jobs')),
        ('prebuilt-extensions-build-root-dir=', None, emph('Use extensions from CatBoost project prebuilt with CMake')),
        ('macos-universal-binaries', None, emph('Build extension libraries as macOS universal binaries'))
    ]

    @staticmethod
    def initialize_options(command):
        command.with_cuda = None
        command.no_cuda = False
        command.parallel = None
        command.prebuilt_extensions_build_root_dir = None
        command.macos_universal_binaries = False

    @staticmethod
    def finalize_options(command):
        """Validate the CUDA options; when neither --with-cuda nor --no-cuda is
        given, fall back to the CUDA_PATH / CUDA_ROOT environment variables."""
        if command.with_cuda is not None:
            if command.no_cuda:
                raise RuntimeError('--with-cuda and --no-cuda options are incompatible')
            if not os.path.exists(command.with_cuda):
                raise RuntimeError(f'CUDA root dir passed in --with-cuda ({command.with_cuda}) does not exist')
        elif not command.no_cuda:
            for cuda_root in ('CUDA_PATH', 'CUDA_ROOT'):
                if (cuda_root in os.environ) and os.path.exists(os.environ[cuda_root]):
                    log.info(f'Get default CUDA root dir from {cuda_root} environment variable: {os.environ[cuda_root]}')
                    command.with_cuda = os.environ[cuda_root]
                    break

    # fix: @staticmethod was missing here, inconsistent with HNSWOptions;
    # behavior is unchanged because all call sites go through the class
    @staticmethod
    def get_options_attribute_names():
        # attribute names propagated between commands
        return ['with_cuda', 'no_cuda', 'parallel', 'prebuilt_extensions_build_root_dir', 'macos_universal_binaries']
class build(_build):
    """'build' command extended with the hnsw/widget/CUDA options and an
    additional 'build_widget' sub-command."""
    extra_options_classes = [HNSWOptions, WidgetOptions, BuildExtOptions]
    user_options = _build.user_options + OptionsHelper.get_user_options(extra_options_classes)

    def initialize_options(self):
        _build.initialize_options(self)
        OptionsHelper.initialize_options(self)

    def finalize_options(self):
        _build.finalize_options(self)
        OptionsHelper.finalize_options(self)

    def run(self):
        # forward extension-related options to build_ext and the widget
        # rebuild flag to build_widget before the standard build runs them
        OptionsHelper.propagate(
            self,
            "build_ext",
            HNSWOptions.get_options_attribute_names() + BuildExtOptions.get_options_attribute_names()
        )
        OptionsHelper.propagate(
            self,
            "build_widget",
            ['prebuilt_widget']
        )
        _build.run(self)

    def no_widget_option_is_not_set(self):
        # predicate for sub_commands: build the widget unless --no-widget was given
        return not self.no_widget

    sub_commands = [
        ('build_py', _build.has_pure_modules),
        ('build_ext', _build.has_ext_modules),
        ('build_scripts', _build.has_scripts),
        ('build_widget', no_widget_option_is_not_set)
    ]
class bdist(_bdist):
    """'bdist' command that accepts the extra option groups and forwards them
    to the underlying 'build' command."""
    extra_options_classes = [HNSWOptions, WidgetOptions, BuildExtOptions]
    user_options = _bdist.user_options + OptionsHelper.get_user_options(extra_options_classes)

    def initialize_options(self):
        _bdist.initialize_options(self)
        OptionsHelper.initialize_options(self)

    def finalize_options(self):
        _bdist.finalize_options(self)
        OptionsHelper.finalize_options(self)

    def run(self):
        # push all extra options down to 'build' before running it
        OptionsHelper.propagate(
            self,
            "build",
            HNSWOptions.get_options_attribute_names()
            + WidgetOptions.get_options_attribute_names()
            + BuildExtOptions.get_options_attribute_names()
        )
        _bdist.run(self)
class bdist_wheel(_bdist_wheel):
    """'bdist_wheel' command that accepts the extra option groups and forwards
    them to both the 'build' and 'install' commands it drives."""
    extra_options_classes = [HNSWOptions, WidgetOptions, BuildExtOptions]
    user_options = _bdist_wheel.user_options + OptionsHelper.get_user_options(extra_options_classes)

    def initialize_options(self):
        _bdist_wheel.initialize_options(self)
        OptionsHelper.initialize_options(self)

    def finalize_options(self):
        _bdist_wheel.finalize_options(self)
        OptionsHelper.finalize_options(self)

    def run(self):
        # bdist_wheel runs both build and install internally, so the extra
        # options must be propagated to each of them
        OptionsHelper.propagate(
            self,
            "build",
            HNSWOptions.get_options_attribute_names()
            + WidgetOptions.get_options_attribute_names()
            + BuildExtOptions.get_options_attribute_names()
        )
        OptionsHelper.propagate(
            self,
            "install",
            HNSWOptions.get_options_attribute_names()
            + WidgetOptions.get_options_attribute_names()
            + BuildExtOptions.get_options_attribute_names()
        )
        _bdist_wheel.run(self)
class build_ext(_build_ext):
    """'build_ext' that delegates compilation of the extensions to CatBoost's
    CMake+ninja build and then copies the produced shared libraries into the
    package layout under the Python-extension names."""
    extra_options_classes = [HNSWOptions, BuildExtOptions]
    user_options = _build_ext.user_options + OptionsHelper.get_user_options(extra_options_classes)

    def initialize_options(self):
        _build_ext.initialize_options(self)
        OptionsHelper.initialize_options(self)

    def finalize_options(self):
        _build_ext.finalize_options(self)
        OptionsHelper.finalize_options(self)

    @staticmethod
    def get_cmake_built_extension_filename(ext_name):
        # file name of the shared library as produced by CMake
        # (native platform naming, not Python's extension naming)
        return {
            'linux': f'lib{ext_name}.so',
            'darwin': f'lib{ext_name}.dylib',
            'win32': f'{ext_name}.dll',
        }[sys.platform]

    @staticmethod
    def get_extension_suffix():
        # suffix Python expects for an importable extension module
        return {
            'linux': '.so',
            'darwin': '.so',
            'win32': '.pyd',
        }[sys.platform]

    def run(self):
        """Build all extensions with CMake (unless prebuilt artifacts were
        supplied) and copy them to their destination sub-paths."""
        verbose = self.distribution.verbose
        dry_run = self.distribution.dry_run
        for ext in self.extensions:
            if not isinstance(ext, ExtensionWithSrcAndDstSubPath):
                raise RuntimeError('Only ExtensionWithSrcAndDstSubPath extensions are supported')
            # destination: source tree for in-place builds, build_lib otherwise
            put_dir = os.path.abspath(os.path.join(SETUP_DIR if self.inplace else self.build_lib, ext.dst_sub_path))
            distutils.dir_util.mkpath(put_dir, verbose=verbose, dry_run=dry_run)
        if self.prebuilt_extensions_build_root_dir is not None:
            # reuse artifacts already built with CMake elsewhere
            build_dir = self.prebuilt_extensions_build_root_dir
        else:
            build_dir = os.path.abspath(self.build_temp)
            self.build_with_cmake_and_ninja(get_topsrc_dir(), build_dir, verbose, dry_run)
        self.copy_artifacts_built_with_cmake(build_dir, verbose, dry_run)

    def build_with_cmake_and_ninja(self, topsrc_dir, build_dir, verbose, dry_run):
        """Invoke CatBoost's build_native helper to build all extension targets."""
        targets = [ext.name for ext in self.extensions]
        log.info(f'Buildling {",".join(targets)} with cmake and ninja')
        # build_native lives in <topsrc_dir>/build, not on the default sys.path
        sys.path = [os.path.join(topsrc_dir, 'build')] + sys.path
        import build_native
        # point CMake at the Python installation this setup.py runs under
        python3_root_dir = os.path.abspath(os.path.join(os.path.dirname(sys.executable), os.pardir))
        if self.with_cuda:
            cuda_support_msg = 'with CUDA support'
        else:
            cuda_support_msg = 'without CUDA support'
        build_native.build(
            build_root_dir=build_dir,
            targets=targets,
            build_type='Debug' if self.debug else 'Release',
            verbose=verbose,
            dry_run=dry_run,
            parallel_build_jobs=self.parallel,
            have_cuda=bool(self.with_cuda),
            cuda_root_dir=self.with_cuda,
            macos_universal_binaries=self.macos_universal_binaries,
            cmake_extra_args=[f'-DPython3_ROOT_DIR={python3_root_dir}']
        )
        if not dry_run:
            log.info(f'Successfully built {",".join(targets)} {cuda_support_msg}')

    def copy_artifacts_built_with_cmake(self, build_dir, verbose, dry_run):
        """Copy each CMake-built shared library to its package destination,
        renaming it to the Python extension module file name."""
        for ext in self.extensions:
            # TODO(akhropov): CMake produces wrong artifact names right now so we have to rename it
            src = os.path.join(
                build_dir,
                ext.cmake_build_sub_path,
                build_ext.get_cmake_built_extension_filename(ext.name)
            )
            dst = os.path.join(
                SETUP_DIR if self.inplace else self.build_lib,
                ext.dst_sub_path,
                ext.name + build_ext.get_extension_suffix()
            )
            if dry_run:
                # distutils.file_util.copy_file checks that src file exists so we can't just call it here
                distutils.file_util.log.info(f'copying {src} -> {dst}')
            else:
                distutils.file_util.copy_file(src, dst, verbose=verbose, dry_run=dry_run)
def spawn_wo_exe(cmd_str, dry_run):
    """Run *cmd_str* through the shell.

    Unlike the standard 'spawn' from distutils this does not append '.exe'
    to the command on Windows, and it therefore requires shell=True.
    Logs the command; does nothing else when *dry_run* is true.
    """
    log.info(cmd_str)
    if not dry_run:
        subprocess.check_call(cmd_str, shell=True)
class build_widget(setuptools.Command, setuptools.command.build.SubCommand):
    """Build the CatBoost Jupyter visualization widget by copying the widget
    JS sources to the build directory and running yarn clean/install/build
    there.  The produced files are installed as data files (see
    get_data_files / install_data), not as package modules."""
    description = "build CatBoost Jupyter visualization widget (requires yarn (https://yarnpkg.com/))"
    user_options = [
        ('build-generated=', 'b', "directory for built modules"),
        ('prebuilt-widget', None, emph('Do not rebuild already built widget in "build-generated" directory'))
    ]
    boolean_options = ['inplace']
    inplace: bool = False

    def initialize_options(self):
        self.editable_mode = False
        self.build_generated = None
        self.inplace = False
        self.prebuilt_widget = False

    def finalize_options(self):
        if self.build_generated is None:
            self.build_generated = os.path.join('build', 'widget')
        if self.editable_mode:
            self.inplace = True

    def _build(self, verbose, dry_run):
        """Copy the JS sources into build_generated and run yarn there."""
        src_js_dir = os.path.join('catboost', 'widget', 'js')
        distutils.dir_util.copy_tree(
            src_js_dir,
            self.build_generated,
            verbose=verbose,
            dry_run=dry_run
        )
        if not dry_run:
            # yarn must run inside the copied JS tree
            os.chdir(self.build_generated)
            try:
                for sub_cmd in ['clean', 'install', 'build']:
                    spawn_wo_exe(f'yarn {sub_cmd}', dry_run=dry_run)
            finally:
                os.chdir(SETUP_DIR)

    def get_source_files(self):
        # widget config plus every file in the JS source tree
        result = [os.path.join('catboost', 'widget', 'catboost-widget.json')]
        for dirpath, _, filenames in os.walk(os.path.join('catboost', 'widget', 'js')):
            result += [os.path.join(dirpath, f) for f in filenames]
        return result

    def get_output_mapping(self) -> Dict[str, str]:
        # because they will go 'data' part they won't be returned there
        return {}

    def get_outputs(self) -> List[str]:
        # because they will go 'data' part they won't be returned there
        return []

    def get_data_files(self, dry_run):
        """Return the built widget files as (dst_dir, [src_files]) pairs,
        i.e. data_files in the same format as setup's data_files argument."""
        data_files = []
        # classic notebook extension
        src_dir = os.path.join(self.build_generated, 'nbextension')
        dst_dir = os.path.join('share', 'jupyter', 'nbextensions', 'catboost-widget')
        data_files.append( (dst_dir, [os.path.join(src_dir, f) for f in ['extension.js', 'index.js']]) )
        # JupyterLab extension
        src_dir = os.path.join(self.build_generated, 'labextension')
        dst_dir = os.path.join('share', 'jupyter', 'labextensions', 'catboost-widget')
        data_files.append( (dst_dir, [os.path.join(src_dir, 'package.json')]) )
        src_dir = os.path.join(src_dir, 'static')
        dst_dir = os.path.join(dst_dir, 'static')
        if dry_run and not os.path.exists(src_dir):
            # the static file list is only known after an actual build
            raise RuntimeError("Cannot do dry_run because contents of labextension/static depend on really running build_widget")
        src_files = [ os.path.join(src_dir, f) for f in os.listdir(src_dir) ]
        data_files.append( (dst_dir, src_files) )
        # notebook server config enabling the extension
        dst_dir = os.path.join('etc', 'jupyter', 'nbconfig', 'notebook.d')
        data_files.append( (dst_dir, [os.path.join('catboost', 'widget', 'catboost-widget.json')]) )
        return data_files

    def run(self):
        verbose = self.distribution.verbose
        dry_run = self.distribution.dry_run
        if not self.prebuilt_widget:
            self._build(verbose, dry_run)
class develop(_develop):
    """'develop' (editable install) command: builds the extensions in place;
    widget installation is not supported in this mode."""
    extra_options_classes = [HNSWOptions, WidgetOptions, BuildExtOptions]
    user_options = _develop.user_options + OptionsHelper.get_user_options(extra_options_classes)

    def initialize_options(self):
        _develop.initialize_options(self)
        OptionsHelper.initialize_options(self)

    def finalize_options(self):
        _develop.finalize_options(self)
        OptionsHelper.finalize_options(self)
        if not self.no_widget:
            # data_files cannot be installed by editable installs
            warnings.warn(
                'Widget installation in develop mode is not supported. See https://github.com/pypa/pip/issues/6592',
                Warning
            )

    def install_for_development(self):
        self.run_command('egg_info')
        # Build extensions in-place
        OptionsHelper.propagate(
            self,
            "build_ext",
            HNSWOptions.get_options_attribute_names()
            + BuildExtOptions.get_options_attribute_names()
        )
        self.distribution.get_command_obj('build_ext').inplace = 1
        self.run_command('build_ext')
        if setuptools.bootstrap_install_from:
            self.easy_install(setuptools.bootstrap_install_from)
            setuptools.bootstrap_install_from = None
        self.install_namespaces()
        # create an .egg-link in the installation dir, pointing to our egg
        log.info("Creating %s (link to %s)", self.egg_link, self.egg_base)
        if not self.dry_run:
            with open(self.egg_link, "w") as f:
                f.write(self.egg_path + "\n" + self.setup_path)
        # postprocess the installed distro, fixing up .pth, installing scripts,
        # and handling requirements
        self.process_distribution(None, self.dist, not self.no_deps)
class install_data(_install_data):
    """'install_data' that appends the built Jupyter widget files to
    data_files unless --no-widget was given."""
    extra_options_classes = [WidgetOptions]
    # fix: base options must come from install_data, not install — the original
    # used _install.user_options (copy-paste slip), advertising options this
    # command does not implement
    user_options = _install_data.user_options + OptionsHelper.get_user_options(extra_options_classes)

    def initialize_options(self):
        _install_data.initialize_options(self)
        OptionsHelper.initialize_options(self)

    def finalize_options(self):
        _install_data.finalize_options(self)
        OptionsHelper.finalize_options(self)
        if not self.no_widget:
            # register the widget artifacts produced by the build_widget command
            if self.data_files is None:
                self.data_files = []
            self.data_files += self.get_finalized_command("build_widget").get_data_files(
                dry_run=self.distribution.dry_run
            )
class install(_install):
    """'install' command that forwards the extra options to 'build' and
    'install_data' and forces install_data to run when the widget is enabled."""
    extra_options_classes = [HNSWOptions, WidgetOptions, BuildExtOptions]
    user_options = _install.user_options + OptionsHelper.get_user_options(extra_options_classes)

    def initialize_options(self):
        _install.initialize_options(self)
        OptionsHelper.initialize_options(self)

    def finalize_options(self):
        _install.finalize_options(self)
        OptionsHelper.finalize_options(self)

    def has_data(self):
        # widget files are installed as data, so data exists whenever
        # the widget is enabled even if data_files is otherwise empty
        return super().has_data() or (not self.no_widget)

    def run(self):
        if 'build' not in self.distribution.have_run:
            # do not propagate if build has already been called before install
            OptionsHelper.propagate(
                self,
                "build",
                HNSWOptions.get_options_attribute_names()
                + WidgetOptions.get_options_attribute_names()
                + BuildExtOptions.get_options_attribute_names()
            )
        OptionsHelper.propagate(
            self,
            "install_data",
            WidgetOptions.get_options_attribute_names()
        )
        _install.run(self)

    sub_commands = [
        ('install_lib', _install.has_lib),
        ('install_headers', _install.has_headers),
        ('install_scripts', _install.has_scripts),
        ('install_data', has_data),
        ('install_egg_info', lambda self:True),
    ]
class sdist(_sdist):
    """'sdist' that additionally bundles the CatBoost C++ sources (under
    EXT_SRC) into the release tree so the extension can be built from the
    source distribution."""
    def make_release_tree(self, base_dir, files):
        _sdist.make_release_tree(self, base_dir, files)
        # repository root is two levels above this setup.py
        copy_catboost_sources(
            os.path.join(SETUP_DIR, '..', '..'),
            os.path.join(base_dir, EXT_SRC),
            verbose=self.distribution.verbose,
            dry_run=self.distribution.dry_run,
        )
if __name__ == '__main__':
    if sys.platform == 'win32':
        # enable ANSI escape sequences in the Windows console (for emph())
        os.system('color')
    # the main native extension; '_hnsw' may be appended by setup_hnsw_submodule
    extensions = [
        ExtensionWithSrcAndDstSubPath(
            '_catboost',
            os.path.join('catboost', 'python-package', 'catboost'),
            'catboost'
        )
    ]
    setup_hnsw_submodule(sys.argv, extensions)
    setup_requires = get_setup_requires(sys.argv)
    setup(
        # package name/version can be overridden via environment variables
        name=os.environ.get('CATBOOST_PACKAGE_NAME') or 'catboost',
        version=os.environ.get('CATBOOST_PACKAGE_VERSION') or get_catboost_version(),
        packages=find_packages(),
        package_data={
            'catboost.widget': ['__init__.py', 'ipythonwidget.py', 'metrics_plotter.py', 'callbacks.py'],
        },
        ext_modules=extensions,
        # all commands are replaced with the customized versions defined above
        cmdclass={
            'bdist_wheel': bdist_wheel,
            'bdist': bdist,
            'build_ext': build_ext,
            'build_widget': build_widget,
            'build': build,
            'develop': develop,
            'install': install,
            'install_data': install_data,
            'sdist': sdist,
        },
        extras_require={
            'widget': ['traitlets', 'ipython', 'ipywidgets (>=7.0, <9.0)']
        },
        author='CatBoost Developers',
        description='CatBoost Python Package',
        long_description='CatBoost is a fast, scalable, high performance gradient boosting on decision trees library. Used for ranking, classification, regression and other ML tasks.',
        license='Apache License, Version 2.0',
        url='https://catboost.ai',
        project_urls={
            'GitHub': 'https://github.com/catboost/catboost',
            'Bug Tracker': 'https://github.com/catboost/catboost/issues',
            'Documentation': 'https://catboost.ai/docs/',
            'Benchmarks': 'https://catboost.ai/#benchmark',
        },
        keywords=['catboost'],
        platforms=['Linux', 'Mac OSX', 'Windows', 'Unix'],
        classifiers=[
            "Development Status :: 5 - Production/Stable",
            "Topic :: Scientific/Engineering :: Artificial Intelligence",
            "Topic :: Software Development :: Libraries :: Python Modules",
        ],
        install_requires=[
            'graphviz',
            'matplotlib',
            'numpy (>=1.16.0)',
            'pandas (>=0.24)',
            'scipy',
            'plotly',
            'six',
        ],
        zip_safe=False,
        setup_requires=setup_requires,
    )
|
from pydoc import describe
from numpy import empty
import pandas as pd
import numpy as np
import re
# import bs4
import json
import requests
import time
import sys,os
sys.path.append(os.path.abspath(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
sys.path.append(r"C:\Users\kaiyu\Desktop\miller")
from chenqian_tools.hdf_helper import *
def connect_url(target_url, req_headers):
    """GET *target_url* with *req_headers*, retrying until a response object
    is obtained.

    Fixes over the original: the response variable is initialized so it can
    never be referenced unbound, and a 5-second pause is added after an
    exception so connection failures no longer retry in a hot loop.
    NOTE: like the original, this retries forever on persistent failure.
    """
    res_ = None
    while res_ is None:
        try:
            res_ = requests.get(target_url, headers=req_headers)
            if res_ is None:
                # defensive: pause before retrying
                time.sleep(5)
        except Exception:
            print("链接,出异常了!")
            # back off before retrying instead of spinning
            time.sleep(5)
    return res_
def initial_file_path(select_df,all_df,res_path = 'all_data_set_path'):
    """Recursively walk the menu tree described by *select_df*/*all_df*,
    creating a directory per menu node under *res_path* and downloading a
    table (via append_table_main) for each leaf node.

    Parameters:
        select_df : DataFrame with the row(s) currently being processed
                    (columns used: guid, menu_name, display_type, father_guid)
        all_df    : DataFrame with all menu rows, used to find children
        res_path  : current filesystem path accumulated during recursion
    Returns (res_path, all_df) so the caller can continue with the
    (possibly popped-back) path.

    NOTE(review): uses Windows-style '\\' path separators throughout —
    presumably only run on Windows; confirm before porting.
    """
    need_sub = '0'
    # more than one row: process each row independently
    if select_df.__len__()>1:
        need_sub = '1'
    # if (select_df.sequence == 1).any():
    #     need_sub = '1'
    if need_sub != '0':
        # fan out: recurse once per row
        for i in range(select_df.__len__()):
            select_df_i = select_df.iloc[i].to_frame().T
            res_path,all_df = initial_file_path(select_df_i,all_df,res_path)
    else:
        # single row (need_sub stayed '0')
        write_mark = '0'
        # display_type > 0 appears to mark a leaf/table node — TODO confirm
        if (select_df.display_type >0).any():
            write_mark = '1'
        if write_mark == '0':
            # intermediate node: create a directory and recurse into children
            father_id = select_df['guid'].iloc[0]
            append_path = '\\'+ select_df['menu_name'].iloc[0]
            # '/' inside a menu name would break the filesystem path
            append_path = append_path.replace('/','_')
            res_path = res_path + append_path
            # print(res_path)
            if not os.path.exists(res_path):
                os.mkdir(res_path)
            select_df = all_df.loc[all_df.father_guid == father_id]
            res_path,all_df = initial_file_path(select_df,all_df,res_path)
            # for a node with a single child, pop the path back one level
            if select_df.__len__() == 1:
                last_path = '\\'+res_path.split('\\')[-1:][0]
                res_path = res_path.replace(last_path,'')
            return res_path,all_df
        else:
            # leaf node: download the table unless its .h5 file already exists
            guid = select_df['guid'].iloc[0]
            table_cn_name = select_df['menu_name'].iloc[0]
            if table_cn_name is None:
                pass
            h5_path = f'''{res_path}\\{table_cn_name}'''
            print(h5_path)
            if not os.path.exists(h5_path+'_'+'table'+'.h5'):
                # random pause to avoid hammering the server
                sleep_time = time.sleep(np.random.randint(3,4))
                append_table_main(guid,h5_path)
            return res_path,all_df
    # after fanning out over multiple rows, pop the path back one level
    last_path = '\\'+res_path.split('\\')[-1:][0]
    res_path = res_path.replace(last_path,'')
    return res_path,all_df
#
def append_table_main(guid,h5_group_path_rela):
    """Download the data-dictionary information for the table identified by
    *guid* and store it in '<h5_group_path_rela>_table.h5'.

    Stores four DataFrames: field enum descriptions, field list, table
    description and sample rows.

    NOTE(review): relies on module-level globals `table_info_url` and
    `req_headers` that are defined elsewhere in this file — confirm they are
    set before this is called.
    """
    def process_field_describe(need_append_describe):
        # fetch the per-field enum/description payload for each field guid
        data_lst = []
        for guid_id in need_append_describe:
            field_describe_url = f'''http://gogoaldata.go-goal.cn/api/v1/dd_data/get_field_describle?guid={guid_id}'''
            res_ = connect_url(field_describe_url,req_headers)
            if res_ is not None:
                res = json.loads(res_.text)
                df = pd.DataFrame.from_dict(res['data'],orient = 'index').T
                data_lst.append(df)
        if len(data_lst) > 0:
            return pd.concat(data_lst,ignore_index= True)
        else:
            return pd.DataFrame()
    # fetch the table metadata (field list + description)
    target_url = f'''{table_info_url}?table_type=0&guid={guid}'''
    table_info = connect_url(target_url,req_headers)
    res = json.loads(table_info.text)
    table_field_df = pd.DataFrame(res['data']['table_field'])
    describe_df = pd.DataFrame.from_dict(res['data']['table_describle'],orient = 'index').T
    # only fields flagged display_description == 1 have extra descriptions
    need_append_describe = table_field_df.loc[table_field_df.display_description == 1]['guid']
    fields_describe_df = process_field_describe(need_append_describe)
    fields_describe_df = fields_describe_df.astype('str')
    h5_client = h5_helper(h5_group_path_rela+'_'+'table'+'.h5')
    table_en_name = res['data']['table_describle']['table_name']
    # fetch sample rows for the table; code 500 means no sample available
    example_url = f'''http://gogoaldata.go-goal.cn/api/v1/dd_data/get_table_content?table_name={table_en_name}'''
    exmaple_text = connect_url(example_url,req_headers)
    exmaple_text = json.loads(exmaple_text.text)
    if exmaple_text['code'] == 500:
        exmaple_df = pd.DataFrame()
    else:
        exmaple_df = pd.DataFrame(exmaple_text['data'])
    # column names come back wrapped in brackets — strip them
    exmaple_df.columns = [str__.strip('[]') for str__ in exmaple_df.columns]
    # field enum descriptions
    h5_client.append_table(fields_describe_df,'fields_describe_df')
    # table field info
    h5_client.append_table(table_field_df,'table_field_df')
    # table description
    describe_df = describe_df.astype('str')
    h5_client.append_table(describe_df,'describe_df')
    # table sample rows
    exmaple_df = exmaple_df.astype('str')
    h5_client.append_table(exmaple_df,'exmaple_df')
    # random pause to avoid hammering the server
    sleep_time = time.sleep(np.random.randint(3,4))
# --- script entry: crawl the whole menu tree and mirror it to disk ---
base_url = 'http://gogoaldata.go-goal.cn/'
# NOTE(review): session cookie is hard-coded and will expire; refresh before running.
cookie = '''acw_tc=0bca294216575900140796892e017889409b7ab583aa05c3ae92913f6c768d; session=764981c035524399bccc98ae49fcc5651657590025074; web=764981c035524399bccc98ae49fcc5651657590025074; tk=764981c035524399bccc98ae49fcc565'''
# Override the default request headers:
req_headers = {
    'USER_AGENT':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.0.0 Safari/537.36',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Language': 'zh-TW,zh;q=0.9,en-US;q=0.8,en;q=0.7',
    'Cookie':cookie,
    'Connection':'keep-alive',
    'Content-Type':'application/x-www-form-urlencoded; charset=UTF-8',
}
# node tree covering every branch
all_tree_url = f'http://gogoaldata.go-goal.cn/api/v1/dd_data/get_header'
# NOTE(review): org/user ids are hard-coded — presumably account-specific; verify.
pay_load_query = '?table_type=0&org_id=45513&function_code=103&user_id=100468768'
# pay_load_query = {
#     'org_id':"45513",
#     'function_code':"103",
#     'user_id':"100468768",
# }
# pay_load_query = json.dumps(pay_load_query)
# fetch the root-node info
res_all_tree = requests.get(f'''{all_tree_url}{pay_load_query}''',headers=req_headers)
res_all_tree = json.loads(res_all_tree.text)
mother_tree = res_all_tree['data']
all_data_set_path = 'zhaoyang_datafile'
# os.remove(all_data_set_path)
if not os.path.exists(all_data_set_path):
    os.mkdir(all_data_set_path)
table_info_url = 'http://gogoaldata.go-goal.cn/api/v1/dd_data/get_table_struct'
# document order inside depends on father_guid and guid
mother_df = pd.DataFrame(mother_tree)
mother_df_1 = mother_df.loc[mother_df.level == 1]
# walk each top-level (level == 1) branch of the tree
for i in range(mother_df_1.__len__()):
    select_df = mother_df_1.iloc[i].to_frame().T
    initial_file_path(select_df,mother_df,res_path = all_data_set_path)
# path is derived from level_id
# print(label['menu_name'])
# print(label['level'])
# print(label['group_id']) #
# print(label['guid']) # core of the request
|
def sorted_digits(number_string):
    """Return the decimal digits of *number_string* in ascending order.

    Uses a counting sort over the 10 possible digit values — O(n) for n digits.
    Raises ValueError if any character is not a decimal digit.
    """
    counts = [0] * 10
    for ch in number_string:
        counts[int(ch)] += 1
    ordered = []
    for digit, count in enumerate(counts):
        ordered.extend([digit] * count)
    return ordered


tobeSorted = "94848448"
# print one digit per line, smallest first (same output as the original loop)
for digit in sorted_digits(tobeSorted):
    print(digit)
|
from django import forms
from django.contrib.auth import get_user_model
from django.contrib.auth.models import User
user_model = get_user_model()
class ContactForm(forms.Form):
    """Contact-us form: name, reply email and free-text message.

    Labels/placeholders are Persian UI strings; widgets carry Bootstrap
    ``form-control`` styling.
    """
    # visitor's name
    name = forms.CharField(
        label=' نام ',
        widget=forms.TextInput(attrs={'class':'form-control', 'placeholder':'نام خود را وارد کنید'})
    )
    # reply-to address
    email = forms.EmailField(
        label=' ایمیل ',
        widget=forms.EmailInput(attrs={'class':'form-control', 'placeholder':'ایمیل برای تماس با شما', 'style':'margin: 10px auto;'}))
    # message body
    msg = forms.CharField(
        label=' پیام ',
        widget=forms.Textarea(attrs={'class':'form-control','style':'margin: 10px auto;', 'placeholder':'پیام شما از طریق این فرم برای ما ارسال و در اسرع وقت پیگیری میشود'}))
class LoginForm(forms.Form):
    """Login form: plain username/password fields with Persian placeholders."""
    username = forms.CharField(
        widget=forms.TextInput(attrs={'class':'form-control','placeholder':'نام کاربری', })
    )
    password = forms.CharField(
        widget=forms.PasswordInput(attrs={'class':'form-control','placeholder':'رمز عبور' })
    )
class RegisterForm(forms.Form):
    """Signup form with unique-username/unique-email checks and a
    password-confirmation cross-field check.

    Fixes: ``clean()`` now calls ``super().clean()`` as the Django docs
    require (the original read ``self.cleaned_data`` directly), and the
    ``querryset`` typo is corrected.
    """
    username = forms.CharField(
        label='نام کاربری',
        widget=forms.TextInput(attrs={'class':'form-control','placeholder':'Username'}),
        help_text='<br>'
    )
    email = forms.EmailField(
        label='ایمیل خود را وارد کنید',
        widget=(forms.EmailInput(attrs={'class':'form-control', 'placeholder':'برای مثال example@gmail.com '})),
        help_text=('<br>')
    )
    password = forms.CharField(
        label='رمز عبور',
        widget=forms.PasswordInput(attrs={'class':'form-control','placeholder':' حداقل 4 کاراکتر ' })
    )
    password2 = forms.CharField(
        label='تایید رمز عبور',
        widget=forms.PasswordInput(attrs={'class':'form-control','placeholder':'رمز را دوباره وارد کنید' })
    )

    def clean_username(self):
        """Reject usernames that are already taken."""
        username = self.cleaned_data.get('username')
        queryset = User.objects.filter(username=username)
        if queryset.exists():
            raise forms.ValidationError('این نام کاربری قبلا استفاده شده ')
        return username

    def clean_email(self):
        """Reject email addresses that are already registered."""
        email = self.cleaned_data.get('email')
        queryset = User.objects.filter(email=email)
        if queryset.exists():
            raise forms.ValidationError('این ایمیل قبلا استفاده شده ')
        return email

    def clean(self):
        """Cross-field validation: the two password entries must match."""
        data = super().clean()
        password = data.get('password')
        password2 = data.get('password2')
        if password != password2:
            raise forms.ValidationError('رمز ها یکسان نیستند')
        return data
|
import os
import base64
from flask import make_response
# from flask import send_file
class ResultService:
    """Serves generated job archives as base64-encoded JSON responses."""

    @classmethod
    def get_result(cls, job_id):
        """Return ``(response, 200)`` for job *job_id*.

        If ``FilesFolder/yaml_gen/<job_id>.zip`` exists, ``data`` holds its
        base64-encoded contents; otherwise ``status`` is False and ``data``
        is None. The HTTP status is 200 either way.
        """
        archive = 'FilesFolder/yaml_gen/%s.zip' % job_id
        if not os.path.exists(archive):
            return make_response(dict(status=False, code=200, data=None)), 200
        with open(archive, 'rb') as fh:
            payload = base64.b64encode(fh.read()).decode('utf-8')
        return make_response(dict(status=True, code=200, data=payload)), 200
|
import os
import pytest
from impc_etl.transformations.experiment_transformations import _get_closest_weight
from impc_etl.jobs.clean.experiment_cleaner import *
from impc_etl.jobs.clean.specimen_cleaner import clean_specimens
from impc_etl.jobs.extract.impress_extractor import extract_impress
from impc_etl.jobs.extract.xml_extraction_helper import *
# Test fixture/input locations, overridable via environment variables.
# os.environ.get replaces the original membership-test-then-index dance.
FIXTURES_PATH = os.environ.get("FIXTURES_PATH", "tests/data/fixtures/")
INPUT_PATH = os.environ.get("INPUT_PATH", "tests/data/xml/")
@pytest.fixture(scope="session")
def experiment_df(spark_session):
    """Session-scoped experiment DataFrame, cached as parquet under FIXTURES_PATH."""
    cache_path = FIXTURES_PATH + "experiment_parquet"
    if os.path.exists(cache_path):
        return spark_session.read.parquet(cache_path)
    raw = extract_dcc_xml_files(spark_session, INPUT_PATH, "experiment")
    cleaned = clean_experiments(get_experiments_by_type(raw, "experiment"))
    cleaned.write.mode("overwrite").parquet(cache_path)
    return cleaned
@pytest.fixture(scope="session")
def mouse_df(spark_session):
    """Session-scoped mouse-specimen DataFrame, cached as parquet between runs."""
    cache_path = FIXTURES_PATH + "mouse_normalized_parquet"
    if os.path.exists(cache_path):
        return spark_session.read.parquet(cache_path)
    raw = extract_dcc_xml_files(spark_session, INPUT_PATH, "specimen")
    cleaned = clean_specimens(get_specimens_by_type(raw, "mouse"))
    cleaned.write.mode("overwrite").parquet(cache_path)
    return cleaned
@pytest.fixture(scope="session")
def embryo_df(spark_session):
    """Session-scoped embryo-specimen DataFrame, cached as parquet between runs."""
    cache_path = FIXTURES_PATH + "embryo_normalized_parquet"
    if os.path.exists(cache_path):
        return spark_session.read.parquet(cache_path)
    raw = extract_dcc_xml_files(spark_session, INPUT_PATH, "specimen")
    cleaned = clean_specimens(get_specimens_by_type(raw, "embryo"))
    cleaned.write.mode("overwrite").parquet(cache_path)
    return cleaned
@pytest.fixture(scope="session")
def pipeline_df(spark_session):
    """Session-scoped IMPReSS pipeline DataFrame, cached as parquet between runs."""
    cache_path = FIXTURES_PATH + "pipeline_parquet"
    if os.path.exists(cache_path):
        return spark_session.read.parquet(cache_path)
    extracted = extract_impress(
        spark_session, "https://api.mousephenotype.org/impress/", "pipeline"
    )
    extracted.write.mode("overwrite").parquet(cache_path)
    return extracted
@pytest.mark.skip(reason="no way of currently testing this")
class TestExperimentNormalizer:
    """Exploratory/inspection tests for the experiment normalization jobs.

    Each test joins experiments to specimens (mouse + embryo) on centre id and
    specimen id, runs one transformation, and ``show()``s the result for
    manual inspection — hence the class-level skip and the ``assert True``.
    """
    def test_generate_metadata_group(
        self, experiment_df, mouse_df, embryo_df, pipeline_df
    ):
        """Inspect generate_metadata_group output for one specimen."""
        # columns shared by mouse and embryo specimens so they can be unioned
        specimen_cols = [
            "_centreID",
            "_specimenID",
            "_colonyID",
            "_isBaseline",
            "_productionCentre",
            "_phenotypingCentre",
            "phenotyping_consortium",
        ]
        mouse_specimen_df = mouse_df.select(*specimen_cols)
        embryo_specimen_df = embryo_df.select(*specimen_cols)
        specimen_df = mouse_specimen_df.union(embryo_specimen_df)
        experiment_df = experiment_df.alias("experiment")
        specimen_df = specimen_df.alias("specimen")
        experiment_specimen_df = experiment_df.join(
            specimen_df,
            (experiment_df["_centreID"] == specimen_df["_centreID"])
            & (experiment_df["specimenID"] == specimen_df["_specimenID"]),
        ).where(experiment_df["specimenID"].isin(["30216007"]))
        experiment_specimen_df = generate_metadata_group(
            experiment_specimen_df, pipeline_df
        )
        experiment_specimen_df.show(vertical=True, truncate=False)
        assert True
    def test_series_parameter_derivation(
        self, experiment_df, mouse_df, embryo_df, pipeline_df, spark_session
    ):
        """Inspect derived parameters for one IPG procedure on one specimen."""
        experiment_df.where(col("unique_id").isNull()).show(
            vertical=True, truncate=False
        )
        specimen_cols = [
            "_centreID",
            "_specimenID",
            "_colonyID",
            "_isBaseline",
            "_productionCentre",
            "_phenotypingCentre",
            "phenotyping_consortium",
        ]
        mouse_specimen_df = mouse_df.select(*specimen_cols)
        embryo_specimen_df = embryo_df.select(*specimen_cols)
        specimen_df = mouse_specimen_df.union(embryo_specimen_df)
        experiment_df = experiment_df.alias("experiment")
        specimen_df = specimen_df.alias("specimen")
        experiment_specimen_df = experiment_df.join(
            specimen_df,
            (experiment_df["_centreID"] == specimen_df["_centreID"])
            & (experiment_df["specimenID"] == specimen_df["_specimenID"]),
        )
        experiment_specimen_df = get_derived_parameters(
            spark_session,
            experiment_specimen_df.where(
                (experiment_specimen_df.specimenID == "IM0011_b0047F")
                & (experiment_specimen_df._procedureID == "IMPC_IPG_001")
            ),
            pipeline_df,
        )
        experiment_specimen_df.show(vertical=True, truncate=False)
    def test_provided_derivation(
        self, experiment_df, mouse_df, embryo_df, pipeline_df, spark_session
    ):
        """Inspect derivation for DXA/OFD procedures of one specimen, before and after."""
        specimen_cols = [
            "_centreID",
            "_specimenID",
            "_colonyID",
            "_isBaseline",
            "_productionCentre",
            "_phenotypingCentre",
            "phenotyping_consortium",
        ]
        mouse_specimen_df = mouse_df.select(*specimen_cols)
        embryo_specimen_df = embryo_df.select(*specimen_cols)
        specimen_df = mouse_specimen_df.union(embryo_specimen_df)
        specimen_df.where(col("_specimenID") == "IM0023_d0089M").show()
        experiment_df = experiment_df.alias("experiment")
        specimen_df = specimen_df.alias("specimen")
        experiment_specimen_df = experiment_df.join(
            specimen_df,
            (experiment_df["_centreID"] == specimen_df["_centreID"])
            & (experiment_df["specimenID"] == specimen_df["_specimenID"]),
        )
        experiment_specimen_df.where(
            experiment_specimen_df.specimenID == "JMC400007078"
        ).where(
            (experiment_specimen_df._procedureID == "IMPC_DXA_001")
            | (experiment_specimen_df._procedureID == "IMPC_OFD_001")
        ).show(
            100, vertical=True, truncate=False
        )
        experiment_specimen_df = get_derived_parameters(
            spark_session,
            experiment_specimen_df.where(
                experiment_specimen_df.specimenID == "JMC400007078"
            ),
            pipeline_df,
        )
        experiment_specimen_df.where(
            (experiment_specimen_df._procedureID == "IMPC_DXA_001")
            | (experiment_specimen_df._procedureID == "IMPC_OFD_001")
        ).show(100, vertical=True, truncate=False)
    def test_retina_combined(
        self, experiment_df, mouse_df, embryo_df, pipeline_df, spark_session
    ):
        """Inspect derived parameters for one EYE procedure on one specimen."""
        specimen_cols = [
            "_centreID",
            "_specimenID",
            "_colonyID",
            "_isBaseline",
            "_productionCentre",
            "_phenotypingCentre",
            "phenotyping_consortium",
        ]
        mouse_specimen_df = mouse_df.select(*specimen_cols)
        embryo_specimen_df = embryo_df.select(*specimen_cols)
        specimen_df = mouse_specimen_df.union(embryo_specimen_df)
        experiment_df = experiment_df.alias("experiment")
        specimen_df = specimen_df.alias("specimen")
        experiment_specimen_df = experiment_df.join(
            specimen_df,
            (experiment_df["_centreID"] == specimen_df["_centreID"])
            & (experiment_df["specimenID"] == specimen_df["_specimenID"]),
        )
        experiment_specimen_df = get_derived_parameters(
            spark_session,
            experiment_specimen_df.where(
                (experiment_specimen_df.specimenID == "IM0023_d0004F")
                & (experiment_specimen_df._procedureID == "IMPC_EYE_001")
            ),
            pipeline_df,
        )
        experiment_specimen_df.show(vertical=True, truncate=False)
    def test_get_closest_weight(
        self, experiment_df, mouse_df, embryo_df, pipeline_df, spark_session
    ):
        """Print the weight _get_closest_weight picks from a fixed history."""
        # hand-built weight history: mixed parameter ids and out-of-order dates
        specimen_w = [
            dict(
                weightDate="2015-11-20",
                weightParameterID="IMPC_BWT_001_001",
                weightValue="31.7",
                weightDaysOld="111",
            ),
            dict(
                weightDate="2015-09-18",
                weightParameterID="IMPC_BWT_001_001",
                weightValue="23.1",
                weightDaysOld="48",
            ),
            dict(
                weightDate="2015-10-09",
                weightParameterID="IMPC_BWT_001_001",
                weightValue="26.6",
                weightDaysOld="69",
            ),
            dict(
                weightDate="2015-10-06",
                weightParameterID="IMPC_GRS_003_001",
                weightValue="25.7",
                weightDaysOld="66",
            ),
            dict(
                weightDate="2015-09-11",
                weightParameterID="IMPC_BWT_001_001",
                weightValue="21.1",
                weightDaysOld="41",
            ),
            dict(
                weightDate="2015-09-04",
                weightParameterID="IMPC_BWT_001_001",
                weightValue="18.8",
                weightDaysOld="34",
            ),
            dict(
                weightDate="2015-10-23",
                weightParameterID="IMPC_BWT_001_001",
                weightValue="25.4",
                weightDaysOld="83",
            ),
            dict(
                weightDate="2015-08-28",
                weightParameterID="IMPC_BWT_001_001",
                weightValue="16.5",
                weightDaysOld="27",
            ),
            dict(
                weightDate="2015-11-06",
                weightParameterID="IMPC_BWT_001_001",
                weightValue="27.7",
                weightDaysOld="97",
            ),
            dict(
                weightDate="2015-10-02",
                weightParameterID="IMPC_BWT_001_001",
                weightValue="24.7",
                weightDaysOld="62",
            ),
            dict(
                weightDate="2015-09-25",
                weightParameterID="IMPC_BWT_001_001",
                weightValue="23.5",
                weightDaysOld="55",
            ),
            dict(
                weightDate="2015-11-11",
                weightParameterID="IMPC_DXA_001_001",
                weightValue="29.3",
                weightDaysOld="102",
            ),
            dict(
                weightDate="2015-10-16",
                weightParameterID="IMPC_BWT_001_001",
                weightValue="28",
                weightDaysOld="76",
            ),
            dict(
                weightDate="2015-11-13",
                weightParameterID="IMPC_BWT_001_001",
                weightValue="28.2",
                weightDaysOld="104",
            ),
            dict(
                weightDate="2015-10-30",
                weightParameterID="IMPC_BWT_001_001",
                weightValue="28.6",
                weightDaysOld="90",
            ),
        ]
        experiment_date = "2015-10-30"
        procedure_group = "IMPC_BWT"
        print(_get_closest_weight(experiment_date, procedure_group, specimen_w))
    def test_body_weight_calc(
        self, experiment_df, mouse_df, embryo_df, pipeline_df, spark_session
    ):
        """Inspect associated body weight for one specimen."""
        experiment_df = get_associated_body_weight(experiment_df, mouse_df)
        experiment_df.where((experiment_df.specimenID == "IM0025_e0098M")).show(
            vertical=True, truncate=False
        )
    def test_generate_age_information(self, experiment_df, mouse_df, spark_session):
        """Inspect generated age columns for two specimens."""
        experiment_df = generate_age_information(experiment_df, mouse_df)
        experiment_df.where(col("specimenID") == "EADE00012009").show()
        experiment_df.where(col("specimenID") == "IM0011_b0046F").show()
|
def min_coins(denominations, amount):
    """Greedy coin count for *amount* using *denominations* (ascending order).

    Iterates from the largest coin down, taking as many of each as fit.
    Assumes the greedy choice is optimal for the given coin system and that
    the smallest coin divides any remainder (e.g. a 1-unit coin), matching
    the original script's assumptions. Unlike the original index-walking
    loop, this cannot raise IndexError when a remainder is unpayable — it
    simply returns the coins used so far.
    """
    count = 0
    for coin in reversed(denominations):
        if amount == 0:
            break
        count += amount // coin
        amount %= coin
    return count


if __name__ == '__main__':
    # guard added so importing this module no longer blocks on input()
    n, k = map(int, input().split(' '))
    coins = [int(input()) for _ in range(n)]
    print(min_coins(coins, k))
from __future__ import absolute_import
import smtplib
from email.mime.text import MIMEText
from email.header import Header
from Qshop.celery import app
@app.task
def add():
    """Trivial smoke-test task: always returns 1 + 2 == 3."""
    first, second = 1, 2
    return first + second
@app.task
def sendmail():
    """Send a fixed notification mail through the configured SMTP relay.

    Fix: the SMTP connection is now closed via a context manager — the
    original never called quit()/close(), leaking the socket.
    NOTE(review): MAIL_PORT is imported but the original connected on 25;
    kept as-is to preserve behavior — verify which port is intended.
    """
    # third-party SMTP service settings
    from Qshop.settings import MAIL_PORT,MAIL_SENDER,MAIL_SERVER,MAIL_PASSWORD
    subject = '猜猜我是谁'
    content = "韦韦韦 MUA~"
    print(content)
    # build the message
    message = MIMEText(content,"plain","utf-8")
    message["To"] = Header("1527473992@qq.com",'utf-8')
    message["From"] = Header(MAIL_SENDER,'utf-8')
    message["Subject"] = Header(subject,'utf-8')
    # send the mail; the with-block guarantees QUIT/close on every path
    with smtplib.SMTP(MAIL_SERVER, 25) as smtp:
        smtp.login(MAIL_SENDER, MAIL_PASSWORD)
        smtp.sendmail(MAIL_SENDER, "1527473992@qq.com", message.as_string())
|
# coding: utf-8

# Exported Jupyter notebook: train an SVM digit classifier on a 5000-image
# MNIST subset, then binarize pixel intensities to improve accuracy.
# Fix: DataFrame/Series.as_matrix() was removed in pandas 1.0 — replaced
# with the supported .to_numpy().

# In[2]:
import pandas as pd
import matplotlib.pyplot as plt, matplotlib.image as mpimg
from sklearn.model_selection import train_test_split
from sklearn import svm
get_ipython().run_line_magic('matplotlib', 'inline')

# In[3]:
labeled_images = pd.read_csv('/home/andrei/PycharmProjects/ds-ml/MNIST_nn/train.csv')
images = labeled_images.iloc[0:5000,1:]
labels = labeled_images.iloc[0:5000,:1]
train_images, test_images,train_labels, test_labels = train_test_split(images, labels, train_size=0.8, random_state=0)

# In[29]:
image_id=3451
img=train_images.iloc[image_id].to_numpy()
img=img.reshape((28,28))
plt.imshow(img,cmap='gray')
plt.title(train_labels.iloc[image_id,])

# In[31]:
plt.hist(train_images.iloc[image_id])

# In[32]:
# baseline accuracy on raw grayscale pixels
clf = svm.SVC()
clf.fit(train_images, train_labels.values.ravel())
clf.score(test_images,test_labels)

# In[35]:
# binary representation: any pixel above the threshold becomes 1
threshold = 10
test_images[test_images>threshold]=1
train_images[train_images>threshold]=1
img=train_images.iloc[image_id].to_numpy().reshape((28,28))
plt.imshow(img,cmap='binary')
plt.title(train_labels.iloc[image_id])

# In[36]:
plt.hist(train_images.iloc[image_id])

# In[44]:
# tuned SVM on the binarized images
clf = svm.SVC(C=7, gamma=0.009)
clf.fit(train_images, train_labels.values.ravel())
clf.score(test_images,test_labels)

# In[41]:
test_data=pd.read_csv('/home/andrei/PycharmProjects/ds-ml/MNIST_nn/test.csv')
test_data[test_data>10]=1
results=clf.predict(test_data)
|
#!/usr/bin/python
# Demonstrates matching YouTube user vs. channel URLs with re.match.
# Refactor: the match-and-report logic was copy-pasted four times; it is
# now a single helper producing byte-identical output.
import re


def describe_match(pattern, url):
    """Match *pattern* against *url* (multiline, case-insensitive), print the
    same report as the original script, and return group(1) or None."""
    match = re.match(pattern, url, re.M | re.I)
    if match:
        print("matchObj.group() : ", match.group())
        print("matchObj.group(1) : ", match.group(1))
        return match.group(1)
    print("No match!!")
    return None


urlForUserName = "https://www.youtube.com/user/JorgeLuisPeralta"
urlChannel = "https://www.youtube.com/channel/UC-q80GTFK2A0Y6I5ClT-8TQ"
exRegUser = r'https://www.youtube.com/user/(.*)'
exRegChannel = r'https://www.youtube.com/channel/(.*)'

# same four pattern/URL combinations, in the original order
describe_match(exRegUser, urlForUserName)
describe_match(exRegUser, urlChannel)
describe_match(exRegChannel, urlForUserName)
describe_match(exRegChannel, urlChannel)
|
from matplotlib import pyplot as plt
from PIL import Image
from pathlib import Path

# Render the first 25 rows of train_result.csv as a 5x5 grid of images,
# titling each subplot with the row's second CSV field (presumably the
# predicted/true label — verify against the CSV writer).
if __name__ == '__main__':
    csv_path = Path('train_result.csv')
    with csv_path.open() as f:
        x_times = 5
        y_times = 5
        fig = plt.figure()
        for i in range(1, x_times * y_times + 1):
            # each line: <image path>,<label>[,...]
            info = f.readline().strip().split(',')
            img = Image.open(info[0])
            ax = fig.add_subplot(x_times, y_times, i)
            imgplot = plt.imshow(img)
            ax.set_title(info[1])
        plt.show()
|
from threading import *
import time
l = Lock()  # shared lock: serializes the greeting output across the three threads
def wish(name):
    """Print "[ Good Evening : <name> ]" ten times, one second apart.

    Holds the module-level lock ``l`` for the whole loop so concurrent
    threads cannot interleave their output. Fix: the original called
    acquire()/release() manually, so an exception mid-loop would leave the
    lock held forever; ``with l:`` releases it on every exit path.
    """
    with l:
        for i in range(10):
            print("[ Good Evening : ", end="")
            time.sleep(1)
            print(name, ']')
# Start three greeter threads; the lock inside wish() makes them run one
# after another (each holds the lock for its full 10-second loop).
t1 = Thread(target=wish, args=("Dhoni",))
t2 = Thread(target=wish, args=("Yuvraj",))
t3 = Thread(target=wish, args=("Kohli",))
t1.start()
t2.start()
t3.start()
|
import aiohttp
from collections import defaultdict, deque
from pathlib import Path
from functools import partial
import asyncio
from .constants import MatColors, GRAPH_TYPES, SAMPLING_FREQS
from .charts import CHARTS
import os
import streamlit as st
def add_custom_css():
st.markdown(
f"""
<style>
.stocks-up {{
background: {MatColors.GREEN_700.value} !important;
width: fit-content !important;
padding: 10px !important;
color: white !important;
font-weight: 700 !important;
border-radius: 10px !important;
}}
.stocks-down {{
background: {MatColors.RED_700.value} !important;
width: fit-content !important;
padding: 10px !important;
color: white !important;
font-weight: 700 !important;
border-radius: 10px !important;
}}
</style>
""",
unsafe_allow_html=True,
)
def colored_text(price, change):
return f"""
<p class={["stocks-down", "stocks-up"][change>0]}>{price} {change:+.2f} </p>
"""
def create_data_box(container, channel_name, global_graph_type, global_sampling_freq):
container.subheader(channel_name)
up_down = container.empty()
graph = container.empty()
with container.beta_expander("Chart Config"):
graph_type = st.selectbox(
"Graph Type",
GRAPH_TYPES,
key=f"gtype{channel_name}",
index=GRAPH_TYPES.index(global_graph_type)
)
sampling_freq = st.selectbox(
"Sampling Frequency",
SAMPLING_FREQS,
key=f"stype{channel_name}",
index=SAMPLING_FREQS.index(global_sampling_freq)
)
return {
"stat": up_down,
"chart": graph,
"chart_type": graph_type,
"chart_opt": {"sampling_freq": sampling_freq},
}
def process_message(channel_data, graph):
prev = channel_data[-2]["price"]
current = channel_data[-1]["price"]
graph["stat"].markdown(
colored_text(current, current - prev), unsafe_allow_html=True
)
chart_key = graph["chart_type"].lower()
graph["chart"].altair_chart(
CHARTS[chart_key](channel_data, **graph["chart_opt"]), use_container_width=True
)
async def consumer(graphs, selected_channels, status, state):
TKN = state.token or os.getenv("FINNHUB_TOKEN")
WS_CONN = f"wss://ws.finnhub.io?token={TKN}"
state.windows = state.windows or defaultdict(partial(deque, maxlen=1_000))
for channel, graph in graphs.items():
if state.windows[channel] and len(state.windows[channel]) > 2:
process_message(channel_data=state.windows[channel], graph=graph)
else:
graph["chart"].info(f"Waiting for data in channel {channel}")
async with aiohttp.ClientSession(trust_env=True) as session:
status.subheader(f"Connecting...")
async with session.ws_connect(WS_CONN) as websocket:
status.subheader(f"Connected.")
for symbl in graphs:
await websocket.send_json({"type": "subscribe", "symbol": symbl})
async for message in websocket:
data = message.json()
if "data" in data:
for d in data["data"]:
state.windows[d["s"]].append({"ts": d["t"], "price": d["p"]})
for channel, graph in graphs.items():
if state.windows[channel] and len(state.windows[channel]) > 2:
process_message(
channel_data=state.windows[channel], graph=graph
)
|
#!/usr/bin/python
####################################
# Py joins on number ranges
# author: vladimir kulyukin
# NOTE(review): Python 2 code — uses xrange and print statements;
# will not run under Python 3 without porting.
####################################
range1 = xrange(1, 6)

# Join the numbers in rng into one string, separated by `separator`.
def join_number_range(separator, rng):
    return separator.join([str(x) for x in rng])

# Print the same range joined with three different separators.
def range_tests(r):
    print join_number_range('*', r)
    print join_number_range(' ** ', r)
    print join_number_range('//', r)
range_tests(range1)
|
def fak(n):
    """Return n! computed recursively.

    Fix: guard is now ``n <= 1`` so fak(0) == 1 instead of infinite recursion.
    """
    if n <= 1:
        return 1
    return n * fak(n - 1)


# Project Euler 20: sum of the decimal digits of 100!.
# Fixes: `rest /= 10` was Python-2 integer division — under Python 3 it
# produces floats (and overflows for 100!); `//=` is correct in both.
# `print(answer)` likewise works in both Python 2 and 3.
rest = fak(100)
answer = 0
while rest > 0:
    answer += rest % 10
    rest //= 10
print(answer)
|
import math
import os
import random
import re
import sys
# Complete the hourglassSum function below.
def hourglassSum(arr):
    """Return the maximum "hourglass" sum in 2-D grid *arr*.

    An hourglass is the 7-cell pattern spanning rows i..i+2:
        a b c
          d
        e f g
    Fix: the original used `break` on sentinel indices to stop the loops,
    which still indexed out of range for rows shorter than 3; range-based
    bounds handle any grid at least 3x3 per window and skip degenerate
    input. Returns -82 (below the 7 * -9 = -63 minimum) when the grid is
    too small to contain an hourglass, matching the original's initializer.
    """
    maxnum = -1 * 9 * 9 - 1  # sentinel strictly below any reachable sum
    for i in range(len(arr) - 2):
        for j in range(len(arr[i]) - 2):
            total = (arr[i][j] + arr[i][j + 1] + arr[i][j + 2]
                     + arr[i + 1][j + 1]
                     + arr[i + 2][j] + arr[i + 2][j + 1] + arr[i + 2][j + 2])
            maxnum = max(maxnum, total)
    return maxnum
if __name__ == '__main__':
    # HackerRank harness: read a 6x6 grid from stdin and write the answer
    # to the file named by OUTPUT_PATH.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    arr = []
    for _ in range(6):
        arr.append(list(map(int, input().rstrip().split())))
    result = hourglassSum(arr)
    fptr.write(str(result) + '\n')
    fptr.close()
|
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
# Define 3D shape
block = np.array([
[[1, 1, 0],
[1, 0, 0],
[0, 1, 0]],
[[1, 1, 0],
[1, 1, 1],
[1, 0, 0]],
[[1, 1, 0],
[1, 1, 1],
[0, 1, 0]],
[[1, 0, 0],
[1, 1, 1],
[0, 1, 0]]
])
ax = plt.subplot(projection='3d')
pc = Poly3DCollection(block, facecolors='b', shade=True)
ax.add_collection(pc)
plt.show()
from django.test import TestCase
from channels.models import Channel
from talks.models import Talk
# Create your tests here.
class SitemapTest(TestCase):
    """Smoke tests for the generated sitemap.xml."""

    def setUp(self):
        """Create one channel holding three talks (two share a title)."""
        channel = Channel.objects.create(code='1', title='channel title 1')
        Talk.objects.create(code='1', title='talk title 1', channel=channel)
        Talk.objects.create(code='11', title='talk title same title', channel=channel)
        Talk.objects.create(code='12', title='talk title same title', channel=channel)

    def test_sitemap(self):
        """Sitemap lists slugged talk URLs and the static corporate pages."""
        body = str(self.client.get('/sitemap.xml').content)
        # talk entry appears with its slugged title
        self.assertIn("talk-title-1", body)
        # static about page is present
        self.assertIn("/corporate/about", body)
        # static contact page is present
        self.assertIn("/corporate/contact", body)
|
import requests
from Get_All_Course import get_all_course
from Get_Exam_List import main as exams
def examlist(stuid):
    """Interactively pick one of the student's exams and ask the server to
    reject (hand back) the submitted paper so it can be retaken.

    stuid: student id used to locate the submitted paper.
    Side effects: network requests, console I/O, and re-entering the main menu.
    Fixes: the displayed index was never incremented (every exam showed 【1】),
    and the "paper not found" branch fell through after menu() and crashed on
    an unbound examStuId — it now returns.
    """
    courses = get_all_course(stuid)
    openClassId = courses['openClassId']
    courseOpenId = courses['courseOpenId']
    exam_list = exams(openClassId, courseOpenId, stuid)
    # list the exams with a 1-based index and let the user choose one
    for index, exam in enumerate(exam_list, start=1):
        print(f"【{index}】{exam['title']}")
    target = int(input("请选择需要退回的考试:")) - 1
    examId = exam_list[target]['examId']
    examTermTimeId = exam_list[target]['examTermTimeId']
    url = 'https://zjyapp.icve.com.cn/newmobileapi/onlineExam/getReadStuList'
    data = {
        'courseOpenId': courseOpenId,
        'openClassId': openClassId,
        'examId': examId,
        'examTermTimeId': examTermTimeId,
    }
    html = requests.post(url=url, data=data).json()
    print(html['msg'])
    # locate this student's submitted paper in the returned list
    examStuId = None
    for stu in html['examStuList']:
        if stu['stuId'] == stuid:
            examStuId = stu['examStuId']
    if examStuId is None:
        print("未发现你的试卷,请先提交")
        from Main import main as menu
        menu()
        return  # bug fix: do not fall through to the reject request
    reurl = 'https://zjyapp.icve.com.cn/newmobileapi/onlineExam/rejectExam'
    data = {
        'examStuId': examStuId
    }
    html = requests.post(url=reurl, data=data).json()
    input(html['msg'])
    from Main import main as menu
    menu()
if __name__ == '__main__':
    # NOTE(review): run with an empty student id — presumably for manual
    # testing only; verify before shipping.
    examlist('')
|
import os
import subprocess

# Ping every address in test/list_ip.txt from each network namespace and
# report the (namespace, ip) pairs that did NOT answer.
# Fixes: Popen output is bytes under Python 3, so `output.find(goodPing)`
# raised TypeError — the output is now decoded first; the file handle no
# longer shadows the builtin `list`; dead commented-out variants removed.

goodPing = "1 received"  # substring ping prints on success (one echo reply)

os.chdir("/home/vagrant/project/lingi2142")
# sanity check: show the working directory contents
t = subprocess.Popen("ls", stdout=subprocess.PIPE)
print(t.communicate())

locations = ["SH1C", "HALL", "PYTH", "STEV", "CARN", "MICH"]
prefixA = "fd00:3:0f"
print("--Start ping--")
for loc in locations:
    with open("test/list_ip.txt") as ip_file:
        ip = ip_file.readline()
        print("----------------------------" + loc + "-------------------------------")
        while ip:
            p = subprocess.Popen("sudo test/ping.sh %s %s" % (loc, ip),
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE, shell=True)
            output, err = p.communicate()
            # only print the failures (no "1 received" in the ping output)
            if output.decode("utf-8", errors="replace").find(goodPing) == -1:
                print(loc + " ---> " + ip)
            ip = ip_file.readline()
|
#-*- coding:utf-8 -*-
#tornado web site
import os
import sys
import urllib
import math
import time
import string
import tornado.wsgi
import tornado.httpserver
import tornado.ioloop
import tornado.web
from tornado.httpclient import HTTPClient
from tornado.escape import json_encode, json_decode
import map_logic
from define import *
from user_logic import *
from help_logic import *
###############################################################################
###############################################################################
###############################################################################
#todo:
#################################################################################
############################# Golbal data #######################################
#################################################################################
#################################################################################
############################# Functions #######################################
#################################################################################
def GetDistance( lng1, lat1, lng2, lat2):
    '''
    Great-circle distance in meters between (lng1, lat1) and (lng2, lat2),
    via the spherical law of cosines.
    from http://only-u.appspot.com/?p=36001 method#4

    Fix: float rounding can push the acos argument fractionally outside
    [-1, 1] for nearly identical points, raising "math domain error"; the
    value is now clamped, so identical points return exactly 0.0.
    '''
    EARTH_RADIUS = 6378.137  # earth radius in km (circumference/2*pi, 40075.02 km)
    from math import sin, cos, acos, radians
    cos_angle = (cos(radians(lat1)) * cos(radians(lat2)) * cos(radians(lng1 - lng2))
                 + sin(radians(lat1)) * sin(radians(lat2)))
    cos_angle = max(-1.0, min(1.0, cos_angle))  # clamp against rounding error
    return acos(cos_angle) * EARTH_RADIUS * 1000
def test_mongo():
    """Smoke-test the BAE-hosted MongoDB: insert one doc into 'test' and read it back.

    NOTE(review): uses the long-removed pymongo.Connection API and hard-codes
    credentials — both should be addressed before reuse.
    """
    ### developers can pin the pymongo dependency in requirements.txt
    import pymongo
    ### connect to the MongoDB service
    ### obtain host, port, db_name, api_key, secret_key from the management console
    con = pymongo.Connection(host = "mongo.duapp.com", port = 8908)
    db_name = "AXuAgnJLApkefuWllnSG"
    db = con[db_name]
    api_key = "IXj5gFpzufduRv3BnG9i255H"
    secret_key = "yFmYQhLyItkE6i54CPRXBiQKkcOZa1UP"
    db.authenticate(api_key, secret_key)
    ### insert a document into the 'test' collection
    collection_name = 'test'
    db[collection_name].insert({"id":10, 'value':"test test"})
    ### query the 'test' collection
    cursor = db[collection_name].find()
    con.disconnect()
    return "select collection test %s"%str(cursor[0])
def test_mymongo():
    """Same smoke test as test_mongo, but through the project's db_driver wrapper."""
    import db_driver
    db = db_driver.DB('test')
    db.SelectTable('test')
    db.Insert({"id":10, 'value':"test test"})
    cursor = db.Find()
    return "select collection test %s"%str(cursor[0])
#################################################################################
############################# Classes #########################################
#################################################################################
class RootHandler(tornado.web.RequestHandler):
    """Landing page: reports host/platform and points at the /test_where demo."""
    def get(self):
        # NOTE(review): `socket` and `platform` are not imported in this file —
        # presumably re-exported by one of the wildcard imports; verify.
        self.write("This is a BAE project: hostname: %s; platform %s;\
 Try to load \"mutualhelp.buapp.com\\test_where\"" % (socket.gethostname(), platform.platform()))
    def post(self):
        self.write("got the post")
class WhereHandler(tornado.web.RequestHandler):
    """Serves the "where are you" address-entry page."""
    def get(self):
        self.render("static/where_are_you.html")
class LocationHandler(tornado.web.RequestHandler):
    """Geocode a street/city via the Baidu geocoder and render a static map
    showing the distance from the configured reference point.

    Fixes: the URL-quoted strings were computed but the raw UTF-8 bytes were
    interpolated into the request URL (non-ASCII addresses produced invalid
    URLs) — the quoted values are now used; the bare ``except:`` is narrowed
    to ``except Exception``.
    """
    def post(self):
        global jiayan_baidu_key, jiayan_latitude, jiayan_longitude, baidu_shanghai_map
        street = self.get_argument("street")
        city = self.get_argument("city")
        street = street.encode("UTF-8")
        city = city.encode("UTF-8")
        street_string = urllib.quote(street)
        city_string = urllib.quote(city)
        # *TBD*, maybe can be replaced by tornado.escape.utf8.
        # self.GeoLocation_request(street_string, city_string) ##the async method
        key = urllib.quote(jiayan_baidu_key)
        simple_client = HTTPClient()
        # bug fix: use the URL-quoted address/city, not the raw bytes
        response = simple_client.fetch("http://api.map.baidu.com/geocoder/v2/?ak=%s&output=json&address=%s&city=%s" % (key, street_string, city_string))
        if response.error:
            raise tornado.web.HTTPError(500)
        location_json = tornado.escape.json_decode(response.body)
        if location_json['status'] == 0:
            try:
                ### this offset is added to fix people square can't display correctly, I think it's baidu's problem. ###
                location_json['result']['location']['lng'] = location_json['result']['location']['lng'] + 0.000001
                distance = GetDistance(float(jiayan_longitude), float(jiayan_latitude),\
                    float(location_json['result']['location']['lng']), float(location_json['result']['location']['lat']))
                map_link = baidu_shanghai_map + "&markers=%s,%s|%s,%s&marklStyles=s,A,0xff0000" % \
                    (location_json['result']['location']['lng'], location_json['result']['location']['lat'], \
                    jiayan_longitude, jiayan_latitude)
                self.render("static/map_static.html",
                    latitude = location_json['result']['location']['lat'], \
                    longitude = location_json['result']['location']['lng'],\
                    map_img = map_link,\
                    distance = "%.2f" % distance,)
            except Exception:
                # narrowed from a bare except; still best-effort fallback page
                self.write("<html><body><h2>Oops! Somthing Wrong! Havn't find your location!</h2></body><html>\n")
        else:
            self.write("<html><body><h2>Havn't find your location!</h2></body><html>\n")
class MutualHandler(tornado.web.RequestHandler):
    """Diagnostic handler: GET runs the MongoDB self-test and echoes its result."""

    def get(self):
        log_print('get the get')
        try:
            self.write('%s' % test_mymongo())
            #self.write('get the get')
        except Exception:
            # Narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt are
            # no longer swallowed; a mongo failure is still reported to the client.
            self.write('mongo test failed')
            #pass

    def post(self):
        pass
class AppHandler(tornado.web.RequestHandler):
    """Single JSON endpoint: dispatches the PAYLOAD message on its TYPE field."""
    # self.request.remote_ip is the client's IP address

    def get(self):
        self.write("Mutual Server: HTTP GET is ok!\r\n")

    def post(self):
        msg_req = json_decode(self.get_argument("PAYLOAD"))
        # Message-type -> action dispatch table (replaces a long elif chain).
        dispatch = {
            MSG_USER_REGISTER_REQ: UserRegisterAction,
            MSG_USER_LOGIN_REQ: UserLoginAction,
            MSG_USER_LOGOUT_REQ: UserLogoutAction,
            MSG_USER_INFO_REQ: UserInfoAction,
            MSG_USER_POLLING_REQ: UserPollingAction,
            MSG_USER_REPORT_REQ: UserReportAction,
            MSG_HELP_ADD_REQ: HelpAddAction,
            MSG_HELP_INFO_REQ: HelpInfoAction,
            MSG_HELP_ACCEPT_REQ: HelpAcceptAction,
            MSG_HELP_REJECT_REQ: HelpRejectAction,
            MSG_HELP_FINISH_REQ: HelpFinishAction,
        }
        action = dispatch.get(msg_req["TYPE"])
        if action is None:
            msg_resp = {"reason":"this msg type is unsupported yet!\r\n"}
        else:
            msg_resp = action(msg_req)
        self.write(json_encode(msg_resp))
class TestUserRegHandler(tornado.web.RequestHandler):
    """Form-driven self-test page for the user-register action."""

    def get(self):
        self.render("static/user_register_db.html")

    def post(self):
        # Read the form fields in the same order the original handler did.
        fields = ("NAME", "TEL", "SEX", "AGE", "JOB", "NICKNAME", "EMAIL",
                  "LONGITUDE", "LATITUDE", "ADDRESS", "ICON_INDEX", "ICON",
                  "PASSWD")
        request = {u"TYPE": MSG_USER_REGISTER_REQ}
        for field in fields:
            request[field] = self.get_argument(field)
        encoded = json_encode(request)
        self.write("<br>\r\nclient msg:\r\n<br>")
        self.write(encoded)
        # Round-trip through JSON so the action sees exactly what a client sends.
        response = UserRegisterAction(json_decode(encoded))
        self.write("<br>\r\nserver msg:\r\n<br>")
        self.write(json_encode(response))
class TestUserLoginHandler(tornado.web.RequestHandler):
    """Form-driven self-test page for the user-login action."""

    def get(self):
        self.render("static/user_login_db.html")

    def post(self):
        request = {
            u"TYPE": MSG_USER_LOGIN_REQ,
            u"TEL": self.get_argument("TEL"),
            u"PASSWD": self.get_argument("PASSWD"),
        }
        encoded = json_encode(request)
        self.write("<br>\r\nclient msg:\r\n<br>")
        self.write(encoded)
        # Round-trip through JSON so the action sees exactly what a client sends.
        response = UserLoginAction(json_decode(encoded))
        self.write("<br>\r\nserver msg:\r\n<br>")
        self.write(json_encode(response))
class TestUserLogoutHandler(tornado.web.RequestHandler):
    """Form-driven self-test page for the user-logout action."""

    def get(self):
        self.render("static/user_logout_db.html")

    def post(self):
        request = {
            u"TYPE": MSG_USER_LOGOUT_REQ,
            u"TEL": self.get_argument("TEL"),
        }
        encoded = json_encode(request)
        self.write("<br>\r\nclient msg:\r\n<br>")
        self.write(encoded)
        # Round-trip through JSON so the action sees exactly what a client sends.
        response = UserLogoutAction(json_decode(encoded))
        self.write("<br>\r\nserver msg:\r\n<br>")
        self.write(json_encode(response))
class TestUserInfoHandler(tornado.web.RequestHandler):
    """Form-driven self-test page for the user-info query action."""

    def get(self):
        self.render("static/user_info_db.html")

    def post(self):
        request = {
            u"TYPE": MSG_USER_INFO_REQ,
            u"USERID": self.get_argument("USERID"),
        }
        encoded = json_encode(request)
        self.write("<br>\r\nclient msg:\r\n<br>")
        self.write(encoded)
        # Round-trip through JSON so the action sees exactly what a client sends.
        response = UserInfoAction(json_decode(encoded))
        self.write("<br>\r\nserver msg:\r\n<br>")
        self.write(json_encode(response))
class TestUserPollingHandler(tornado.web.RequestHandler):
    """Form-driven self-test page for the user-polling action."""

    def get(self):
        self.render("static/user_polling_db.html")

    def post(self):
        userid = self.get_argument("USERID")
        # int() replaces string.atoi(): atoi was deprecated in Python 2 and
        # removed in Python 3; int() is the exact equivalent for base-10 input.
        update_item = int(self.get_argument("UPDATE_ITEM"))
        msg_req = {u"TYPE":MSG_USER_POLLING_REQ, u"USERID":userid, u"UPDATE":update_item}
        msg_req = json_encode(msg_req)
        self.write("<br>\r\nclient msg:\r\n<br>")
        self.write(msg_req)
        msg_req = json_decode(msg_req)
        msg_resp = UserPollingAction(msg_req)
        self.write("<br>\r\nserver msg:\r\n<br>")
        self.write(json_encode(msg_resp))
class TestUserReportHandler(tornado.web.RequestHandler):
    """Form-driven self-test page for the user location-report action."""

    def get(self):
        self.render("static/user_report_db.html")

    def post(self):
        userid = self.get_argument("USERID")
        # NOTE(review): int() truncates fractional GPS coordinates -- confirm
        # integer lat/lon is intended (other handlers pass them through as-is).
        lat = int(self.get_argument("LATITUDE"))
        lon = int(self.get_argument("LONGITUDE"))
        request = {u"TYPE": MSG_USER_REPORT_REQ, u"USERID": userid,
                   u"UPDATE": REPORT_USER_LOCATION,
                   u"LATITUDE": lat, u"LONGITUDE": lon}
        encoded = json_encode(request)
        self.write("<br>\r\nclient msg:\r\n<br>")
        self.write(encoded)
        # Round-trip through JSON so the action sees exactly what a client sends.
        response = UserReportAction(json_decode(encoded))
        self.write("<br>\r\nserver msg:\r\n<br>")
        self.write(json_encode(response))
class TestUserDistanceHandler(tornado.web.RequestHandler):
    """Form-driven self-test page computing the distance between two users."""

    def get(self):
        self.render("static/user_distance.html")

    def post(self):
        userid1 = self.get_argument("USERID1")
        userid2 = self.get_argument("USERID2")
        # The action takes the two ids directly; the JSON round-trip the other
        # test handlers use (and the unused msg_req dict) was dead code here.
        msg_resp = UsersGetDistanceAction(userid1, userid2)
        self.write("<br>\r\nserver msg:\r\n<br>")
        self.write(json_encode(msg_resp))
class TestHelpAddHandler(tornado.web.RequestHandler):
    """Form-driven self-test page for creating a help request."""

    def get(self):
        self.render("static/help_add_db.html")

    def post(self):
        # Read the form fields in the same order the original handler did.
        title = self.get_argument("TITLE")
        userid = self.get_argument("USERID")
        catagory = self.get_argument("CATAGORY")
        place = self.get_argument("RANGE")
        timeout = self.get_argument("TIMEOUT")
        content = self.get_argument("CONTENT")
        lat = self.get_argument("LATITUDE")
        lon = self.get_argument("LONGITUDE")
        address = self.get_argument("ADDRESS")
        start_time = self.get_argument("START_TIME")
        score = int(self.get_argument("SCORE"))
        phone = self.get_argument("PHONE")
        request = {
            u"TYPE": MSG_HELP_ADD_REQ,
            u"TITLE": title,
            u"USERID": userid,
            u"CATAGORY": catagory,
            u"LATITUDE": lat,
            u"LONGITUDE": lon,
            u"RANGE": place,
            u"TIMEOUT": timeout,
            u"CONTENT": content,
            u"ADDRESS": address,
            u"START_TIME": start_time,
            u"SCORE": score,
            u"PHONE": phone,
        }
        encoded = json_encode(request)
        self.write("<br>\r\nclient msg:\r\n<br>")
        self.write(encoded)
        # Round-trip through JSON so the action sees exactly what a client sends.
        response = HelpAddAction(json_decode(encoded))
        self.write("<br>\r\nserver msg:\r\n<br>")
        self.write(json_encode(response))
class TestHelpAcceptHandler(tornado.web.RequestHandler):
    """Form-driven self-test page for accepting a help request."""

    def get(self):
        self.render("static/help_accept_db.html")

    def post(self):
        request = {
            u"TYPE": MSG_HELP_ACCEPT_REQ,
            u"USERID": self.get_argument("USERID"),
            u"HELPID": self.get_argument("HELPID"),
        }
        encoded = json_encode(request)
        self.write("<br>\r\nclient msg:\r\n<br>")
        self.write(encoded)
        # Round-trip through JSON so the action sees exactly what a client sends.
        response = HelpAcceptAction(json_decode(encoded))
        self.write("<br>\r\nserver msg:\r\n<br>")
        self.write(json_encode(response))
class TestHelpRejectHandler(tornado.web.RequestHandler):
    """Form-driven self-test page for rejecting a help request."""

    def get(self):
        self.render("static/help_reject_db.html")

    def post(self):
        request = {
            u"TYPE": MSG_HELP_REJECT_REQ,
            u"USERID": self.get_argument("USERID"),
            u"HELPID": self.get_argument("HELPID"),
        }
        encoded = json_encode(request)
        self.write("<br>\r\nclient msg:\r\n<br>")
        self.write(encoded)
        # Round-trip through JSON so the action sees exactly what a client sends.
        response = HelpRejectAction(json_decode(encoded))
        self.write("<br>\r\nserver msg:\r\n<br>")
        self.write(json_encode(response))
class TestHelpFinishHandler(tornado.web.RequestHandler):
    """Form-driven self-test page for finishing a help request."""

    def get(self):
        self.render("static/help_finish_db.html")

    def post(self):
        userid = self.get_argument("USERID")
        helpid = self.get_argument("HELPID")
        # The test form supplies exactly two finishers.
        finished_helpers = [
            {"FINISHER_ID": self.get_argument("finisher1_id"),
             "FINISHER_SCORE": int(self.get_argument("finisher1_score"))},
            {"FINISHER_ID": self.get_argument("finisher2_id"),
             "FINISHER_SCORE": int(self.get_argument("finisher2_score"))},
        ]
        request = {u"TYPE": MSG_HELP_FINISH_REQ, u"USERID": userid,
                   u"HELPID": helpid, u"FINISHED_HELPERS": finished_helpers}
        encoded = json_encode(request)
        self.write("<br>\r\nclient msg:\r\n<br>")
        self.write(encoded)
        # Round-trip through JSON so the action sees exactly what a client sends.
        response = HelpFinishAction(json_decode(encoded))
        self.write("<br>\r\nserver msg:\r\n<br>")
        self.write(json_encode(response))
class TestHelpInfoHandler(tornado.web.RequestHandler):
    """Form-driven self-test page for the help-info query action."""

    def get(self):
        self.render("static/help_info_db.html")

    def post(self):
        request = {
            u"TYPE": MSG_HELP_INFO_REQ,
            u"HELPID": self.get_argument("HELPID"),
        }
        encoded = json_encode(request)
        self.write("<br>\r\nclient msg:\r\n<br>")
        self.write(encoded)
        # Round-trip through JSON so the action sees exactly what a client sends.
        response = HelpInfoAction(json_decode(encoded))
        self.write("<br>\r\nserver msg:\r\n<br>")
        self.write(json_encode(response))
#################################################################################
############################# Main Entry ######################################
#################################################################################
# Tornado application settings; static files are served from ./static.
settings = {
    'debug' : True,
    "static_path": os.path.join(os.path.dirname(__file__), "static"),
}
# URL routing table mapping paths to the handlers defined above.
url_handlers = [
    (r"/", RootHandler),
    (r"/app", AppHandler),
    (r"/mutual", MutualHandler),
    (r"/test_where", WhereHandler), # a self-test for handler
    (r"/test_location", LocationHandler), # a self-test for handler
    (r"/test_user_register", TestUserRegHandler),
    (r"/test_user_login", TestUserLoginHandler),
    (r"/test_user_logout", TestUserLogoutHandler),
    (r"/test_user_info", TestUserInfoHandler),
    (r"/test_user_polling", TestUserPollingHandler),
    (r"/test_user_report", TestUserReportHandler),
    (r"/test_user_distance", TestUserDistanceHandler),
    (r"/test_help_add", TestHelpAddHandler),
    (r"/test_help_accept", TestHelpAcceptHandler),
    (r"/test_help_reject", TestHelpRejectHandler),
    (r"/test_help_finish", TestHelpFinishHandler),
    (r"/test_help_info", TestHelpInfoHandler),
]
log_print('starting...')
# Abort startup if either backing database cannot be initialized.
if UserDBInit(UserDBName, UserDBTable) == STATUS_FAIL:
    log_print('failed to init user db')
    sys.exit()
if HelpDBInit(HelpDBName, HelpDBTable) == STATUS_FAIL:
    log_print('failed to init help db')
    sys.exit()
if IS_BAE == True:
    # Baidu App Engine deployment: expose a WSGI 'application' object.
    app = tornado.wsgi.WSGIApplication(url_handlers,**settings)
    from bae.core.wsgi import WSGIApplication
    application = WSGIApplication(app)
else:
    # Stand-alone deployment: run tornado's own HTTP server.
    class Application(tornado.web.Application):
        def __init__(self):
            tornado.web.Application.__init__(self, url_handlers, **settings)
    from tornado.options import define, options
    define("port", default=8008, help="run on the given port", type=int)
# NOTE(review): this guard uses Application/options, which are only defined
# when IS_BAE is False -- confirm the script is never run directly on BAE.
if __name__ == "__main__":
    tornado.options.parse_command_line()
    http_server = tornado.httpserver.HTTPServer(Application())
    http_server.listen(options.port)
    log_print('tornado server listening at %d' % options.port)
    tornado.ioloop.IOLoop.instance().start()
|
import csv
#import pip._vendor.requests as requests
import requests
import logging
import os
import argparse
from requests import ReadTimeout, ConnectTimeout, HTTPError, Timeout, ConnectionError
class MergeCsvRecords(object):
    """Merge rows of a local accounts CSV with status data fetched from a REST API."""

    def __init__(self, url='http://interview.wpengine.io/v1/accounts'):
        self.url = url
        # One pooled session reused for every per-account request.
        self.session = requests.Session()

    def url_ok(self):
        """Raise ValueError when the API endpoint is unreachable or errors."""
        try:
            # Use the pooled session (the original created a one-off request).
            r = self.session.head(self.url)
            r.raise_for_status()
        except (ConnectTimeout, HTTPError, ReadTimeout, Timeout, ConnectionError):
            raise ValueError('Api Server is not available, please retry after some time....')

    def readcsv(self, inputfile):
        """Yield each row of *inputfile* as a dict.

        Raises FileNotFoundError when the file does not exist; logs a warning
        (but keeps processing) when the 'Account ID' column is missing.
        """
        if not os.path.exists(inputfile):
            raise FileNotFoundError(inputfile)
        with open(inputfile, 'r') as fd:
            reader = csv.DictReader(fd)
            if 'Account ID' not in reader.fieldnames:
                # Warn instead of aborting so partially-conformant files still run.
                logging.warning(f'Account ID missing... {reader.fieldnames}')
            for row in reader:
                yield row

    def fetchaccountinfo(self, accountid):
        """Return the JSON account record, or None when the request fails."""
        try:
            resp = self.session.get(f'{self.url}/{accountid}')
            resp.raise_for_status()
            return resp.json()
        except Exception as ex:
            logging.error(f'unable to process account id {accountid}. error: {ex}')

    def generateoutput(self, inputfile, outputfile):
        """Write *outputfile*: input rows (minus 'Account Name') plus API status fields."""
        with open(outputfile, 'w', newline='') as csvfile:
            fieldnames = ['Account ID', 'First Name', 'Created On', 'Status', 'Status Set On']
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writeheader()
            for row in self.readcsv(inputfile):
                del row['Account Name']
                accountinfo = self.fetchaccountinfo(row['Account ID'])
                if not accountinfo:
                    # API failure already logged in fetchaccountinfo; skip the row.
                    continue
                row['Status'] = accountinfo['status']
                # NOTE(review): 'Status Set On' is filled from the API's
                # 'created_on' field -- confirm there is no dedicated
                # status-change timestamp in the API response.
                row['Status Set On'] = accountinfo['created_on']
                writer.writerow(row)
def parse_arguments() -> argparse.Namespace:
    """Parse command-line options for the CSV-merge tool.

    Returns:
        argparse.Namespace with ``inputfile``, ``output`` and ``verbose``.
    """
    # Description corrected: the tool merges a CSV with API data, it does not
    # convert JSON. Return annotation fixed from the meaningless 'object'.
    parser = argparse.ArgumentParser(description="Merge an accounts CSV with API status data")
    parser.add_argument("-i", "--inputfile", help="Input CSV data file", type=str)
    parser.add_argument("-o", "--output", help="Output CSV file", type=str)
    parser.add_argument("-v", "--verbose", help="Increase output verbose, more v more output.",
                        action="count", default=0)
    return parser.parse_args()
def main():
    """Entry point: configure logging, verify the API, write the merged CSV."""
    args = parse_arguments()
    # logging.basicConfig is a no-op once the root logger is configured, so
    # the original pattern (INFO first, DEBUG later) never enabled DEBUG.
    # Pick the level once, after the flags are known.
    logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO)
    mergecsv = MergeCsvRecords()
    mergecsv.url_ok()
    mergecsv.generateoutput(args.inputfile, args.output)


if __name__ == "__main__":
    main()
|
#import RPi.GPIO as GPIO
import time
import serial

# Open the Pi's serial port with a 1-second read timeout.
port = serial.Serial("/dev/ttyAMA0", baudrate=115200, timeout=1.0)
#var count = 0
#GPIO.setmode(GPIO.BCM)
#GPIO.setup(25, GPIO.OUT)
# Periodically send a greeting over the serial link, forever.
while True:
    #GPIO.output(25, False)
    # NOTE(review): on Python 3, pyserial's write() requires bytes (b"...");
    # a str argument only works on Python 2 -- confirm the target interpreter.
    port.write("hello from Rpi\r\n");
    # NOTE(review): time.sleep() takes seconds, so this pauses ~16.7 minutes
    # between writes -- confirm 1000 was not intended as milliseconds (1 s).
    time.sleep(1000)
'''
Created on 20.11.2016

@author: simon

Build script: compiles the Cython module ``filters`` with OpenMP enabled.
Run as ``python setup.py build_ext --inplace``.
'''
from distutils.core import setup,Extension
from Cython.Build import cythonize

# Compile filters.pyx with OpenMP (-fopenmp / -lgomp) so the extension can
# use cython.parallel (prange etc.).
ext=Extension(name="filters",
              sources=["filters.pyx"],
              extra_compile_args=['-fopenmp'],
              extra_link_args=['-lgomp','-fopenmp']
              )

# annotate=True emits an HTML report showing the Python/C boundary per line.
setup(
    ext_modules = cythonize([ext],annotate=True)
)
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 3 15:18:09 2020
@author: Justyn
"""
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException
from bs4 import BeautifulSoup as bs
import time
import re
class Scraper:
    """Scrape past-auction listings and per-car details from carsandbids.com."""

    def __init__(self, driver_path=r'C:/Users/Justyn/Documents/Data science/chromedriver_win32/chromedriver.exe'):
        # Driver path is parameterized so the scraper works on other machines;
        # the default preserves the original hard-coded location.
        self.driver = webdriver.Chrome(driver_path)
        self.url = 'https://carsandbids.com/past-auctions/'
        self.hrefs = []

    def get_hrefs(self):
        """Return the auction-page links collected by read_data()."""
        return self.hrefs

    def get_features(self, url):
        """Scrape *url* and return its feature list."""
        self.get_info(url)
        return self.features

    def load_url(self):
        """Open the past-auctions index page."""
        self.driver.get(self.url)

    def scroll_down(self):
        """Scroll to the bottom of the page to trigger lazy loading."""
        self.driver.execute_script('window.scroll(0, document.body.scrollHeight)')

    def read_data(self):
        """Append each car's detail-page href from the loaded index page to self.hrefs."""
        # find_element(By.XPATH, ...) replaces the find_element_by_* helpers,
        # which were removed in Selenium 4 (By is already imported above).
        main = self.driver.find_element(By.XPATH, '//*[@id="root"]/div[2]/div[2]/div/ul')
        soup = bs(main.get_attribute('innerHTML'), 'html.parser')
        for elem in soup.findAll('a', {'class':'hero'}):
            self.hrefs.append(str(elem['href']))

    def get_info(self, url):
        """Scrape one car page into self.features.

        Order appended: year, quick-fact values (make, model, mileage, ...),
        selling price (highest bid if reserve not met), reserve flag,
        horsepower matches, torque matches.
        """
        self.features = []
        self.driver.get(url)
        # Car year: first number found in the page heading.
        year_heading = WebDriverWait(self.driver, 300).until(
            EC.presence_of_element_located((By.XPATH, '//*[@id="root"]/div[2]/div[1]/div/div[1]')))
        yr_soup = bs(year_heading.get_attribute('innerHTML'), 'html.parser')
        yr = re.compile(r'(\d+)').findall(yr_soup.text)
        self.features.append(yr[0])
        # Quick facts: make, model, mileage, powertrain, location, etc.
        quick_facts = self.driver.find_element(By.XPATH, '//*[@id="root"]/div[2]/div[2]/div[1]/div[2]')
        qf_soup = bs(quick_facts.get_attribute('innerHTML'), 'html.parser')
        for dd in qf_soup.findAll('dd'):
            self.features.append(dd.text)
        # Selling price; if the reserve wasn't met this is the highest bid.
        selling_price = self.driver.find_element(By.CLASS_NAME, 'bid-value')
        sp_soup = bs(selling_price.get_attribute('innerHTML'), 'html.parser')
        self.features.append(sp_soup.text)
        # Determine if the listing had a reserve.
        no_reserve_tag = self.driver.find_element(By.XPATH, '//*[@id="root"]/div[2]/div[1]/div/div[2]')
        ns_soup = bs(no_reserve_tag.get_attribute('innerHTML'), 'html.parser')
        if 'No Reserve' in ns_soup.text:
            self.features.append('No reserve')
        else:
            self.features.append('Has reserve')
        # Horsepower and torque pulled from the description block (raw strings
        # for the regexes; the doubled parentheses in the original were no-ops).
        power_text = self.driver.find_element(By.XPATH, '//*[@id="root"]/div[2]/div[2]/div[1]/div[3]/div[2]/div')
        p_soup = bs(power_text.get_attribute('innerHTML'), 'html.parser')
        hp_regex = re.compile(r'(\d+)\s*horsepower|(\d+)\s*hp')
        self.features.append(hp_regex.findall(p_soup.text))
        torque_regex = re.compile(r'(\d+)\s*lb')
        self.features.append(torque_regex.findall(p_soup.text))
|
# Read the start day ("Dia N" -> N) and start time HH:MM:SS, then the end
# day and end time, and print the elapsed days/hours/minutes/seconds.
dInicial = int(input().split()[1])
h = input().split(':')
hi = int(h[0])
mi = int(h[1])
si = int(h[2])
dFinal = int(input().split()[1])
h = input().split(':')
hf = int(h[0])
mf = int(h[1])
sf = int(h[2])

# Subtract field by field, borrowing from the next-larger unit when negative.
days = dFinal - dInicial
hour = hf - hi
if hour < 0:
    hour += 24
    days -= 1
# Renamed from 'min'/'sec': 'min' shadowed the builtin min() function.
minutes = mf - mi
if minutes < 0:
    minutes += 60
    hour -= 1
seconds = sf - si
if seconds < 0:
    seconds += 60
    minutes -= 1

print('{} dia(s)'.format(days))
print('{} hora(s)'.format(hour))
print('{} minuto(s)'.format(minutes))
print('{} segundo(s)'.format(seconds))
|
import sys
import listsearch
sys.path.append(r"C:\DevLcl\Sandbox\python-sandbox\think_python\chapter_10")
fin = open(r'C:\DevLcl\Sandbox\python-sandbox\think_python\words.txt')
def word_list():
    """Return every word from the open word file, stripped of whitespace."""
    return [line.strip() for line in fin]
def find_interlocks2(mylist, word):
    """Print *word* if its even- and odd-position letters each form a word in *mylist*."""
    evens = word[::2]
    odds = word[1::2]
    if listsearch.search_in_list(mylist, evens) and listsearch.search_in_list(mylist, odds):
        print(word, evens, odds)
def find_interlocks3(mylist, word):
    """Print *word* if all three stride-3 letter subsequences are words in *mylist*."""
    parts = (word[::3], word[1::3], word[2::3])
    # all() short-circuits exactly like the original chained 'and'.
    if all(listsearch.search_in_list(mylist, part) for part in parts):
        print(word, *parts)
def interlocks2():
    """Scan the sorted word list for every two-way interlock."""
    words = sorted(word_list())
    for candidate in words:
        find_interlocks2(words, candidate)
def interlocks3():
    """Scan the sorted word list for every three-way interlock."""
    words = sorted(word_list())
    for candidate in words:
        find_interlocks3(words, candidate)


interlocks3()
|
'''
Created on Dec 30, 2020
@author: mballance
'''
import asyncio
import datetime
import multiprocessing
import os
import subprocess
import sys
from asyncio.subprocess import DEVNULL, STDOUT
from asyncio.tasks import FIRST_COMPLETED
from colorama import Fore
from colorama import Style
from typing import List
from mkdv.job_queue import JobQueue
from mkdv.job_spec import JobSpec
class Runner(object):
    """Builds ('_setup') and runs a collection of mkdv job specs.

    Jobs are grouped by their mkdv.mk makefile into JobQueues; each queue's
    setup target is built once into a shared cache directory, then every job
    is launched through mkdv.wrapper via the configured backend, up to a
    parallelism quota. Failing jobs can be re-queued once with MKDV_DEBUG=1.
    """

    def __init__(self, root, backend, specs):
        self.root = root                # root dir for rundirs, caches, report/
        self.backend = backend          # launcher providing quota() and launch()
        self.specs = specs              # list of JobSpec to execute
        self.maxpar = -1                # max parallel jobs; -1 -> backend.quota()
        self.rerun_failing = True       # re-queue failures once with MKDV_DEBUG=1
        self.limit_time = None          # runner-wide time limit (spec value wins)
        self.tool = None                # default MKDV_TOOL when spec sets none

    async def runjobs(self):
        """Build all queues, run every job, and print a PASS/FAIL summary."""
        start = datetime.datetime.now()
        loop = asyncio.get_event_loop()

        # Ensure each test has a unique name
        name_m = {}
        cache_m = {}    # NOTE(review): cache_m/cache_id appear unused here
        cache_id = 0
        n_passed = 0
        n_failed = 0

        # Map of mkdv.mk path -> job_queue
        queue_m = {}

        # Ensure we create the report directory first
        os.makedirs(os.path.join(self.root, "report"), exist_ok=True)

        # Sort specs into the queues
        for s in self.specs:
            if s.mkdv_mk not in queue_m.keys():
                queue_m[s.mkdv_mk] = JobQueue(s.mkdv_mk)
            queue_m[s.mkdv_mk].jobs.append(s)

        active_queues = list(queue_m.values())

        status = await self.run_builds(active_queues)

        # Propagate the cachedir
        for q in active_queues:
            for j in q.jobs:
                j.cachedir = q.cachedir

        # Shove everything back in a single queue
        run_q = []
        for q in active_queues:
            run_q.extend(q.jobs)

        if not status:
            # At least one setup build failed; abort without running any test.
            return

        active_procs = []
        if self.maxpar == -1:
            avail_jobs = self.backend.quota()
        else:
            avail_jobs = self.maxpar

        print("maxpar: %d %d" % (self.maxpar, avail_jobs))

        while len(run_q) > 0 or len(active_procs) > 0:
            # Launch new jobs while there is quota
            # and
            while len(active_procs) < avail_jobs and len(run_q) > 0:
                # TODO: could randomize selection
                spec = run_q.pop(0)

                if spec.rundir is None:
                    rundir = os.path.join(self.root, spec.fullname)
                    if spec.fullname in name_m.keys():
                        # Need to disambiguate
                        id = name_m[spec.fullname]+1
                        rundir += "_%04d" % (id,)
                        name_m[spec.fullname] = id
                    else:
                        name_m[spec.fullname] = 0
                    spec.rundir = rundir
                # NOTE(review): when spec.rundir was already set (e.g. a debug
                # re-run), 'rundir' below still holds a value from a previous
                # iteration -- confirm job.yaml lands where intended.

                os.makedirs(spec.rundir, exist_ok=True)

                # cmdline = ['srun', '-E']
                # cmdline = ['srun', '--nodelist=oatfieldx1,oatfieldx2']
                # cmdline = ['srun']
                cmdline = []
                cmdline.extend([sys.executable, "-m", "mkdv.wrapper"])
                cmdline.append(os.path.join(rundir, "job.yaml"))

                self.init_spec(spec)
                self.write_job_yaml(
                    os.path.join(rundir, "job.yaml"),
                    spec.cachedir,
                    spec)

                if spec.rerun:
                    print(f"{Fore.YELLOW}[Start]{Style.RESET_ALL} %s (rerun)" % spec.fullname)
                else:
                    print(f"{Fore.YELLOW}[Start]{Style.RESET_ALL} %s" % spec.fullname)
                sys.stdout.flush()

                # proc = await asyncio.subprocess.create_subprocess_exec(
                #     *cmdline,
                #     cwd=spec.rundir)
                proc = await self.backend.launch(
                    cmdline,
                    cwd=spec.rundir)

                active_procs.append((proc,spec,None))

            # Wait for at least once job to complete
            done, pending = await asyncio.wait(
                [loop.create_task(p[0].wait()) for p in active_procs],
                return_when=FIRST_COMPLETED)

            # print("done=" + str(done) + " pending=" + str(pending))
            # Keep still-running procs; harvest results from finished ones.
            old_active_procs = active_procs
            active_procs = []
            for p in old_active_procs:
                if p[0].returncode is None:
                    active_procs.append(p)
                else:
                    if p[2] is not None:
                        p[2].close() # Close stdout save
                    spec = p[1]
                    if os.path.isfile(os.path.join(p[1].rundir, "status.txt")):
                        is_passed,msg = self.checkstatus(os.path.join(p[1].rundir, "status.txt"))
                        if is_passed:
                            print(f"{Fore.GREEN}[PASS]{Style.RESET_ALL} " + p[1].fullname + " - " + msg)
                            n_passed += 1
                        else:
                            if spec.rerun:
                                print(f"{Fore.YELLOW}[ExpFail]{Style.RESET_ALL} " + p[1].fullname + " - " + msg + " (rerun)")
                            else:
                                print(f"{Fore.RED}[FAIL]{Style.RESET_ALL} " + p[1].fullname + " - " + msg)
                            # Number of failures shouldn't be bumped for re-runs
                            # NOTE(review): n_failed IS incremented even when
                            # spec.rerun is set, contradicting the comment
                            # above -- confirm which behavior is intended.
                            n_failed += 1

                            if self.rerun_failing:
                                # Determine whether we need to rerun with debug
                                if not "MKDV_DEBUG" in spec.variables.keys() or spec.variables["MKDV_DEBUG"] != "1":
                                    print(f"{Fore.YELLOW}[QueueDebugRerun]{Style.RESET_ALL} " + spec.fullname)
                                    spec.variables["MKDV_DEBUG"] = "1"
                                    # Add a '_dbg' suffix
                                    spec.rundir += "_dbg"
                                    spec.rerun = True
                                    run_q.insert(0, spec)
                                pass
                    else:
                        print(f"{Fore.RED}[FAIL]{Style.RESET_ALL} " + p[1].fullname + " - no status.txt")
                        n_failed += 1
            sys.stdout.flush()

        end = datetime.datetime.now()
        duration = end - start

        print()
        print()
        print(f"{Fore.YELLOW}[Run ]{Style.RESET_ALL} " + str(n_passed+n_failed))
        print(f"{Fore.GREEN}[Pass]{Style.RESET_ALL} " + str(n_passed))
        print(f"{Fore.RED}[Fail]{Style.RESET_ALL} " + str(n_failed))
        # Trim sub-second precision from the elapsed-time display.
        tv = str(duration)
        tv = tv[0:tv.rfind('.')]
        print(f"{Fore.YELLOW}[Time]{Style.RESET_ALL} %s" % tv)

    def write_job_yaml(
            self,
            job_yaml,
            cachedir,
            spec : JobSpec):
        """Write the job.yaml file consumed by mkdv.wrapper for one job run."""
        with open(job_yaml, "w") as fp:
            fp.write("job:\n");
            fp.write(" mkfile: %s\n" % spec.mkdv_mk)
            fp.write(" cachedir: %s\n" % cachedir)
            fp.write(" reportdir: %s\n" % os.path.join(self.root, "report"))
            fp.write(" name: %s\n" % spec.localname)
            fp.write(" qname: %s\n" % spec.fullname)
            # Per-spec time limit overrides the runner-wide default.
            if spec.limit_time is not None:
                fp.write(" limit-time: %s\n" % str(spec.limit_time))
            elif self.limit_time is not None:
                fp.write(" limit-time: %s\n" % str(self.limit_time))
            if spec.rerun:
                fp.write(" rerun: true\n")
            else:
                fp.write(" rerun: false\n")
            if len(spec.variables) > 0:
                fp.write(" variables:\n")
                for v in spec.variables.keys():
                    fp.write(" %s: \"%s\"\n" % (v, spec.variables[v]))
            if spec.description is not None:
                fp.write(" description: |\n")
                for line in spec.description.split("\n"):
                    fp.write(" %s\n" % line)
            if len(spec.labels) > 0:
                fp.write(" labels:\n")
                for key in spec.labels.keys():
                    fp.write(" - %s: %s\n" % (key, spec.labels[key]))
            if len(spec.parameters) > 0:
                fp.write(" parameters:\n")
                for key in spec.parameters.keys():
                    fp.write(" - %s: %s\n" % (key, spec.parameters[key]))
            if len(spec.attachments) > 0:
                fp.write(" attachments:\n")
                for a in spec.attachments:
                    fp.write(" - %s: %s\n" % (a[0], a[1]))
        # Historical command-line construction, kept for reference:
        # fp.write(" rundir: %s\n" % rundir)spec.mkdv_mk)
        # cmdline.append("-f")
        # cmdline.append(spec.mkdv_mk)
        # cmdline.append("MKDV_RUNDIR=" + rundir)
        # cmdline.append("MKDV_CACHEDIR=" + queue.cachedir)
        # cmdline.append("MKDV_TEST=" + spec.localname)
        # cmdline.append("MKDV_JOB=" + spec.localname)
        # cmdline.append("MKDV_JOB_QNAME=" + spec.fullname)
        # cmdline.append("MKDV_JOB_PARENT=" + spec.fullname[0:-(len(spec.localname)+1)])
        # for v in spec.variables.keys():
        #     cmdline.append(v + "=" + str(spec.variables[v]))
        # # cmdline.append("MKDV_TEST=" + spec.localname)
        # # TODO: separate build/run
        # cmdline.append("run")

    def init_spec(self, spec : JobSpec):
        """Apply runner-level defaults (MKDV_TOOL) to a spec before launch."""
        if "MKDV_TOOL" not in spec.variables.keys() and self.tool is not None:
            spec.variables["MKDV_TOOL"] = str(self.tool)

    async def run_builds(self, jobs : List[JobQueue]):
        """Run each queue's make '_setup' target; return True when all succeed."""
        loop = asyncio.get_event_loop()
        build_fails = 0

        # Start
        queue_i = 0
        active_procs = []

        if self.maxpar > 0:
            avail_jobs = self.maxpar
        else:
            # Launch everything
            avail_jobs = len(jobs)

        # print("spec_i=" + str(spec_i) + " " + str(len(self.specs)))
        while queue_i < len(jobs) or len(active_procs) > 0:
            # print("spec_i=" + str(spec_i) + " " + str(len(self.specs)))
            # Launch new jobs while there is quota
            # and
            while avail_jobs > 0 and queue_i < len(jobs):
                job = jobs[queue_i]
                # Each queue builds into its own cache_<i> directory.
                job.cachedir = os.path.join(self.root, "cache_" + str(queue_i))
                os.makedirs(job.cachedir, exist_ok=True)
                cmdline = ["make"]
                cmdline.append("-f")
                cmdline.append(job.mkdv_mk)
                cmdline.append("MKDV_RUNDIR=" + job.cachedir)
                cmdline.append("MKDV_CACHEDIR=" + job.cachedir)
                # TODO: separate build/run
                cmdline.append("_setup")

                # Capture combined stdout/stderr in the cache directory.
                stdout = open(os.path.join(job.cachedir, "stdout.log"), "w")
                # stdout = DEVNULL
                # stdout = None

                # print("cmdline: " + str(cmdline))
                print(f"{Fore.YELLOW}[Start Setup]{Style.RESET_ALL} " + job.mkdv_mk)
                sys.stdout.flush()
                proc = await asyncio.subprocess.create_subprocess_exec(
                    *cmdline,
                    cwd=job.cachedir,
                    stderr=STDOUT,
                    stdout=stdout)
                # print("proc=" + str(proc))

                active_procs.append((proc,job,stdout))
                queue_i += 1
                avail_jobs -= 1

            # Wait for at least once job to complete
            done, pending = await asyncio.wait(
                [loop.create_task(p[0].wait()) for p in active_procs],
                return_when=FIRST_COMPLETED)
            old_active_procs = active_procs
            active_procs = []
            for p in old_active_procs:
                if p[0].returncode is None:
                    active_procs.append(p)
                else:
                    p[2].close() # Close stdout save
                    if p[0].returncode == 0:
                        print(f"{Fore.GREEN}[Setup PASS]{Style.RESET_ALL} " + p[1].mkdv_mk)
                    else:
                        build_fails += 1
                        print(f"{Fore.RED}[Setup FAIL]{Style.RESET_ALL} " + p[1].mkdv_mk + " -- exit code " + str(p[0].returncode))
                    sys.stdout.flush()
                    # Return the quota slot used by this finished build.
                    avail_jobs += 1

        return build_fails == 0

    def checkstatus(self, status_txt):
        """Parse status.txt; return (passed, message) from the first PASS:/FAIL: line."""
        have_pass = False
        have_fail = False
        msg = ""
        with open(status_txt, "r") as fp:
            for l in fp.readlines():
                if l.startswith("PASS:"):
                    have_pass = True
                    msg = l[len("PASS:"):].strip()
                    break
                elif l.startswith("FAIL:"):
                    have_fail = True
                    msg = l[len("FAIL:"):].strip()
                    break

        if not have_pass and not have_fail:
            # A run that produced neither marker is treated as a failure.
            return (False,"no PASS or FAIL")
        else:
            return ((have_pass and not have_fail),msg)
|
import uuid
import src.models.users.constants as UserConstants
from src.common.database import Database
from src.common.utils import Utils
from src.models.notebooks.notebook import Notebook
from src.models.tags.tag import Tag
import src.models.users.errors as UserErrors
class User(object):
    """A registered user, persisted in MongoDB via Database."""

    def __init__(self, username, password, email, lists=None, _id=None):
        """Create a user.

        lists defaults to a fresh ['main', 'inbox'] per instance; the previous
        mutable default argument was a single list shared by every User that
        omitted the parameter, so one instance's append leaked into all others.
        """
        self.username = username
        self.password = password    # stored pre-hashed (see register_user)
        self.email = email
        self.lists = ['main', 'inbox'] if lists is None else lists
        self._id = uuid.uuid4().hex if _id is None else _id
        self.id = self._id          # alias kept for callers that use .id

    def __repr__(self):
        return "<User {}>".format(self.username)

    @staticmethod
    def is_login_valid(username, password):
        """Return True for valid credentials; raise a UserErrors subclass otherwise."""
        user_data = Database.find_one(UserConstants.COLLECTION, {"username": username})
        if user_data is None:
            raise UserErrors.UserNotExistsError("User not found.")
        if not Utils.check_hashed_password(password, user_data['password']):
            raise UserErrors.IncorrectPasswordError("Incorrect Password")
        return True

    @staticmethod
    def register_user(username, password, email):
        """Create the user (with hashed password) and their default inbox notebook."""
        user_data = Database.find_one(UserConstants.COLLECTION, {"username": username})
        if user_data is not None:
            raise UserErrors.UserAlreadyRegisteredError("Username taken. Please choose another one.")
        if not Utils.email_is_valid(email):
            raise UserErrors.InvalidEmailError("Invalid email format.")
        User(username, Utils.hash_password(password), email).save_to_mongo()
        notebook = Notebook("inbox", username)
        notebook.save_to_mongo()
        return True

    def save_to_mongo(self):
        """Insert this user's document into the users collection."""
        Database.insert(UserConstants.COLLECTION, self.json())

    def json(self):
        """Return the MongoDB document representation of this user."""
        return {
            "username": self.username,
            "password": self.password,
            "email": self.email,
            "lists": self.lists,
            "_id": self._id
        }

    @classmethod
    def find_by_username(cls, username):
        """Load a user by username."""
        return cls(**Database.find_one(UserConstants.COLLECTION, {'username': username}))

    @classmethod
    def find_by_id(cls, id):
        """Load a user by document id."""
        return cls(**Database.find_one(UserConstants.COLLECTION, {'_id': id}))

    def get_notebooks(self):
        """Return this user's notebooks."""
        return Notebook.find_by_username(self.username)

    def get_tags(self):
        """Return this user's tags."""
        return Tag.find_by_username(self.username)

    def update(self):
        """Persist the current in-memory state back to MongoDB."""
        Database.update(UserConstants.COLLECTION, {'_id': self._id}, self.json())
|
import dash
from dash.dependencies import Input, Output
import dash_html_components as html
import dash_core_components as dcc
import dash_alternative_viz as dav
import plotly_express as px
import altair as alt
from bokeh.embed import json_item
import holoviews as hv
import matplotlib.pyplot as plt
import seaborn as sns
from io import BytesIO
hv.extension("bokeh")

# Gapminder demo dataset (country, year, pop, gdpPercap, lifeExp, continent).
gapminder = px.data.gapminder()

app = dash.Dash(__name__)
# Serve JS/CSS assets locally instead of from a CDN.
app.scripts.config.serve_locally = True
app.css.config.serve_locally = True

# Each chart occupies half of a two-column table row.
td_style = {"width": "50%", "margin": "20px"}
app.layout = html.Div([
    # Year slider (1952-2007, 5-year steps) driving all four figures.
    html.Div([ dcc.Slider(id="year", min=1952, max=2007, step=5,
                          marks={x: str(x) for x in range(1952, 2008, 5)}) ],
             style={ "width": "600px", "padding-bottom": "30px", "margin": "0 auto",
                     "margin-top": "-70px", "text-align": "center" } ),
    # 2x2 grid: Plotly Express, Vega-Lite, Seaborn SVG, Bokeh JSON.
    html.Table([
        html.Tr([html.Td([dcc.Graph(id="px")], style=td_style),
                 html.Td([dav.VegaLite(id="vega")], style=td_style) ]),
        html.Tr([html.Td([dav.Svg(id="seaborn")], style=td_style),
                 html.Td([dav.BokehJSON(id="bokeh")], style=td_style) ]),
    ], style={"width": "1000px", "margin": "0 auto"} ),
])
@app.callback(Output("px", "figure"), [Input("year", "value")])
def plotly_fig(year):
    """Plotly Express scatter of GDP vs life expectancy for the chosen year."""
    df = gapminder.query("year == %d" % (year or 1952))
    fig = px.scatter(df, x="gdpPercap", y="lifeExp", size="pop", size_max=30,
                     color="continent", log_x=True, height=400, width=600,
                     title="Plotly Express", hover_name="country", hover_data=df.columns)
    # Strip the "continent=" prefix Plotly adds to legend entries.
    fig = fig.for_each_trace(lambda t: t.update(name=t.name.replace("continent=", "")))
    return fig.update(layout_margin_t=60, layout_title_y=0.91)
@app.callback(Output("vega", "spec"), [Input("year", "value")])
def altair_fig(year):
    """Vega-Lite spec (via Altair) for the chosen year's gapminder scatter."""
    df = gapminder.query("year == %d" % (year or 1952))
    x_enc = alt.X("gdpPercap:Q", scale=alt.Scale(type="log"))
    y_enc = alt.Y("lifeExp:Q", scale=alt.Scale(zero=False))
    chart = alt.Chart(df, height=250, width=400).mark_circle().encode(
        x_enc, y_enc, size="pop:Q", color="continent:N", tooltip=list(df.columns))
    return chart.interactive().properties(title="Altair / Vega-Lite").to_dict()
@app.callback(Output("bokeh", "json"), [Input("year", "value")])
def bokeh_fig(year):
    """Bokeh JSON document (via HoloViews) for the chosen year's scatter."""
    df = gapminder.query("year == %d" % (year or 1952))
    points = hv.Points(df, kdims=["gdpPercap", "lifeExp"]).opts(
        color="continent", size=hv.dim("pop") ** (0.5) / 800,
        logx=True, height=330, width=530, cmap="Category10", tools=["hover"],
        legend_position="bottom_right", title="HoloViews / Bokeh")
    return json_item(hv.render(points))
@app.callback(Output("seaborn", "contents"), [Input("year", "value")])
def seaborn_fig(year):
    """SVG markup string for the chosen year's seaborn scatter."""
    df = gapminder.query("year == %d" % (year or 1952))
    fig, ax = plt.subplots()
    sns.scatterplot(data=df, ax=ax, x="gdpPercap", y="lifeExp",
                    size="pop", hue="continent", sizes=(0, 800))
    ax.set_xscale("log")
    ax.set_title("Seaborn / matplotlib")
    fig.set_size_inches(5.5, 3.7)
    fig.tight_layout()
    # Render to an in-memory SVG and hand back the markup text.
    buffer = BytesIO()
    fig.savefig(buffer, format="svg")
    return buffer.getvalue().decode("utf-8")
if __name__ == "__main__":
    # Development server with hot reloading; debug=True is not for production.
    app.run_server(debug=True)
|
"""Run a Flask web server for symbol recognition."""
# Core Library modules
import base64
import io
from pathlib import Path
from typing import Any, Dict
# Third party modules
from flask import Flask, render_template, request
from PIL import Image
def create_app(model: Path, labels: Path) -> Flask:
    """Create the Flask app serving the drawing page and the /classify endpoint.

    :param model: path to the trained model file.
    :param labels: path to the label file matching the model.
    :return: the configured Flask application.
    """
    app = Flask(__name__)

    @app.route("/")
    def index():
        """Serve the drawing page."""
        return render_template("index.html")

    @app.route("/classify", methods=["POST"])
    def classify() -> Dict[str, Any]:
        """Classify a base64-encoded image posted in the 'imgBase64' form field.

        :return: dict with an (empty) error list and the top-5 predictions,
            each as {"symbol": ..., "probability": "NN%"}.
        """
        # First party modules
        # Imported lazily — presumably to keep app start-up light or avoid a
        # circular import; confirm before hoisting to module level.
        from minimal_symbol_recognizer.predict import predict

        imagestr = request.form["imgBase64"]
        # Strip the "data:image/...;base64," prefix before decoding.
        decoded = base64.b64decode(imagestr.split(",")[1])
        # Context manager closes the image even if predict() raises
        # (the original only closed it on the success path). The temporary
        # debug print loop flagged "TODO: Just temporarily added" was removed.
        with Image.open(io.BytesIO(decoded)) as image:
            predictions = predict(model, labels, image)
        return {
            "errors": [],
            "prediction": [
                {"symbol": pred, "probability": f"{prob * 100:.0f}%"}
                for pred, prob in predictions[:5]
            ],
        }

    return app
def run_test_server(model: Path, labels: Path) -> None:
    """Build the app and serve it on all interfaces (development use only)."""
    create_app(model, labels).run(host="0.0.0.0")
|
from django import forms
from .models import InputModel
# from .models import Location
#
# class LocationForm(forms.ModelForm):
# class Meta:
# model = Location
# fields = "__all__"
class InputFormModel(forms.ModelForm):
    """ModelForm exposing only InputModel.info, rendered as a styled text input."""

    class Meta:
        model = InputModel
        fields = ('info', )

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Replace the default widget so the field renders as a wide,
        # rounded text box with a map-selection placeholder.
        widget_attrs = {
            'id': 'info',
            'type': 'text',
            'name': 'cord',
            'style': "width:80%; font-size:26px;border-radius:10px;",
            'placeholder': 'Select a location on map!',
        }
        self.fields['info'].widget = forms.TextInput(attrs=widget_attrs)
# Author: Matthew Shelbourn | Student ID: 001059665 | mshelbo@wgu.edu | December, 2020
# distance.py ingests data from "wgups-distance-data.csv" and "wgups-distance-addresses-data.csv", assigns them to objects for use in program
import csv
# Ingest distance data from 'wgups-distance-data.csv' and assign to list
# Space-time complexity O(N)
with open('./data/wgups-distance-data.csv') as csvfile:
    distance_csv = csv.reader(csvfile, delimiter=',')
    # distances[i][j]: raw CSV cell (a string) for the distance between stop i and stop j.
    distances = [row for row in distance_csv]
# Ingest distance address data from 'wgups-distance-addresses-data.csv' and assign to dict
# Space-time complexity O(N)
with open('./data/wgups-distance-addresses-data.csv') as csvfile:
    distance_addresses_csv = csv.reader(csvfile, delimiter=',')
    # Keyed by integer stop id -> {'name': ..., 'address': ...}.
    distance_addresses = {int(rows[0]): {'name': rows[1], 'address': rows[2]} for rows in distance_addresses_csv}
# Getter for distances
# Space-time complexity O(1)
def get_distances():
    """Return the raw distance matrix loaded from CSV at import time."""
    return distances
# Getter for addresses
# Space-time complexity O(1)
def get_addresses():
    """Return the address lookup dict (stop id -> {'name', 'address'}) loaded at import time."""
    return distance_addresses
# Getter for distance_calc
# Retrieves distance between 2 addresses based on their indices in the distance matrix
# Space-time complexity O(1)
def calc_distance(add_1, add_2):
    """Return the distance between two stops from the distance matrix.

    Each argument is either 0 (the hub) or a package record whose element at
    index 13 holds that package's destination index into the matrix —
    presumably set when packages are loaded; confirm against the package
    data layout.

    The CSV matrix appears to be triangular: indexing into the missing half
    raises IndexError, which is caught and the lookup retried with the
    indices swapped.

    NOTE(review): values are returned exactly as read from the CSV, so they
    are likely strings rather than floats — confirm callers convert them.
    """
    if add_1 == 0 and add_2 == 0:
        # Both stops are the hub.
        return 0
    elif add_1 == 0:
        try:
            return distances[add_1][add_2[13]]
        except IndexError:
            # Row too short: use the transposed entry instead.
            return distances[add_2[13]][add_1]
    elif add_2 == 0:
        try:
            return distances[add_1[13]][add_2]
        except IndexError:
            return distances[add_2][add_1[13]]
    else:
        try:
            return distances[add_1[13]][add_2[13]]
        except IndexError:
            return distances[add_2[13]][add_1[13]]
# Getter for get_dest_name
# Retrieves the name of a destination based on a given package
# Space-time complexity O(1)
def get_dest_name(package):
    """Return the destination name for a package; 0 denotes the hub (index 0)."""
    stop_index = 0 if package == 0 else package[13]
    return distance_addresses[stop_index]['name']
def employee(name, *manager):
    """Demonstrate variadic positional arguments.

    :param name: employee name (required positional argument).
    :param manager: any extra positional arguments, collected into a tuple.
    """
    # print is a function in Python 3; the original Python 2 print
    # statements are syntax errors under Python 3.
    print(name)
    print(manager)

employee('Mohan')
employee('akash', 'jatin')
------------------------------------------------------------------------------------
def employee(name, **kwargs):
    """Demonstrate keyword-argument packing.

    :param name: employee name (required positional argument).
    :param kwargs: arbitrary keyword arguments, collected into a dict.
    """
    # Fixed the signature typo ('name.**kwargs' -> 'name, **kwargs') and
    # modernised the Python 2 print statements to Python 3 calls.
    print(name)
    print(kwargs)

employee('jatin')
employee('jatin', age=35, manager='rahul', location='pune')
|
"""
.. moduleauthor:: Johan Comparat <johan.comparat__at__gmail.com>
.. contributor :: Sofia Meneses-Goytia <s.menesesgoytia__at__gmail.com>
.. contributor :: Violeta Gonzalez-Perez <violegp__at__gmail.com>
.. contributor :: Harry Hicks <iamhrh__at__hotmail.co.uk>
.. contributor :: Justus Neumann <jusneuma.astro__at__gmail.com>
General purpose:
................
The class StellarPopulationModel is a wrapper dedicated to handling the fit of stellar population models on observed spectra.
It gathers all inputs : from the model and from the data.
*Imports*::
import numpy as np
import astropy.io.fits as pyfits
import astropy.units as u
import glob
import pandas as pd
import os,sys
import copy
from firefly_estimations_3d import estimation
from firefly_instrument import *
from firefly_dust import *
from firefly_fitter import *
from firefly_library import *
"""
import time
import numpy as np
import astropy.io.fits as pyfits
import astropy.units as u
import glob
import pandas as pd
import os,sys
from os.path import join
import copy
from scipy.interpolate import interp1d
#from scipy.stats import sigmaclip
from firefly_estimations_3d import estimation
#from firefly_dust import *
#import firefly_dust as f_dust
from firefly_dust import hpf, unred, determine_attenuation, dust_calzetti_py
from firefly_instrument import downgrade
from firefly_fitter import fitter
from firefly_library import airtovac, convert_chis_to_probs, light_weights_to_mass, calculate_averages_pdf, normalise_spec, match_data_models
import matplotlib.pyplot as plt
# Sentinel written to output headers when a quantity cannot be computed.
default_value = -9999
# Threshold below which log10 is not attempted (guards log of zero/negative).
EPS = 10.E-10
# Map short IMF codes used throughout the module to their full names.
dict_imfs = {'cha': 'Chabrier', 'ss': 'Salpeter', 'kr': 'Kroupa'}

def trylog10(value):
    """Return log10(value), or default_value when value is below EPS."""
    return default_value if value < EPS else np.log10(value)
class StellarPopulationModel:
"""
:param specObs: specObs observed spectrum object initiated with the firefly_setup class.
:param models: choose between 'MaStar', 'm11'
* MaStar corresponds to Maraston et al. 2020 <https://ui.adsabs.harvard.edu/abs/2019arXiv191105748M>
* m11 corresponds to all the models compared in `Maraston and Stromback 2011 <http://adsabs.harvard.edu/abs/2011MNRAS.418.2785M>`_.
:param model_libs: only necessary if using m11 or MaStar.
Choose between `MILES <http://adsabs.harvard.edu/abs/2011A%26A...532A..95F>`_, MILES revisednearIRslope, MILES UVextended, `STELIB <http://adsabs.harvard.edu/abs/2003A%26A...402..433L>`_, `ELODIE <http://adsabs.harvard.edu/abs/2007astro.ph..3658P>`_, `MARCS <http://adsabs.harvard.edu/abs/2008A%26A...486..951G>`_.
* MILES, MILES, STELIB, ELODIE are empirical libraries.
* MARCS is a theoretical library.
For MaStar models choose between 'E-MaStar' or 'Th-MaStar'.
* E-MaStar stellar parameters are derived from fitting empirical stellar spectra from the MILES stellar library (see Chen et al 2020, in preparation).
* Th-MaStar stellar parameters are derived from fitting stellar spectra of theoretical stellar atmospheres from MARCS and ATLAS (see Hill et al 2020, in preparation).
:param imfs: choose the `initial mass function <https://en.wikipedia.org/wiki/Initial_mass_function>`_:
* 'ss' for `Salpeter <http://adsabs.harvard.edu/abs/1955ApJ...121..161S>`_or
* 'kr' for `Kroupa <http://adsabs.harvard.edu/cgi-bin/bib_query?arXiv:1112.3340>`_ or
:param hpf_mode: 'on' means the code uses HPF to dereden the spectrum, if 'hpf_only' then EBV=0.
Notes
-----
.. note::
*This is how it proceeds :*
#. reads the parameter file by using parameters_obtain(parameters.py)
#. It opens the data file, model files, then it matches their resolutions by downgrading the models to instrumental and velocity dispersion resolution
#. Determines dust attenuation curve to be applied to the models. Two options : through HPF fitting (3.1.) or through filtered values to determing SP properties (3.2.).
#. It fits the models to the data
#. Gets mass-weighted SSP contributions using saved M/L ratio.
#. Convert chis into probabilities and calculates all average properties and errors (assuming the number of degrees of freedom is the number of wavelength points)
#. Optionally produces a plot
#. Finally, it writes the output files
"""
def __init__(self, specObs, outputFile, cosmo, models = 'MaStar', model_libs = ['gold'], imfs = ['kr'], hpf_mode = 'on', age_limits = [0,15], downgrade_models = True, dust_law = 'calzetti', max_ebv = 1.5, num_dust_vals = 200, dust_smoothing_length = 200, max_iterations = 10, fit_per_iteration_cap = 1000, pdf_sampling = 300, data_wave_medium = 'vacuum', Z_limits = [-3,3], wave_limits = [0,99999990], suffix = "",use_downgraded_models = False, write_results=True, flux_units=10**-17):
    """Store the fit configuration and preload per-library resolution info.

    :param specObs: observed spectrum object (firefly_setup); must expose
        at least ``vdisp`` (and, later, wavelength/flux/error arrays).
    :param outputFile: path the results are written to.
    :param cosmo: cosmology object used later for luminosity distances.
    :param models: 'm11', 'm11-sg' or 'MaStar'.
    :param model_libs: stellar libraries to fit (e.g. ['MILES'] or ['gold']).
    :param imfs: IMF codes — keys of dict_imfs ('cha', 'ss', 'kr').
    :param hpf_mode: 'on' -> HPF-derived attenuation; 'hpf_only' -> EBV=0.
    :param age_limits: allowed age range for the SSPs.
    :param Z_limits: allowed metallicity range (log10, compared via 10**Z).
    :param wave_limits: wavelength window used for the fit.
    :param flux_units: flux normalisation of the data (default 1e-17).

    NOTE(review): the list defaults (model_libs, imfs, age_limits, Z_limits,
    wave_limits) are mutable default arguments shared across calls; callers
    should not mutate them in place.
    """
    self.cosmo = cosmo
    self.specObs = specObs
    self.outputFile = outputFile
    #################### STARTS HERE ####################
    # sets the models
    self.models = models # m11/MaStar
    self.model_libs = model_libs
    self.suffix = suffix
    # One entry per model library: a scalar delta-lambda (m11) or an
    # R = lambda/delta_lambda array (MaStar), consumed later by get_model.
    self.deltal_libs = []
    self.vdisp_round = int(round(self.specObs.vdisp/5.0)*5.0) # rounding vDisp for the models
    self.use_downgraded_models = use_downgraded_models
    self.write_results = write_results
    self.flux_units = flux_units
    if (self.models == 'm11') or (self.models == 'm11-sg'):
        # Fixed instrumental resolution per empirical/theoretical library.
        for m in self.model_libs:
            if m == 'MILES':
                self.deltal_libs.append(2.55)
            elif m == 'STELIB':
                self.deltal_libs.append(3.40)
            elif m == 'ELODIE':
                self.deltal_libs.append(0.55)
            elif m == 'MARCS':
                self.deltal_libs.append(0.1)
    elif self.models =='MaStar':
        model_path = os.environ['STELLARPOPMODELS_DIR']
        ver='v1.1'
        hdul=pyfits.open(model_path+'/MaStar_SSP_'+ver+'.fits.gz')
        r_model=hdul[2].data[1,:]
        # This provides R=lamba/delta_lambda as numpy ndarray. The params deltal_libs and deltal should probably be renamed.
        self.deltal_libs.append(r_model)
    # sets the Initial mass function
    self.imfs = imfs
    self.hpf_mode = hpf_mode
    self.age_limits = age_limits
    self.downgrade_models = downgrade_models
    self.dust_law = dust_law
    self.max_ebv = max_ebv
    self.num_dust_vals = num_dust_vals
    self.dust_smoothing_length = dust_smoothing_length
    # Specific fitting options
    self.max_iterations = max_iterations
    self.fit_per_iteration_cap = fit_per_iteration_cap
    # Sampling size when calculating the maximum pdf (100=recommended)
    self.pdf_sampling = pdf_sampling
    # Default is air, unless manga is used
    self.data_wave_medium = data_wave_medium
    self.Z_limits = Z_limits
    self.wave_limits = wave_limits
def get_model(self, model_used, imf_used, deltal, vdisp, wave_instrument, r_instrument, ebv_mw):
    """
    Retrieves all relevant model files, in their downgraded format.
    If they aren't downgraded to the correct resolution / velocity dispersion,
    takes the base models in their native form and converts to downgraded files.

    :param model_used: model library to be used, for example 'MILES' or 'gold'.
    :param imf_used: IMF code to be used, 'ss' or 'kr'.
    :param deltal: delta lambda in the models.
    :param vdisp: velocity dispersion observed in the galaxy
        (NOTE(review): unused here — the code reads self.specObs.vdisp instead).
    :param wave_instrument: wavelength array from the observations.
    :param r_instrument: resolution array from the observations.
    :param ebv_mw: E(B-V) from the dust maps for the galaxy.

    Workflow
    ----------
    A. loads the m11 / m11-sg / MaStar models: maps parameters to the right
       files and constructs the model arrays; converts wavelengths to air or vacuum.
    B. downgrades each model to match the data resolution.
    C. applies Milky-Way attenuation.
    D. stores models in self.model_wavelength, self.model_flux,
       self.age_model, self.metal_model and returns them as well.

    The m11 and m11-sg branches were previously two near-identical copies of
    the same ~100 lines; they now share the helpers below and differ only in
    the directory suffix and the file-suffix -> metallicity mapping.
    """
    # File-name metallicity suffix -> linear Z/Z_sun for the plain m11 models.
    # (Several historic values map onto the same representative grid point,
    # e.g. both .bhb and .rhb variants of z0001 -> 10**-1.35.)
    zchar_to_znum_m11 = {
        'z001': 10**(-0.33), 'z002': 10**(0), 'z004': 10**(0.35),
        'z0001.bhb': 10**(-1.35), 'z0001.rhb': 10**(-1.35),
        'z10m4.bhb': 10**(-2.25), 'z10m4.rhb': 10**(-2.25), 'z10m4': 10**(-2.25),
        'z-0.6': 10**-0.6, 'z-0.9': 10**-0.9, 'z-1.2': 10**-1.2,
        'z-1.6': 10**-1.6, 'z-1.9': 10**-1.9,
    }
    # Same mapping for the '.sg' (SG) flavour of the m11 models.
    zchar_to_znum_m11_sg = {
        'z001.sg': 10**(-0.33), 'z002.sg': 10**(0), 'z004.sg': 10**(0.35),
        'z0001.bhb.sg': 10**(-1.35), 'z0001.rhb.sg': 10**(-1.35),
        'z0p25.sg': 10**0.25,
        'zm0p7.bhb.sg': 10**-0.7, 'zm0p7.rhb.sg': 10**-0.7,
        'zm1p0.bhb.sg': 10**-1.0, 'zm1p0.rhb.sg': 10**-1.0,
    }

    def select_metal_files(model_path, zchar_to_znum):
        # Keep only model files whose metallicity lies strictly inside Z_limits.
        metal_files, metal = [], []
        for fname in sorted(glob.glob(model_path + '*')):
            zchar = fname[len(model_path):]
            if zchar not in zchar_to_znum:
                raise NameError('Unrecognised metallicity! Check model file names.')
            znum = zchar_to_znum[zchar]
            if znum > 10**(self.Z_limits[0]) and znum < 10**(self.Z_limits[1]):
                metal_files.append(fname)
                metal.append(znum)
        return metal_files, metal

    def downgrade_and_redden(wavelength, flux):
        # B. downgrade to the instrument/velocity-dispersion resolution,
        # C. then redden by the Milky-Way E(B-V) if non-zero.
        if self.downgrade_models:
            mf = downgrade(wavelength, flux, deltal, self.specObs.vdisp, wave_instrument, r_instrument)
        else:
            mf = copy.copy(flux)
        if ebv_mw != 0:
            attenuations = unred(wavelength, ebv=0.0-ebv_mw)
            return mf * attenuations
        return mf

    def build_m11_arrays(metal_files, metal):
        # Read each per-metallicity SSP table and assemble flux/age/metal arrays.
        model_flux, age_model, metal_model = [], [], []
        for zi, z in enumerate(metal_files):
            model_table = pd.read_table(z, converters={'Age': np.float64}, header=None, usecols=[0, 2, 3], names=['Age', 'wavelength_model', 'flux_model'], delim_whitespace=True)
            age_data = np.unique(model_table['Age'].values.ravel())
            for a in age_data:
                logyrs_a = trylog10(a) + 9.0
                # Age filter (limits are in Gyr).
                if (((10**(logyrs_a-9)) < self.age_limits[0]) or ((10**(logyrs_a-9)) > self.age_limits[1])):
                    continue
                spectrum = model_table.loc[model_table.Age == a, ['wavelength_model', 'flux_model']].values
                wavelength_int, flux = spectrum[:, 0], spectrum[:, 1]
                # m11 tables are in air; convert to vacuum if the data requires it.
                if self.data_wave_medium == 'vacuum':
                    wavelength = airtovac(wavelength_int)
                else:
                    wavelength = wavelength_int
                model_flux.append(downgrade_and_redden(wavelength, flux))
                age_model.append(a)
                metal_model.append(metal[zi])
        return wavelength, model_flux, age_model, metal_model

    if self.models in ('m11', 'm11-sg'):
        if self.models == 'm11-sg':
            model_path = join(os.environ['STELLARPOPMODELS_DIR'], 'SSP_M11_'+model_used+'_SG', 'ssp_M11_' + model_used + '.' + imf_used)
            metal_files, metal = select_metal_files(model_path, zchar_to_znum_m11_sg)
        else:
            model_path = join(os.environ['STELLARPOPMODELS_DIR'], 'SSP_M11_'+model_used, 'ssp_M11_' + model_used + '.' + imf_used)
            metal_files, metal = select_metal_files(model_path, zchar_to_znum_m11)
        wavelength, model_flux, age_model, metal_model = build_m11_arrays(metal_files, metal)
        # D. store and return.
        self.model_wavelength, self.model_flux, self.age_model, self.metal_model = wavelength, model_flux, age_model, metal_model
        return wavelength, model_flux, age_model, metal_model

    elif self.models == 'MaStar':
        model_path = os.environ['STELLARPOPMODELS_DIR']
        ver = 'v1.1'
        lib = model_used
        # Map the IMF code to the grid's IMF slope axis value.
        if imf_used == 'kr':
            slope = 1.3
        elif imf_used == 'ss':
            slope = 2.35
        else:
            print('Unrecognised IMF. Please choose between kr and ss')
            sys.exit()
        hdul = pyfits.open(model_path+'/MaStar_SSP_'+ver+'.fits.gz')
        # Parameter grid axes: age, metallicity and IMF slope.
        t = hdul[1].data[:, 0, 0, 0]
        Z = hdul[1].data[0, :, 0, 1]
        s = hdul[1].data[0, 0, :, 2]
        wavelength = hdul[2].data[0, :]
        # NOTE(review): fluxgrid is only defined for the 'gold' library; any
        # other value of model_used would raise NameError below — confirm
        # whether other libraries were ever intended here.
        if (lib == 'gold'):
            fluxgrid = hdul[3].data
        sidx = np.where(s == slope)[0][0]
        model_flux, age_model, metal_model = [], [], []
        for ii, age in enumerate(t):
            if ((age < self.age_limits[0]) or (age > self.age_limits[1])):
                continue
            for jj, metal in enumerate(Z):
                if ((metal < self.Z_limits[0]) or (metal > self.Z_limits[1])):
                    continue
                # Skip the very metal-poor, very young corner of the grid.
                if (metal < -1.35 and age < 1):
                    continue
                flux = fluxgrid[ii, jj, sidx, :]
                # No air/vacuum conversion needed, assuming models are in vacuum.
                model_flux.append(downgrade_and_redden(wavelength, flux))
                age_model.append(age)
                metal_model.append(10**metal)
        # D. store and return.
        self.model_wavelength, self.model_flux, self.age_model, self.metal_model = wavelength, model_flux, age_model, metal_model
        return wavelength, model_flux, age_model, metal_model
def fit_models_to_data(self):
"""
Once the data and models are loaded, then execute this function to find the best model. It loops overs the models to be fitted on the data:
#. gets the models
#. matches the model and data to the same resolution
#. normalises the spectra
"""
t_i = time.time()
print( "getting the models, t=", t_i )
for mi,mm in enumerate(self.model_libs):
# loop over the models
for ii in self.imfs:
# loop over the IMFs
# A. gets the models
deltal = self.deltal_libs[mi]
model_wave_int, model_flux_int, age, metal = self.get_model( mm, ii, deltal, self.specObs.vdisp, self.specObs.restframe_wavelength, self.specObs.r_instrument, self.specObs.ebv_mw)
# B. matches the model and data to the same resolution
#print( "Matching models to data" )
#print("data: w,f,b,fe", len(self.specObs.restframe_wavelength), len(self.specObs.flux), len(self.specObs.bad_flags), len(self.specObs.error) )
self.raw_model_wave_int = model_wave_int
self.raw_model_flux_int = model_flux_int
self.raw_age = age
self.raw_metal = metal
#print(len(model_wave_int), len(model_flux_int), len(age), len(metal))
wave, data_flux, error_flux, model_flux_raw = match_data_models( self.specObs.restframe_wavelength, self.specObs.flux, self.specObs.bad_flags, self.specObs.error, model_wave_int, model_flux_int, self.wave_limits[0], self.wave_limits[1], saveDowngradedModel = False)
#print("model: w,f,fe,fr", len(wave), len(data_flux), len(error_flux), len(model_flux_raw))
self.matched_wave = wave
self.matched_model_flux_raw = model_flux_raw
# C. normalises the models to the median value of the model [erg/s/A/Msun]
# print "Normalising the models"
model_flux, mass_factors = normalise_spec(data_flux, model_flux_raw)
self.matched_model_flux = model_flux
self.matched_mass_factors = mass_factors
# 3. Corrects from dust attenuation
print('Corrects from dust attenuation and fitting, Dt=', time.time()-t_i,'seconds')
if self.hpf_mode=='on':
# 3.1. Determining attenuation curve through HPF fitting, apply attenuation curve to models and renormalise spectra
best_ebv, attenuation_curve = determine_attenuation(wave, data_flux, error_flux, model_flux, self, age, metal)
self.attenuation_curve = attenuation_curve
#model_flux_atten = np.zeros(np.shape(model_flux_raw))
#for m in range(len(model_flux_raw)):
#model_flux_atten[m] = attenuation_curve * model_flux_raw[m]
model_flux_atten = np.array([ attenuation_curve * model_flux_raw[m] for m in range(len(model_flux_raw)) ])
model_flux, mass_factors = normalise_spec(data_flux, model_flux_atten)
print('dust done, Dt=', time.time()-t_i,'seconds')
# 4. Fits the models to the data
#self.fit_per_iteration_cap = 1000
light_weights, chis, branch = fitter(wave, data_flux, error_flux, model_flux, self)
print('fitting done, Dt=', time.time()-t_i,'seconds')
elif self.hpf_mode == 'hpf_only':
# 3.2. Uses filtered values to determing SP properties only."
smoothing_length = self.dust_smoothing_length
hpf_data = hpf(data_flux)
hpf_models = np.zeros(np.shape(model_flux))
for m in range(len(model_flux)):
hpf_models[m] = hpf(model_flux[m])
zero_dat = np.where( (np.isnan(hpf_data)) & (np.isinf(hpf_data)) )
hpf_data[zero_dat] = 0.0
for m in range(len(model_flux)):
hpf_models[m,zero_dat] = 0.0
hpf_error = np.zeros(len(error_flux))
hpf_error[:] = np.median(error_flux)/np.median(data_flux) * np.median(hpf_data)
hpf_error[zero_dat] = np.max(hpf_error)*999999.9
best_ebv = 0.0
hpf_models,mass_factors = normalise_spec(hpf_data,hpf_models)
print('dust done, Dt=', time.time()-t_i,'seconds')
# 4. Fits the models to the data
light_weights, chis, branch = fitter(wave, hpf_data, hpf_error, hpf_models, self)
print('fitting done, Dt=', time.time()-t_i,'seconds')
print('Gets the best model, Dt=', time.time()-t_i,'seconds')
# 5. Get mass-weighted SSP contributions using saved M/L ratio.
unnorm_mass, mass_weights = light_weights_to_mass(light_weights, mass_factors)
# print "Fitting complete"
if np.all(np.isnan(mass_weights)):
tbhdu = self.create_dummy_hdu()
else:
# print "Calculating average properties and outputting"
# 6. Convert chis into probabilities and calculates all average properties and errors
self.dof = len(wave)
probs = convert_chis_to_probs(chis, self.dof)
dist_lum = self.cosmo.luminosity_distance( self.specObs.redshift).to( u.cm ).value
#print(light_weights)
#print(np.shape(light_weights))
#stop
averages = calculate_averages_pdf(probs, light_weights, mass_weights, unnorm_mass, age, metal, self.pdf_sampling, dist_lum, self.flux_units)
unique_ages = np.unique(age)
marginalised_age_weights = np.zeros(np.shape(unique_ages))
marginalised_age_weights_int = np.sum(mass_weights.T,1)
for ua in range(len(unique_ages)):
marginalised_age_weights[ua] = np.sum(marginalised_age_weights_int[np.where(age==unique_ages[ua])])
best_fit_index = [np.argmin(chis)]
best_fit = np.dot(light_weights[best_fit_index],model_flux)[0]
# The attenuation curve as well as the full wavelength range are currently not used as output.
#attenuation = dust_calzetti_py(best_ebv,model_wave_int)
#self.attenuation = attenuation
#itp = interp1d(np.hstack(( 2000., wave, 20000)) , np.hstack((attenuation_curve[0], attenuation_curve, attenuation_curve[-1])) )
#attenuation = itp(model_wave_int)
#best_fit_full = np.dot(light_weights[best_fit_index]*mass_factors, model_flux_int)[0]*attenuation
#best_fit_full_noHPF = np.dot(light_weights[best_fit_index]*mass_factors, model_flux_int)[0]
# stores outputs in the object
self.best_fit_index = best_fit_index
self.best_fit = best_fit
#self.best_fit_full = best_fit_full
self.model_flux = model_flux
self.dist_lum = dist_lum
self.age = np.array(age)
self.metal = np.array(metal)
self.mass_weights = mass_weights
self.light_weights = light_weights
self.chis = chis
self.branch = branch
self.unnorm_mass = unnorm_mass
self.probs = probs
self.averages = averages
self.wave = wave
bf_mass = (self.mass_weights[self.best_fit_index]>0)[0]
bf_light = (self.light_weights[self.best_fit_index]>0)[0]
mass_per_ssp = self.unnorm_mass[self.best_fit_index[0]][bf_mass]*self.flux_units* 4 * np.pi * self.dist_lum**2.0
age_per_ssp = self.age[bf_mass]
metal_per_ssp = self.metal[bf_mass]
weight_mass_per_ssp = self.mass_weights[self.best_fit_index[0]][bf_mass]
weight_light_per_ssp = self.light_weights[self.best_fit_index[0]][bf_light]
order = np.argsort(-weight_light_per_ssp)
# Do we want to put this all in another function??
# We could provide it with the arrays and call something like get_massloss_parameters()?
# I think it looks a little untidy still because of my bad coding.
# Gets the mass loss factors.
if dict_imfs[self.imfs[0]] == 'Salpeter':
ML_metallicity, ML_age, ML_totM, ML_alive, ML_wd, ML_ns, ML_bh, ML_turnoff = np.loadtxt(join(os.environ['FF_DIR'],'data','massloss_salpeter.txt'), unpack=True, skiprows=2)
# First build the grids of the quantities. Make sure they are in linear units.
estimate_ML_totM, estimate_ML_alive, estimate_ML_wd = estimation(10**ML_metallicity, ML_age, ML_totM), estimation(10**ML_metallicity, ML_age, ML_alive), estimation(10**ML_metallicity, ML_age, ML_wd)
estimate_ML_ns, estimate_ML_bh, estimate_ML_turnoff = estimation(10**ML_metallicity, ML_age, ML_ns), estimation(10**ML_metallicity, ML_age, ML_bh), estimation(10**ML_metallicity, ML_age, ML_turnoff)
# Now loop through SSPs to find the nearest values for each.
final_ML_totM, final_ML_alive, final_ML_wd, final_ML_ns, final_ML_bh, final_ML_turnoff, final_gas_fraction = [], [], [], [], [], [], []
for number in range(len(age_per_ssp)):
new_ML_totM = estimate_ML_totM.estimate(metal_per_ssp[number],age_per_ssp[number])
new_ML_alive = estimate_ML_alive.estimate(metal_per_ssp[number],age_per_ssp[number])
new_ML_wd = estimate_ML_wd.estimate(metal_per_ssp[number],age_per_ssp[number])
new_ML_ns = estimate_ML_ns.estimate(metal_per_ssp[number],age_per_ssp[number])
new_ML_bh = estimate_ML_bh.estimate(metal_per_ssp[number],age_per_ssp[number])
new_ML_turnoff = estimate_ML_turnoff.estimate(metal_per_ssp[number],age_per_ssp[number])
final_ML_totM.append(mass_per_ssp[number]*new_ML_totM)
final_ML_alive.append(mass_per_ssp[number]*new_ML_alive)
final_ML_wd.append(mass_per_ssp[number]*new_ML_wd)
final_ML_ns.append(mass_per_ssp[number]*new_ML_ns)
final_ML_bh.append(mass_per_ssp[number]*new_ML_bh)
final_ML_turnoff.append(mass_per_ssp[number]*new_ML_turnoff)
final_gas_fraction.append(mass_per_ssp[number]-new_ML_totM)
final_ML_totM, final_ML_alive, final_ML_wd, final_ML_ns, final_ML_bh, final_ML_turnoff, final_gas_fraction= np.array(final_ML_totM), np.array(final_ML_alive), np.array(final_ML_wd), np.array(final_ML_ns), np.array(final_ML_bh), np.array(final_ML_turnoff), np.array(final_gas_fraction)
if (dict_imfs[self.imfs[0]] == 'Chabrier'):
ML_metallicity, ML_age, ML_totM, ML_alive, ML_wd, ML_ns, ML_bh, ML_turnoff = np.loadtxt(join(os.environ['FF_DIR'],'data', 'massloss_chabrier.txt'), unpack=True, skiprows=2)
# First build the grids of the quantities. Make sure they are in linear units.
estimate_ML_totM, estimate_ML_alive, estimate_ML_wd = estimation(10**ML_metallicity, ML_age, ML_totM), estimation(10**ML_metallicity, ML_age, ML_alive), estimation(10**ML_metallicity, ML_age, ML_wd)
estimate_ML_ns, estimate_ML_bh, estimate_ML_turnoff = estimation(10**ML_metallicity, ML_age, ML_ns), estimation(10**ML_metallicity, ML_age, ML_bh), estimation(10**ML_metallicity, ML_age, ML_turnoff)
# Now loop through SSPs to find the nearest values for each.
final_ML_totM, final_ML_alive, final_ML_wd, final_ML_ns, final_ML_bh, final_ML_turnoff, final_gas_fraction = [], [], [], [], [], [], []
for number in range(len(age_per_ssp)):
new_ML_totM = estimate_ML_totM.estimate(metal_per_ssp[number],age_per_ssp[number])
new_ML_alive = estimate_ML_alive.estimate(metal_per_ssp[number],age_per_ssp[number])
new_ML_wd = estimate_ML_wd.estimate(metal_per_ssp[number],age_per_ssp[number])
new_ML_ns = estimate_ML_ns.estimate(metal_per_ssp[number],age_per_ssp[number])
new_ML_bh = estimate_ML_bh.estimate(metal_per_ssp[number],age_per_ssp[number])
new_ML_turnoff = estimate_ML_turnoff.estimate(metal_per_ssp[number],age_per_ssp[number])
final_ML_totM.append(mass_per_ssp[number]*new_ML_totM)
final_ML_alive.append(mass_per_ssp[number]*new_ML_alive)
final_ML_wd.append(mass_per_ssp[number]*new_ML_wd)
final_ML_ns.append(mass_per_ssp[number]*new_ML_ns)
final_ML_bh.append(mass_per_ssp[number]*new_ML_bh)
final_ML_turnoff.append(mass_per_ssp[number]*new_ML_turnoff)
final_gas_fraction.append(mass_per_ssp[number]-new_ML_totM)
final_ML_totM, final_ML_alive, final_ML_wd, final_ML_ns, final_ML_bh, final_ML_turnoff, final_gas_fraction= np.array(final_ML_totM), np.array(final_ML_alive), np.array(final_ML_wd), np.array(final_ML_ns), np.array(final_ML_bh), np.array(final_ML_turnoff), np.array(final_gas_fraction)
if (dict_imfs[self.imfs[0]] == 'Kroupa'):
ML_metallicity, ML_age, ML_totM, ML_alive, ML_wd, ML_ns, ML_bh, ML_turnoff = np.loadtxt(join(os.environ['FF_DIR'],'data','massloss_kroupa.txt'), unpack=True, skiprows=2)
# First build the grids of the quantities. Make sure they are in linear units.
estimate_ML_totM, estimate_ML_alive, estimate_ML_wd = estimation(10**ML_metallicity, ML_age, ML_totM), estimation(10**ML_metallicity, ML_age, ML_alive), estimation(10**ML_metallicity, ML_age, ML_wd)
estimate_ML_ns, estimate_ML_bh, estimate_ML_turnoff = estimation(10**ML_metallicity, ML_age, ML_ns), estimation(10**ML_metallicity, ML_age, ML_bh), estimation(10**ML_metallicity, ML_age, ML_turnoff)
# Now loop through SSPs to find the nearest values for each.
final_ML_totM, final_ML_alive, final_ML_wd, final_ML_ns, final_ML_bh, final_ML_turnoff, final_gas_fraction = [], [], [], [], [], [], []
for number in range(len(age_per_ssp)):
new_ML_totM = estimate_ML_totM.estimate(metal_per_ssp[number],age_per_ssp[number])
new_ML_alive = estimate_ML_alive.estimate(metal_per_ssp[number],age_per_ssp[number])
new_ML_wd = estimate_ML_wd.estimate(metal_per_ssp[number],age_per_ssp[number])
new_ML_ns = estimate_ML_ns.estimate(metal_per_ssp[number],age_per_ssp[number])
new_ML_bh = estimate_ML_bh.estimate(metal_per_ssp[number],age_per_ssp[number])
new_ML_turnoff = estimate_ML_turnoff.estimate(metal_per_ssp[number],age_per_ssp[number])
final_ML_totM.append(mass_per_ssp[number]*new_ML_totM)
final_ML_alive.append(mass_per_ssp[number]*new_ML_alive)
final_ML_wd.append(mass_per_ssp[number]*new_ML_wd)
final_ML_ns.append(mass_per_ssp[number]*new_ML_ns)
final_ML_bh.append(mass_per_ssp[number]*new_ML_bh)
final_ML_turnoff.append(mass_per_ssp[number]*new_ML_turnoff)
final_gas_fraction.append(mass_per_ssp[number]-new_ML_totM)
final_ML_totM, final_ML_alive, final_ML_wd, final_ML_ns, final_ML_bh, final_ML_turnoff, final_gas_fraction= np.array(final_ML_totM), np.array(final_ML_alive), np.array(final_ML_wd), np.array(final_ML_ns), np.array(final_ML_bh), np.array(final_ML_turnoff), np.array(final_gas_fraction)
# Calculate the total mass loss from all the SSP contributions.
combined_ML_totM = np.sum(final_ML_totM)
combined_ML_alive = np.sum(final_ML_alive)
combined_ML_wd = np.sum(final_ML_wd)
combined_ML_ns = np.sum(final_ML_ns)
combined_ML_bh = np.sum(final_ML_bh)
combined_gas_fraction = np.sum(mass_per_ssp - final_ML_totM)
# 8. It writes the output file
waveCol = pyfits.Column(name="wavelength",format="D", unit="Angstrom", array= wave)
dataCol = pyfits.Column(name="original_data",format="D", unit="1e-17erg/s/cm2/Angstrom", array= data_flux)
errorCol = pyfits.Column(name="flux_error",format="D", unit="1e-17erg/s/cm2/Angstrom", array= error_flux)
best_fitCol = pyfits.Column(name="firefly_model",format="D", unit="1e-17erg/s/cm2/Angstrom", array= best_fit)
#waveCol_um = pyfits.Column(name="wavelength",format="D", unit="Angstrom", array= model_wave_int)
#best_fitCol_um = pyfits.Column(name="firefly_model",format="D", unit="1e-17erg/s/cm2/Angstrom", array= best_fit_full)
#best_fitCol_um_noHPF = pyfits.Column(name="firefly_model_noHPF",format="D", unit="1e-17erg/s/cm2/Angstrom", array= best_fit_full_noHPF)
#cols = pyfits.ColDefs([ waveCol_um, best_fitCol_um, best_fitCol_um_noHPF]) # dataCol, errorCol, waveCol, best_fitCol,
cols = pyfits.ColDefs([waveCol, dataCol, errorCol, best_fitCol])
tbhdu = pyfits.BinTableHDU.from_columns(cols)
#tbhdu.header['HIERARCH age_universe (Gyr)'] = trylog10(self.cosmo.age(self.specObs.redshift).value*10**9)
tbhdu.header['HIERARCH redshift'] = self.specObs.redshift
tbhdu.header['HIERARCH Age_unit'] = 'log (age/Gyr)'
tbhdu.header['HIERARCH Metallicity_unit'] = '[Z/H]'
tbhdu.header['HIERARCH Mass_unit'] = 'log (M/Msun)'
tbhdu.header['HIERARCH SSP_sfr'] = 'log (M*/Age(Gyr))'
tbhdu.header['IMF'] = dict_imfs[self.imfs[0]]
tbhdu.header['Model'] = self.model_libs[0]
tbhdu.header['HIERARCH converged'] = 'True'
tbhdu.header['HIERARCH age_lightW'] = trylog10(averages['light_age'])
tbhdu.header['HIERARCH age_lightW_up_1sig'] = trylog10(averages['light_age_1_sig_plus'])
tbhdu.header['HIERARCH age_lightW_low_1sig'] = trylog10(averages['light_age_1_sig_minus'])
tbhdu.header['HIERARCH age_lightW_up_2sig'] = trylog10(averages['light_age_2_sig_plus'])
tbhdu.header['HIERARCH age_lightW_low_2sig'] = trylog10(averages['light_age_2_sig_minus'])
tbhdu.header['HIERARCH age_lightW_up_3sig'] = trylog10(averages['light_age_3_sig_plus'])
tbhdu.header['HIERARCH age_lightW_low_3sig'] = trylog10(averages['light_age_3_sig_minus'])
tbhdu.header['HIERARCH metallicity_lightW'] = trylog10(averages['light_metal'])
tbhdu.header['HIERARCH metallicity_lightW_up_1sig'] = trylog10(averages['light_metal_1_sig_plus'])
tbhdu.header['HIERARCH metallicity_lightW_low_1sig'] = trylog10(averages['light_metal_1_sig_minus'])
tbhdu.header['HIERARCH metallicity_lightW_up_2sig'] = trylog10(averages['light_metal_2_sig_plus'])
tbhdu.header['HIERARCH metallicity_lightW_low_2sig'] = trylog10(averages['light_metal_2_sig_minus'])
tbhdu.header['HIERARCH metallicity_lightW_up_3sig'] = trylog10(averages['light_metal_3_sig_plus'])
tbhdu.header['HIERARCH metallicity_lightW_low_3sig'] = trylog10(averages['light_metal_3_sig_minus'])
tbhdu.header['HIERARCH age_massW'] = trylog10(averages['mass_age'])
tbhdu.header['HIERARCH age_massW_up_1sig'] = trylog10(averages['mass_age_1_sig_plus'])
tbhdu.header['HIERARCH age_massW_low_1sig'] = trylog10(averages['mass_age_1_sig_minus'])
tbhdu.header['HIERARCH age_massW_up_2sig'] = trylog10(averages['mass_age_2_sig_plus'])
tbhdu.header['HIERARCH age_massW_low_2sig'] = trylog10(averages['mass_age_2_sig_minus'])
tbhdu.header['HIERARCH age_massW_up_3sig'] = trylog10(averages['mass_age_3_sig_plus'])
tbhdu.header['HIERARCH age_massW_low_3sig'] = trylog10(averages['mass_age_3_sig_minus'])
tbhdu.header['HIERARCH metallicity_massW'] = trylog10(averages['mass_metal'])
tbhdu.header['HIERARCH metallicity_massW_up_1sig'] = trylog10(averages['mass_metal_1_sig_plus'])
tbhdu.header['HIERARCH metallicity_massW_low_1sig'] = trylog10(averages['mass_metal_1_sig_minus'])
tbhdu.header['HIERARCH metallicity_massW_up_2sig'] = trylog10(averages['mass_metal_2_sig_plus'])
tbhdu.header['HIERARCH metallicity_massW_low_2sig'] = trylog10(averages['mass_metal_2_sig_minus'])
tbhdu.header['HIERARCH metallicity_massW_up_3sig'] = trylog10(averages['mass_metal_3_sig_plus'])
tbhdu.header['HIERARCH metallicity_massW_low_3sig'] = trylog10(averages['mass_metal_3_sig_minus'])
tbhdu.header['HIERARCH total_mass'] = trylog10(averages['stellar_mass'])
tbhdu.header['HIERARCH stellar_mass'] = trylog10(combined_ML_alive+combined_ML_wd+combined_ML_ns+combined_ML_bh)
tbhdu.header['HIERARCH living_stars_mass'] = trylog10(combined_ML_alive)
tbhdu.header['HIERARCH remnant_mass'] = trylog10(combined_ML_wd+combined_ML_ns+combined_ML_bh)
tbhdu.header['HIERARCH remnant_mass_in_whitedwarfs'] = trylog10(combined_ML_wd)
tbhdu.header['HIERARCH remnant_mass_in_neutronstars'] = trylog10(combined_ML_ns)
tbhdu.header['HIERARCH remnant_mass_blackholes'] = trylog10(combined_ML_bh)
tbhdu.header['HIERARCH mass_of_ejecta'] = trylog10(combined_gas_fraction)
tbhdu.header['HIERARCH total_mass_up_1sig'] = trylog10(averages['stellar_mass_1_sig_plus'])
tbhdu.header['HIERARCH total_mass_low_1sig'] = trylog10(averages['stellar_mass_1_sig_minus'])
tbhdu.header['HIERARCH total_mass_up_2sig'] = trylog10(averages['stellar_mass_2_sig_plus'])
tbhdu.header['HIERARCH total_mass_low_2sig'] = trylog10(averages['stellar_mass_2_sig_minus'])
tbhdu.header['HIERARCH total_mass_up_3sig'] = trylog10(averages['stellar_mass_3_sig_plus'])
tbhdu.header['HIERARCH total_mass_low_3sig'] = trylog10(averages['stellar_mass_3_sig_minus'])
tbhdu.header['HIERARCH EBV'] = best_ebv
tbhdu.header['HIERARCH ssp_number'] =len(order)
# quantities per SSP
for iii in range(len(order)):
tbhdu.header['HIERARCH total_mass_ssp_'+str(iii)] = trylog10(mass_per_ssp[order][iii])
tbhdu.header['HIERARCH stellar_mass_ssp_'+str(iii)] = trylog10(final_ML_alive[order][iii]+final_ML_wd[order][iii]+final_ML_ns[order][iii]+final_ML_bh[order][iii])
tbhdu.header['HIERARCH living_stars_mass_ssp_'+str(iii)] = trylog10(final_ML_alive[order][iii])
tbhdu.header['HIERARCH remnant_mass_ssp_'+str(iii)] = trylog10(final_ML_wd[order][iii]+final_ML_ns[order][iii]+final_ML_bh[order][iii])
tbhdu.header['HIERARCH remnant_mass_in_whitedwarfs_ssp_'+str(iii)] = trylog10(final_ML_wd[order][iii])
tbhdu.header['HIERARCH remnant_mass_in_neutronstars_ssp_'+str(iii)] = trylog10(final_ML_ns[order][iii])
tbhdu.header['HIERARCH remnant_mass_in_blackholes_ssp_'+str(iii)] = trylog10(final_ML_bh[order][iii])
tbhdu.header['HIERARCH mass_of_ejecta_ssp_'+str(iii)] = trylog10(mass_per_ssp[order][iii] - final_ML_totM[order][iii])
tbhdu.header['HIERARCH log_age_ssp_'+str(iii)] = trylog10(age_per_ssp[order][iii])
tbhdu.header['HIERARCH metal_ssp_'+str(iii)] = trylog10(metal_per_ssp[order][iii])
tbhdu.header['HIERARCH SFR_ssp_'+str(iii)] = trylog10(mass_per_ssp[order][iii]/age_per_ssp[order][iii])
tbhdu.header['HIERARCH weightMass_ssp_'+str(iii)] = weight_mass_per_ssp[order][iii]
tbhdu.header['HIERARCH weightLight_ssp_'+str(iii)] = weight_light_per_ssp[order][iii]
self.tbhdu = tbhdu
prihdr = pyfits.Header()
prihdr['file'] = self.specObs.path_to_spectrum
prihdr['model'] = self.models
prihdr['ageMin'] = self.age_limits[0]
prihdr['ageMax'] = self.age_limits[1]
prihdr['Zmin'] = self.Z_limits[0]
prihdr['Zmax'] = self.Z_limits[1]
prihdu = pyfits.PrimaryHDU(header=prihdr)
self.thdulist = pyfits.HDUList([prihdu, tbhdu])
if self.write_results :
if os.path.isfile(self.outputFile + self.suffix ):
os.remove(self.outputFile + self.suffix )
#print self.outputFile + self.suffix , thdulist, thdulist[1].data, thdulist[0].header
self.thdulist.writeto(self.outputFile + self.suffix )
return 1.
else :
return 0.
def create_dummy_hdu(self):
    """
    creates an empty HDU table in case computation did not converge

    Every derived quantity is filled with the module-level sentinel
    ``default_value`` and the 'converged' flag is set to 'False'.
    """
    # Two-element placeholder arrays stand in for the real spectra.
    placeholder = np.array([default_value, default_value])
    wave_column = pyfits.Column(name="wavelength", format="D", unit="Angstrom", array=placeholder)
    model_column = pyfits.Column(name="firefly_model", format="D", unit="1e-17erg/s/cm2/Angstrom", array=placeholder)
    table = pyfits.BinTableHDU.from_columns(pyfits.ColDefs([wave_column, model_column]))
    table.header['IMF'] = dict_imfs[self.imfs[0]]
    table.header['library'] = self.model_libs[0]
    table.header['HIERARCH converged'] = 'False'
    # All fit products carry the sentinel; keyword order matches the
    # converged-table writer so downstream readers see the same layout.
    for keyword in ('age_lightW', 'age_lightW_up', 'age_lightW_low',
                    'metallicity_lightW', 'metallicity_lightW_up', 'metallicity_lightW_low',
                    'age_massW', 'age_massW_up', 'age_massW_low',
                    'metallicity_massW', 'metallicity_massW_up', 'metallicity_massW_low',
                    'total_mass', 'total_mass_up', 'total_mass_low',
                    'EBV', 'ssp_number'):
        table.header['HIERARCH ' + keyword] = default_value
    return table
|
#Copyright (C) 2017 Interview Druid, Parineeth M. R.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
from __future__ import print_function
import sys
import random
# Test-harness bounds: maximum list length, maximum element value,
# and number of randomized trials to run.
MAX_NUM_ELEMENTS = 10
MAX_VALUE = 10
MAX_NUM_TESTS = 10
def handle_error():
    """Report a test failure and abort the entire test run."""
    # Bug fix: corrected the misspelling "occured" in the message.
    print('Error occurred')
    sys.exit(1)
#a: input list that has to be sorted.
#pivot_value: after sorting, all elements smaller than pivot will lie to the
#left of the pivot and all values that are greater than pivot will lie to the
#right of the pivot. If there are many pivot values, then they will occur
#together in the middle
def dutch_sort(a, pivot_value):
    """Three-way (Dutch national flag) partition of ``a`` around ``pivot_value``.

    Rearranges ``a`` in place so that every element smaller than the pivot
    precedes every element equal to it, which in turn precedes every
    element greater than it.
    """
    low = 0             # next slot for a value < pivot
    scan = 0            # element currently being classified
    high = len(a) - 1   # next slot for a value > pivot
    while scan <= high:
        current = a[scan]
        if current < pivot_value:
            a[low], a[scan] = current, a[low]
            low += 1
            scan += 1
        elif current > pivot_value:
            # The element swapped in from `high` is still unclassified,
            # so `scan` stays put to examine it on the next iteration.
            a[scan], a[high] = a[high], current
            high -= 1
        else:
            scan += 1
def test(a, pivot_value):
    """Run dutch_sort on ``a`` and verify the resulting three-way partition."""
    print('Pivot value = {}'.format(pivot_value))
    print('Before : ', end='')
    print(a)
    dutch_sort(a, pivot_value)
    # After sorting, the list must consist of three consecutive (possibly
    # empty) runs: values below the pivot, equal to it, and above it.
    pos = 0
    n = len(a)
    for accept in (lambda v: v < pivot_value,
                   lambda v: v == pivot_value,
                   lambda v: v > pivot_value):
        while pos < n and accept(a[pos]):
            pos += 1
    if pos != n:
        handle_error()
    print('After : ', end='')
    print(a)
    print('_______________________________________________')
if (__name__ == '__main__'):
    # Run several randomized trials. Each trial builds a fresh list, so
    # the original dead pre-allocation `a = [0] * MAX_NUM_ELEMENTS`
    # (immediately overwritten in the loop) was removed.
    for loop in range(MAX_NUM_TESTS):
        # Pick the number of elements in the list randomly
        length = random.randint(1, MAX_NUM_ELEMENTS)
        # Randomly generate the elements in the list
        a = [random.randint(1, MAX_VALUE) for i in range(length)]
        # Pick a random pivot from the list itself
        pivot_value = a[random.randint(0, length - 1)]
        test(a, pivot_value)
    print('Test passed')
|
import csv
import io
import logging
import warnings
from urllib.parse import quote as urlquote
import dateutil.parser
import msgpack
log = logging.getLogger(__name__)
def create_url(tmpl, **values):
    """Fill ``tmpl`` with percent-encoded versions of ``values``.

    Args:
        tmpl (str): url template containing ``{name}`` placeholders
        values (dict): values substituted into the template
    Returns:
        str: the formatted URL, with every value URL-quoted
    """
    safe = {key: urlquote(str(val)) for key, val in values.items()}
    return tmpl.format(**safe)
def validate_record(record):
    """Check that `record` contains a key called "time".

    Args:
        record (dict): a dictionary representing a data record, where the
            keys name the "columns".
    Returns:
        True if there is a key called "time" (it actually checks for ``"time"``
        (a string) and ``b"time"`` (a binary)). False if there is no key
        called "time".
    """
    if not any(k in record for k in ("time", b"time")):
        warnings.warn(
            'records should have "time" column to import records properly.',
            category=RuntimeWarning,
        )
        # Bug fix: the docstring promises False for records without a
        # "time" column, but the original fell through and returned True.
        return False
    return True
def guess_csv_value(s):
    """Parse `s` into the most specific type it can represent.

    Interpretation is attempted in this order, and the first success wins:

    1. integer
    2. floating point
    3. boolean, when `s` is "false" or "true" (case insensitive)
    4. None, when `s` is "", "none" or "null" (case insensitive)
    5. the original string, unchanged

    Args:
        s (str): a string value, assumed to have been read from a CSV file.
    Returns:
        int, float, bool, None or str — the best guess for the value.
    """
    for cast in (int, float):
        try:
            return cast(s)
        except (OverflowError, ValueError):
            pass
    folded = s.lower()
    if folded in ("false", "true"):
        return folded == "true"
    if folded in ("", "none", "null"):
        return None
    return s
# Convert our dtype names to callables that parse a string into that type.
# "guess" maps to guess_csv_value, which tries int, then float, then the
# bool/None keywords, and finally returns the string unchanged.
DTYPE_TO_CALLABLE = {
    "bool": bool,
    "float": float,
    "int": int,
    "str": str,
    "guess": guess_csv_value,
}
def merge_dtypes_and_converters(dtypes=None, converters=None):
    """Build one column-name -> callable mapping from the two inputs.

    Args:
        dtypes (optional dict): maps column name to a "dtype" string, one
            of 'bool', 'float', 'int', 'str' or 'guess'.
        converters (optional dict): maps column name to a callable taking
            a string and returning the parsed value.

    Each dtype string is translated to the corresponding callable via
    ``DTYPE_TO_CALLABLE`` ('guess' becomes `tdclient.util.guess_csv_value`_).
    Entries from `converters` are applied last, so a column named in both
    dictionaries uses the callable from `converters`.

    Example:
        >>> merge_dtypes_and_converters(
        ...     dtypes={'col1': 'int', 'col2': 'float'},
        ...     converters={'col2': int},
        ... )
        {'col1': int, 'col2': int}

    Returns:
        (dict) A dictionary which maps column names to callables.

    Raises:
        ValueError: if a dtype string is not one of the recognized names.
    """
    merged = {}
    if dtypes is not None:
        for column_name, dtype in dtypes.items():
            try:
                merged[column_name] = DTYPE_TO_CALLABLE[dtype]
            except KeyError:
                raise ValueError(
                    "Unrecognized dtype %r, must be one of %s"
                    % (dtype, ", ".join(repr(k) for k in sorted(DTYPE_TO_CALLABLE)))
                )
    if converters is not None:
        merged.update(converters)
    return merged
def parse_csv_value(k, s, converters=None):
    """Turn the CSV string `s` for column `k` into an actual value.

    Args:
        k (str): The name of the column that the value belongs to.
        s (str): The value as read from the CSV input.
        converters (optional dict): maps column name to a parsing callable.

    When `converters` has an entry for `k`, that callable is applied to
    `s`; otherwise `tdclient.util.guess_csv_value`_ is used.

    .. warning:: Errors raised by a converter callable are not caught —
        e.g. ``int("not-an-int")`` will propagate its ``ValueError``.

    Example:
        >>> repr(parse_csv_value('col1', 'A string'))
        'A string'
        >>> repr(parse_csv_value('col1', '10'))
        10
        >>> repr(parse_csv_value('col1', '10', {'col1': float, 'col2': int}))
        10.0

    Returns:
        The parsed value for the CSV column.
    """
    if converters is None:
        return guess_csv_value(s)
    return converters.get(k, guess_csv_value)(s)
def csv_dict_record_reader(file_like, encoding, dialect):
    """Yield records from a CSV input using csv.DictReader.

    A reader suitable for use by `tdclient.util.read_csv_records`_, used
    when the column names come from the first row of the CSV data.

    Args:
        file_like: acts like an instance of io.BufferedIOBase; reading
            from it returns bytes.
        encoding (str): name of the encoding used to decode those bytes.
        dialect (str): name of the CSV dialect to use.

    Yields:
        One dictionary per CSV row, keyed by the column names taken from
        the first row, with the row's values as dictionary values.
    """
    text_stream = io.TextIOWrapper(file_like, encoding)
    yield from csv.DictReader(text_stream, dialect=dialect)
def csv_text_record_reader(file_like, encoding, dialect, columns):
    """Yield records from a CSV input using csv.reader and explicit column names.

    A reader suitable for use by `tdclient.util.read_csv_records`_, used
    when the column names are supplied via the `columns` parameter rather
    than read from the data itself.

    Args:
        file_like: acts like an instance of io.BufferedIOBase; reading
            from it returns bytes.
        encoding (str): name of the encoding used to decode those bytes.
        dialect (str): name of the CSV dialect to use.
        columns: sequence of column names, paired positionally with each
            row's fields.

    Yields:
        One dictionary per CSV row, keyed by `columns`.
    """
    text_stream = io.TextIOWrapper(file_like, encoding)
    for fields in csv.reader(text_stream, dialect=dialect):
        yield dict(zip(columns, fields))
def read_csv_records(csv_reader, dtypes=None, converters=None, **kwargs):
    """Read records using csv_reader and yield the results.

    Each raw row is converted column-by-column with the merged
    dtypes/converters mapping, then passed through validate_record
    (which warns when the "time" column is missing) before being yielded.
    """
    combined = merge_dtypes_and_converters(dtypes, converters)
    for row in csv_reader:
        record = {
            name: parse_csv_value(name, raw, combined)
            for name, raw in row.items()
        }
        validate_record(record)
        yield record
def create_msgpack(items):
    """Create msgpack streaming bytes from list

    Args:
        items (list of dict): target list
    Returns:
        Converted msgpack streaming (bytes)
    Examples:
        >>> t1 = int(time.time())
        >>> l1 = [{"a": 1, "b": 2, "time": t1}, {"a":3, "b": 6, "time": t1}]
        >>> create_msgpack(l1)
        b'\\x83\\xa1a\\x01\\xa1b\\x02\\xa4time\\xce]\\xa5X\\xa1\\x83\\xa1a\\x03\\xa1b\\x06\\xa4time\\xce]\\xa5X\\xa1'
    """
    buf = io.BytesIO()
    packer = msgpack.Packer()
    for item in items:
        try:
            packed = packer.pack(item)
        except (OverflowError, ValueError):
            # Out-of-range integers make the packer fail; clear its
            # internal buffer and retry with the normalized (stringified)
            # form of the item.
            packer.reset()
            packed = packer.pack(normalized_msgpack(item))
        buf.write(packed)
    return buf.getvalue()
def normalized_msgpack(value):
    """Recursively convert int to str if the int "overflows".

    Args:
        value (list, dict, int, float, str, bool or None): value to be normalized

    If `value` is a list or tuple, all elements are (recursively) normalized.
    If `value` is a dictionary, all keys and values are (recursively)
    normalized.
    If `value` is an integer outside msgpack's representable range
    ``-(1 << 63)`` .. ``(1 << 64) - 1``, it is converted to a string.
    Otherwise, `value` is returned unchanged.

    Returns:
        Normalized value
    """
    if isinstance(value, (list, tuple)):
        return [normalized_msgpack(v) for v in value]
    elif isinstance(value, dict):
        return dict(
            [(normalized_msgpack(k), normalized_msgpack(v)) for (k, v) in value.items()]
        )
    elif isinstance(value, int):
        # Bug fix: the lower bound was a strict `<`, which needlessly
        # stringified -(1 << 63) even though it is a valid msgpack int64.
        if -(1 << 63) <= value < (1 << 64):
            return value
        return str(value)
    else:
        return value
def get_or_else(hashmap, key, default_value=None):
    """ Get value or default value

    It differs from the standard dict ``get`` method in its behaviour when
    `key` is present but has a value that is an empty string or a string of
    only spaces.

    Args:
        hashmap (dict): target
        key (Any): key
        default_value (Any): default value
    Example:
        >>> get_or_else({'k': 'nonspace'}, 'k', 'default')
        'nonspace'
        >>> get_or_else({'k': ''}, 'k', 'default')
        'default'
        >>> get_or_else({'k': ' '}, 'k', 'default')
        'default'
    Returns:
        The value of `key` or `default_value`
    """
    value = hashmap.get(key)
    # Missing keys and blank (empty/whitespace-only) values both fall
    # back to the default.
    if value is None or not value.strip():
        return default_value
    return value
def parse_date(s):
    """Parse date from str to datetime

    TODO: parse datetime using an optional format string

    For now, this does not use a format string since API may return date in
    ambiguous format :(

    Args:
        s (str): target str
    Returns:
        datetime, or None when `s` cannot be parsed (a warning is logged).
    """
    try:
        parsed = dateutil.parser.parse(s)
    except ValueError:
        log.warning("Failed to parse date string: %s", s)
        return None
    return parsed
def normalize_connector_config(config):
    """Normalize connector config

    This is porting of TD CLI's ConnectorConfigNormalizer#normalized_config.
    see also: https://github.com/treasure-data/td/blob/15495f12d8645a7b3f6804098f8f8aca72de90b9/lib/td/connector_config_normalizer.rb#L7-L30

    Args:
        config (dict): A config to be normalized
    Returns:
        dict: Normalized configuration
    Raises:
        ValueError: if a 'config' key has sibling keys.
    Examples:
        Only with ``in`` key in a config.

        >>> config = {"in": {"type": "s3"}}
        >>> normalize_connector_config(config)
        {'in': {'type': 's3'}, 'out': {}, 'exec': {}, 'filters': []}

        With ``in``, ``out``, ``exec``, and ``filters`` in a config.

        >>> config = {
        ...     "in": {"type": "s3"},
        ...     "out": {"mode": "append"},
        ...     "exec": {"guess_plugins": ["json"]},
        ...     "filters": [{"type": "speedometer"}],
        ... }
        >>> normalize_connector_config(config)
        {'in': {'type': 's3'},
        'out': {'mode': 'append'},
        'exec': {'guess_plugins': ['json']},
        'filters': [{'type': 'speedometer'}]}
    """
    if "in" in config:
        return {
            "in": config["in"],
            "out": config.get("out", {}),
            "exec": config.get("exec", {}),
            "filters": config.get("filters", []),
        }
    elif "config" in config:
        if len(config) != 1:
            # Bug fix: message previously read "isn't support" and the
            # concatenated fragments produced "without 'config'key.".
            raise ValueError(
                "Setting sibling keys with 'config' key isn't supported. "
                "Set within the 'config' key, or put all the settings "
                "without 'config' key."
            )
        return normalize_connector_config(config["config"])
    else:
        return {"in": config, "out": {}, "exec": {}, "filters": []}
|
from sklearn.metrics import accuracy_score, confusion_matrix, hamming_loss, hinge_loss, log_loss
from sklearn.model_selection import KFold, StratifiedKFold
from sklearn.preprocessing import normalize
from sklearn.multiclass import OneVsRestClassifier
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# Shared cross-validation splitters: 11 folds, shuffled with a fixed seed
# so runs are reproducible. skf preserves class proportions per fold.
skf = StratifiedKFold(n_splits=11, shuffle=True, random_state=101)
kf = KFold(n_splits=11, shuffle=True, random_state=101)
def train_model(model, vects, target, labels, **kwargs):
    """Cross-validate ``model`` with the module-level StratifiedKFold and plot results.

    Args:
        model: scikit-learn style estimator exposing fit/predict.
        vects: feature matrix, indexable with fold index arrays.
        target: label vector aligned with ``vects``.
        labels: class names used for the confusion-matrix tick labels.
        **kwargs: unused; accepted for interface compatibility.

    Returns:
        (model_performance, cm, model): per-fold hamming loss / accuracy,
        the row-normalized confusion matrix (percent) of the LAST fold,
        and the estimator as fitted on the last fold.
    """
    model_performance = {
        'loss': [],
        'accuracy': [],
    }
    for train_indices, test_indices in skf.split(vects, target):
        X_train = vects[train_indices]
        y_train = target[train_indices]
        X_test = vects[test_indices]
        y_test = target[test_indices]
        model.fit(X_train, y_train)
        y_pred = model.predict(X_test)
        model_performance['loss'].append(hamming_loss(y_test, y_pred))
        model_performance['accuracy'].append(accuracy_score(y_test, y_pred))
    fig = plt.figure(figsize=(20, 6))
    ax1 = plt.subplot2grid((1, 3), (0, 0), colspan=2)
    # Bug fix: the mean reference lines were drawn with np.ones(10) even
    # though the splitter yields 11 folds, so they fell one fold short.
    # Size them to however many folds were actually run.
    n_folds = len(model_performance['loss'])
    ax1.plot(model_performance['loss'], label='loss per iteration')
    ax1.plot(np.full(n_folds, np.mean(model_performance['loss'])), '--', label='mean loss')
    ax1.plot(model_performance['accuracy'], label='accuracy per iteration')
    ax1.plot(np.full(n_folds, np.mean(model_performance['accuracy'])), '--', label='mean accuracy')
    ax1.grid()
    ax1.legend()
    ax1.set_xlabel('fold')
    ax1.set_ylabel('value')
    ax1.set_title('Model Performance')
    # Confusion matrix of the last fold only, rows normalized to percent.
    cm = normalize(confusion_matrix(y_test, y_pred), axis=1, norm='l1')*100
    ax2 = plt.subplot2grid((1, 3), (0, 2))
    sns.heatmap(cm, annot=True, square=True, ax=ax2, cmap='Blues')
    ax2.set_title('Confusion Matrix')
    ax2.set_xlabel('Predicted')
    ax2.set_xticklabels(labels, rotation=45)
    ax2.set_ylabel('Actual')
    ax2.set_yticklabels(labels, rotation=45)
    return model_performance, cm, model
def train_model_one_vs_rest(model, vects, target, labels, **kwargs):
    """Cross-validate a OneVsRest wrapper of ``model`` and plot results.

    Same contract as train_model, except ``target`` is a multilabel
    indicator matrix and ``cm`` is a list with one row-normalized
    confusion matrix (percent) per label, computed from the LAST fold.
    """
    model_performance = {
        'loss': [],
        'accuracy': [],
    }
    model = OneVsRestClassifier(model)
    for train_indices, test_indices in kf.split(vects, target):
        X_train = vects[train_indices]
        y_train = target[train_indices]
        X_test = vects[test_indices]
        y_test = target[test_indices]
        model.fit(X_train, y_train)
        y_pred = model.predict(X_test)
        model_performance['loss'].append(hamming_loss(y_test, y_pred))
        model_performance['accuracy'].append(accuracy_score(y_test, y_pred))
    fig = plt.figure(figsize=(20, 18))
    ax1 = plt.subplot2grid((3, 3), (0, 0), colspan=2)
    # Bug fix: mean lines previously used np.ones(10) for an 11-fold CV,
    # so they were drawn one fold short of the per-fold curves.
    n_folds = len(model_performance['loss'])
    ax1.plot(model_performance['loss'], label='loss per iteration')
    ax1.plot(np.full(n_folds, np.mean(model_performance['loss'])), '--', label='mean loss')
    ax1.plot(model_performance['accuracy'], label='accuracy per iteration')
    ax1.plot(np.full(n_folds, np.mean(model_performance['accuracy'])), '--', label='mean accuracy')
    ax1.grid()
    ax1.legend()
    ax1.set_xlabel('fold')
    ax1.set_ylabel('value')
    ax1.set_title('Model Performance')
    # One confusion matrix per label (last fold), rows normalized to percent.
    cm = []
    cm.append(normalize(confusion_matrix(y_test[:, 0], y_pred[:, 0]), axis=1, norm='l1')*100)
    ax2 = plt.subplot2grid((3, 3), (0, 2))
    sns.heatmap(cm[-1], annot=True, square=True, ax=ax2, cmap='Blues')
    ax2.set_title(f'Confusion Matrix \'{labels[0]}\'')
    ax2.set_xlabel('Predicted')
    ax2.set_ylabel('Actual')
    for i, l in enumerate(labels[1:]):
        cm.append(normalize(confusion_matrix(y_test[:, i+1], y_pred[:, i+1]), axis=1, norm='l1')*100)
        ax2 = plt.subplot2grid((3, 3), (i//3+1, i%3))
        sns.heatmap(cm[-1], annot=True, square=True, ax=ax2, cmap='Blues')
        ax2.set_title(f'Confusion Matrix \'{l}\'')
        ax2.set_xlabel('Predicted')
        ax2.set_ylabel('Actual')
    return model_performance, cm, model
#!usr/bin/python
# _*_ coding:utf-8 _*_
import urllib2
import cookielib
import bs4
import re
import MySQLdb
import sys
# Python 2 hack: re-expose setdefaultencoding (hidden after interpreter
# start-up) so implicit str/unicode conversions use UTF-8.
reload(sys)
sys.setdefaultencoding('utf8')
def get_movie(url):
    """Fetch ``url`` with browser-like headers and return the parsed soup."""
    headers = {'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/600.8.9 (KHTML, like Gecko) Version/8.0.8 Safari/600.8.9',
               'Connection':'keep-alive',
               'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
               'Cache-Control':'max-age=0'
               }
    # NOTE(review): a cookie-aware opener is built but the request below
    # goes through urllib2.urlopen directly, so the jar is never used —
    # kept to preserve the original behavior.
    jar = cookielib.CookieJar()
    opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(jar))
    req = urllib2.Request(url=url, headers=headers)
    html = urllib2.urlopen(req).read()
    return bs4.BeautifulSoup(html, 'html.parser')
def get_url(url):
    """Build the list of page URLs from the paginator's total page count."""
    soup = get_movie(url)
    pageinfo = soup.find_all(name='span', class_='pageinfo')
    page_count = int(pageinfo[0].strong.string)
    return [url + "/?PageNo=" + str(page + 1) for page in range(page_count)]
def get_all_name(url):
    """Return every <p class="tt cl"> title node found on the page."""
    soup = get_movie(url)
    return soup.find_all(name='p', class_='tt cl')
def get_single_name(url):
    """For each title node on the page, return the child contents of its <b> tag."""
    return [node.b.contents for node in get_all_name(url)]
def get_movie_name(url):
    """Extract a clean movie-name string from every title node on the page.

    Each entry from get_single_name is the contents list of a <b> tag.
    A single-element list may wrap the name in a further tag, in which
    case that tag's first child is used instead.
    """
    name_list = []
    for contents in get_single_name(url):
        if len(contents) == 1:
            candidate = contents[0]
            # Narrowed from a bare except: a plain NavigableString has no
            # usable .contents (AttributeError) and a childless tag gives
            # an empty list (IndexError).
            try:
                name = candidate.contents[0]
            except (AttributeError, IndexError):
                name = candidate
        else:
            name = contents[0]
        name_list.append(name)
    return name_list
def get_douban_goals(url):
    """Collect Douban ratings from the page.

    Each rating combines the integer part (<strong>) with the fractional
    digit (<em class="fm">); entries missing either piece are skipped.
    """
    soup = get_movie(url)
    rating_nodes = soup.find_all(name='p', class_='rt')
    goal_list = []
    for node in rating_nodes:
        # Narrowed from a bare except: missing tags raise AttributeError /
        # IndexError; malformed digits raise TypeError / ValueError.
        try:
            whole = node.strong.string
            fraction = node.find_all(name='em', class_='fm')[0].string
            goal_list.append(int(whole) + float(fraction) / 10)
        except (AttributeError, IndexError, TypeError, ValueError):
            pass
    return goal_list
def combination_name_goal(url):
    """Pair movie names with their ratings and sort by rating, descending."""
    names = get_movie_name(url)
    goals = get_douban_goals(url)
    ranking = {}
    for idx in range(len(names)):
        ranking[names[idx]] = goals[idx]
    # Python 2 dict: iteritems() yields (name, goal) pairs lazily.
    return sorted(ranking.iteritems(), key=lambda pair: pair[1], reverse=True)
def save_name(url):
    """Persist one page's (name, goal) pairs into MySQL table bt_sql.tet.

    Creates the database/table on first use. Inserts are best-effort:
    failures are skipped and whatever succeeded is committed. The
    connection is always closed, even if an error occurs.
    """
    names = get_movie_name(url)
    goals = get_douban_goals(url)
    conn = MySQLdb.connect(host='localhost', user='root', charset='utf8')
    try:
        cur = conn.cursor()
        cur.execute("""create database if not exists bt_sql """)
        conn.select_db("bt_sql")
        cur.execute("""create table if not exists tet(name varchar(30),goal float) """)
        # Narrowed from a bare except; IndexError is kept because the
        # names/goals lists can legitimately differ in length.
        try:
            for idx in range(len(names)):
                cur.execute("insert into tet(name,goal) value(%s,%s)",
                            [names[idx], goals[idx]])
        except (MySQLdb.Error, IndexError):
            pass
        conn.commit()
    finally:
        conn.close()
# Entry point: walk every paginated listing page of the movie site,
# print progress, and store each page's (name, rating) pairs in MySQL.
URL = 'http://www.bttiantang.com'
o = get_url(URL)
#s = "豆瓣评分:".decode("utf-8").encode("GBK")
#dot = u'\u2027'
page = int(0)  # running page counter, printed as progress
for i in range(o.__len__()):
    page = page + 1
    print page  # Python 2 print statement: show which page is being scraped
    single_page_url = o[i]
    f = combination_name_goal(single_page_url)  # sorted (name, goal) pairs (unused here)
    l = save_name(single_page_url)              # writes this page's rows to MySQL
#    for i in range(f.__len__()):
#        n = f[i][1]
#        try:
#            m = f[i][0].encode("GBK")
#            print "%-30s%s%.1f" % (m,s,n)
#        except:
#            continue
|
from collections import OrderedDict
from flask import Blueprint
from .. import __version__
from ._utils import CHANGELOG_URL, track
blueprint = Blueprint('root', __name__, url_prefix="/")


@blueprint.route("")
def index():
    """Track code coverage metrics."""
    metadata = OrderedDict(
        [('version', __version__), ('changelog', CHANGELOG_URL)]
    )
    return track(metadata)
|
# (stray unified-diff hunk header removed; it was not valid Python)
import numpy as np
import json
import sys
import csv
class Parameters:
    """Typed view over the raw parameter dictionary of a simulation run."""

    # attribute name -> key in the parameter dictionary
    _FIELDS = {
        "max_adaptive_period": "MAXIMUM_ADAPTIVE_PERIOD",
        "seed": "SEED",
        "output_file": "OUTPUT_FILENAME",
        "colony_size": "SIZE_OF_COLONY",
        "game_size": "NUMBER_OF_WORKERS_PER_GAME_GROUP",
        "k_reward_foraging": "LINEAR_SLOPE_OF_REWARD_FROM_FORAGING",
        "alpha_foraging_contribution": "PROPORTION_OF_CONTRIBUTION_FROM_FORAGING",
        "k_cost_fanning": "LINEAR_SLOPE_OF_COST_OF_FANNING",
        "q1_cost_foraging": "QUADRATIC_SQUARED_COEFFICIENT_OF_COST_OF_FORAGING",
        "q2_cost_foraging": "QUADRATIC_LINEAR_COEFFICIENT_OF_COST_OF_FORAGING",
        "selection_intensity": "INTENSITY_OF_SELECTION",
        "mutation_rate": "RATE_OF_MUTATION",
        "mutation_perturbation": "PERTURBATION_OF_MUTATION",
        "k_reinforcement_learning": "NUMBER_OF_GAME_ROUNDS_BEFORE_STRATEGY_REINFORCEMENT",
    }

    def __init__(self, parameter_dictionary):
        # A missing key raises KeyError, exactly as the original explicit
        # attribute-by-attribute assignments did.
        for attribute, key in self._FIELDS.items():
            setattr(self, attribute, parameter_dictionary[key])
class Simulation:
def __init__(self, parameters):
    # Store the run configuration and seed the global numpy RNG so every
    # random draw in the simulation is reproducible for a given seed.
    self.parameters = parameters
    np.random.seed(self.parameters.seed)
@staticmethod
def linear_perception(k, x):
    """
    :param k: Slope
    :param x: List of input values
    :return: List of linear evaluations
    """
    # Elementwise k*x via numpy broadcasting; note a plain Python list
    # would instead be repeated k times — callers pass ndarrays/scalars.
    return k * x
@staticmethod
def quadratic_perception(a, b, x):
    """
    :param a: Coefficient for squared term
    :param b: Coefficient for linear term
    :param x: List of input values
    :return: List of quadratic evaluations
    """
    # Evaluates a*x^2 + b*x elementwise (numpy broadcasting).
    return a * (x ** 2) + b * x
def benefit_quadratic_fanning_linear_foraging(self, traits_per_game):
    """
    This function is to compute the overall benefit from quadratic fanning & linear foraging under
    the condition that the shared foraging benefit is discounted by the fanning benefit in a single game.
    :param traits_per_game: List of traits, indicating the likelihood that workers are engaged into fanning
    :return: List of benefits
    """
    number_of_workers = len(traits_per_game)
    # Fanning factor: -4/n^2 * s^2 + 4/n * s over s = sum(traits); this
    # parabola is 0 when nobody (or everybody) fans and peaks at 1 when
    # half the group fans. Foraging reward: linear in the summed foraging
    # effort sum(1 - traits), scaled by alpha. Their product is shared
    # equally among the n workers in the game.
    return self.quadratic_perception(-4.0 / (number_of_workers ** 2), 4.0 / number_of_workers,
                                     np.sum(traits_per_game)) * self.linear_perception(self.parameters.k_reward_foraging,
                                     self.parameters.alpha_foraging_contribution * np.sum(1.0 - traits_per_game)
                                     ) / number_of_workers
def cost_linear_fanning_quadratic_foraging(self, traits_per_game):
    """
    This function is to compute the overall cost of linear fanning & quadratic foraging in a single game.
    :param traits_per_game: List of traits, indicating the likelihood of time that workers are engaged into fanning
    :return: List of costs
    """
    # Per-worker cost: linear in fanning time (k_cost_fanning * trait)
    # plus a quadratic cost in foraging time, q1*(1-trait)^2 + q2*(1-trait).
    return self.linear_perception(self.parameters.k_cost_fanning, traits_per_game) + self.quadratic_perception(
        self.parameters.q1_cost_foraging, self.parameters.q2_cost_foraging, 1.0 - traits_per_game)
def evaluate(self, payoffs_all):
"""
This function is to evaluate the payoffs of workers in a colony for the replicating process.
:param payoffs_all: List of payoffs
:return: List of evaluations
"""
return np.exp(self.parameters.selection_intensity * payoffs_all)
def payoff(self, benefit_function, cost_function, traits_all):
"""
This function is to compute the payoffs of workers in a colony at a single adaptive period.
:param benefit_function: Function specifying the benefit of workers in a game
:param cost_function: Function specifying the cost of workers in a game
:param traits_all: List of traits
:return: List of payoffs
"""
number_of_workers = len(traits_all)
payoffs_all = np.zeros(number_of_workers)
number_of_games = int(number_of_workers / self.parameters.game_size)
for j in np.arange(number_of_games):
payoffs_all[self.parameters.game_size * j:self.parameters.game_size * (j + 1)] = benefit_function(
traits_all[self.parameters.game_size * j:self.parameters.game_size * (j + 1)]
) - cost_function(
traits_all[self.parameters.game_size * j:self.parameters.game_size * (j + 1)])
return payoffs_all
def mutate_gaussian(self, traits_all):
"""
This function enables Gaussian mutation to happen on a colony.
:param traits_all: List of traits that mutation acts at
:return: List of indices of mutants
"""
number_of_workers = len(traits_all)
number_of_mutants = np.random.binomial(number_of_workers, self.parameters.mutation_rate)
mutant_indices = np.random.choice(number_of_workers, number_of_mutants, False)
for i in mutant_indices:
traits_all[i] = np.random.normal(loc=traits_all[i], scale=self.parameters.mutation_perturbation)
traits_all[traits_all < 0] = 0.0
traits_all[traits_all > 1] = 1.0
return mutant_indices
def adopted_probability(self, focalPayoff, selectedOthers):
l = len(selectedOthers)
adoptedPro = []
for payoff in selectedOthers:
temp = (payoff/(focalPayoff + payoff))/l
adoptedPro.append(temp)
focalPro = 1 - np.sum(adoptedPro)
return [focalPro].append(adoptedPro)
def replicate_recruitment(self, evaluations_all, traits_all):
re = []
for evaluate in evaluations_all:
p_weighted = self.adopted_probability(evaluate, evaluations_all)
re = np.append(re, np.random.choice(traits_all, size=1, p=p_weighted))
return re
@staticmethod
def replicate(evaluations_all, traits_all):
"""
This function is to replicate individuals based on the roulette-wheel selection.
:param evaluations_all: List of evaluations based on which this process follows
:param traits_all: List of traits that this process acts at
:return: List of traits in the next step
"""
number_of_workers = len(traits_all)
if np.all(evaluations_all == 0):
p_weighted = np.zeros(number_of_workers)
p_weighted.fill(1.0 / number_of_workers)
else:
p_weighted = evaluations_all / np.sum(evaluations_all)
re = np.random.choice(traits_all, size=number_of_workers, p=p_weighted)
return re
def mutate_gaussian_single(self, trait):
"""
This function enables Gaussian mutation to happen on a single individual.
:param trait: Trait that mutation acts at
:return: Mutated trait
"""
mutated_trait = np.random.normal(loc=trait, scale=self.parameters.mutation_perturbation)
if mutated_trait < 0:
mutated_trait = 0.0
if mutated_trait > 1:
mutated_trait = 1.0
return mutated_trait
def expected_payoffs(self, benefit_function, cost_function, traits_all,
index_mutant, trait_mutant_old, trait_mutant_new):
"""
This function is to compute the payoffs of the individual before and after mutation.
:param benefit_function: Function specifying the benefit of workers in a game
:param cost_function: Function specifying the cost of workers in a game
:param traits_all: List of traits
:param index_mutant: Index of the mutant
:param trait_mutant_old: Trait of the mutant before mutation
:param trait_mutant_new: Trait of the mutant after mutation
:return: Payoffs of the mutated individual before and after mutation
"""
number_of_workers = len(traits_all)
expected_payoffs = np.zeros(2)
traits_in_game_old = np.zeros(self.parameters.game_size)
traits_in_game_new = np.zeros(self.parameters.game_size)
for g in np.arange(self.parameters.k_reinforcement_learning):
indices_others_in_game = np.random.choice(number_of_workers, self.parameters.game_size - 1, replace=False)
if index_mutant in indices_others_in_game:
indices_others_in_game[indices_others_in_game == index_mutant] = np.random.choice(
np.delete(np.arange(number_of_workers), indices_others_in_game))
traits_in_game_old[0] = trait_mutant_old
traits_in_game_old[1::] = traits_all[indices_others_in_game]
traits_in_game_new[0] = trait_mutant_new
traits_in_game_new[1::] = traits_all[indices_others_in_game]
payoffs_old = benefit_function(traits_in_game_old) - cost_function(traits_in_game_old)
payoffs_new = benefit_function(traits_in_game_new) - cost_function(traits_in_game_new)
expected_payoffs[0] += payoffs_old[0]
expected_payoffs[1] += payoffs_new[0]
expected_payoffs *= (1 / self.parameters.k_reinforcement_learning)
return expected_payoffs
def individual_reinforcement_payoff(self, traits_initial, benefit_function, cost_function, record_frequency=2000):
"""
This function simulates the process of task allocation based on individual reinforcement and
saves the result into file.
:param benefit_function: Function specifying the benefits of workers in a game
:param cost_function: Function specifying the costs of workers in a game
:param traits_initial: List of initial probabilities of workers in a colony to select Task A
:param record_frequency: Frequency that data is written into output
"""
traits_all = traits_initial
number_of_workers = len(traits_all)
payoffs_all = np.zeros(number_of_workers)
is_in_games = np.zeros(number_of_workers, dtype=bool)
output_line = np.zeros(number_of_workers + 2)
output_file = open(self.parameters.output_file, 'w')
output_writer = csv.writer(output_file)
for i in np.arange(self.parameters.max_adaptive_period):
number_of_mutants = np.random.binomial(number_of_workers, self.parameters.mutation_rate)
if number_of_mutants != 0:
indices_mutant = np.random.choice(number_of_workers, number_of_mutants, False)
for index_mutant in indices_mutant:
trait_mutant_old = traits_all[index_mutant]
trait_mutant_new = self.mutate_gaussian_single(trait_mutant_old)
payoffs_old_new_mutant = self.expected_payoffs(benefit_function, cost_function,
traits_all, index_mutant, trait_mutant_old, trait_mutant_new)
if payoffs_old_new_mutant[0] < payoffs_old_new_mutant[1]:
traits_all[index_mutant] = trait_mutant_new
payoffs_all[index_mutant] = payoffs_old_new_mutant[1]
else:
payoffs_all[index_mutant] = payoffs_old_new_mutant[0]
is_in_games[indices_mutant] = True
if np.remainder(i, record_frequency) == 0:
output_line[0] = i
output_line[1:-1] = traits_all
output_line[-1] = np.mean(payoffs_all[is_in_games])
output_writer.writerow(np.around(output_line, decimals=3))
output_file.close()
def social_learning(self, traits_initial, benefit_function, cost_function, record_frequency=200):
"""
This function simulates the process of task allocation based on social learning and saves the results into file.
:param benefit_function: Function specifying the benefits of workers in a game
:param cost_function: Function specifying the costs of workers in a game
:param traits_initial: List of initial probabilities of workers in a colony to select Task A
:param record_frequency: Frequency that data is written into output
"""
traits_all = traits_initial
output_file = open(self.parameters.output_file, 'w')
output_writer = csv.writer(output_file)
for i in np.arange(self.parameters.max_adaptive_period):
payoffs_record = self.payoff(benefit_function, cost_function, traits_all)
traits_all = self.replicate(self.evaluate(payoffs_record), traits_all)
self.mutate_gaussian(traits_all)
if np.remainder(i, record_frequency) == 0:
output_line = np.append(i, traits_all)
output_line = np.append(output_line, np.mean(payoffs_record))
output_writer.writerow(['{:0.3f}'.format(x) for x in output_line])
output_file.close()
def social_learning_recruitment(self, traits_initial, benefit_function, cost_function, record_frequency=200):
"""
This function simulates the process of task allocation based on social learning and saves the results into file.
:param benefit_function: Function specifying the benefits of workers in a game
:param cost_function: Function specifying the costs of workers in a game
:param traits_initial: List of initial probabilities of workers in a colony to select Task A
:param record_frequency: Frequency that data is written into output
"""
traits_all = traits_initial
output_file = open(self.parameters.output_file, 'w')
output_writer = csv.writer(output_file)
for i in np.arange(self.parameters.max_adaptive_period):
payoffs_record = self.payoff(benefit_function, cost_function, traits_all)
traits_all = self.replicate_recruitment(self.evaluate(payoffs_record), traits_all)
self.mutate_gaussian(traits_all)
if np.remainder(i, record_frequency) == 0:
output_line = np.append(i, traits_all)
output_line = np.append(output_line, np.mean(payoffs_record))
output_writer.writerow(['{:0.3f}'.format(x) for x in output_line])
output_file.close()
def run(json_filename, which_process, traits_initial=None):
    """
    Load parameters from a JSON file and run the requested simulation process.
    :param json_filename: Path to the JSON parameter file
    :param which_process: One of "social_learning", "individual_payoff", "recruitment_payoff"
    :param traits_initial: Optional list of initial traits; defaults to all 0.5
    """
    # Context manager ensures the parameter file is closed even on error.
    with open(json_filename) as parameter_file:
        parameter_dictionary = json.load(parameter_file)
    parameters = Parameters(parameter_dictionary)
    simulation = Simulation(parameters)
    if traits_initial is None:
        traits_initial = np.ones(simulation.parameters.colony_size) * 0.5
    if which_process == "social_learning":
        simulation.social_learning(traits_initial,
                                   simulation.benefit_quadratic_fanning_linear_foraging,
                                   simulation.cost_linear_fanning_quadratic_foraging)
    elif which_process == "individual_payoff":
        simulation.individual_reinforcement_payoff(traits_initial,
                                                   simulation.benefit_quadratic_fanning_linear_foraging,
                                                   simulation.cost_linear_fanning_quadratic_foraging)
    elif which_process == "recruitment_payoff":
        simulation.social_learning_recruitment(traits_initial,
                                               simulation.benefit_quadratic_fanning_linear_foraging,
                                               simulation.cost_linear_fanning_quadratic_foraging)
# Entry point: expects the parameter-file path and the model name on argv.
if __name__ == '__main__':
    assert len(sys.argv) == 3, "Please specify the filename of parameters and the name of model"
    run(sys.argv[1], sys.argv[2])
# Search Twitter using Tweepy
# This program uses the module Tweepy to search Twitter for tweets with the two given tags.
import tweepy
import time
import xlsxwriter
import configparser
# Location of the config file
CONFIG_FILE = 'config.ini'
# Read config file
config = configparser.ConfigParser()
config.read(CONFIG_FILE)
# Authorize against the Twitter API using the credentials from the config.
username = config['common']['username']
password = config['common']['password']
consumer_token = config['common']['consumer_token']
consumer_secret = config['common']['consumer_secret']
access_token = config['common']['access_token']
access_secret = config['common']['access_secret']
# Bug fix: a second identical OAuthHandler used to be created a few lines
# later, discarding this one; a single handler is sufficient.
auth = tweepy.OAuthHandler(consumer_token, consumer_secret)
auth.set_access_token(access_token, access_secret)
api = tweepy.API(auth)
#ask user
usersearch = str(input("Insert a phrase or phrases you would like to search for separated by spaces: "))
searchtermsinit = usersearch.split(" ")
userdates = str(input("""Insert the start date and end date you would like to search for separated a space and in the format YEAR-0MM-DD
(NOTE: only dates within the last week are accessible): """))
searchdates = userdates.split(" ")
startSince = searchdates[0]
endUntil = searchdates[1]
usernews = str(input("Would you like to search for news keywords as well? (Y/N): "))
#write into a code
newfile = searchtermsinit[0] + "_output.xlsx"
openworkbook = xlsxwriter.Workbook(newfile)
#search parameters
searchitems = []
# Bug fix: the Y/N branches were swapped -- answering "Y" (include news
# keywords) previously produced the plain searches and "N" produced the
# news-organization searches.
if usernews == "Y" or usernews == "y":
    for item in searchtermsinit:
        temp = "'" + item + "'"
        # Bug fix: a space was missing before AND, producing e.g. 'term'AND "CNN".
        searchitems.append( temp + ' AND "CNN"' )
        searchitems.append( temp + ' AND "Fox News"' )
        searchitems.append( temp + ' AND "MSNBC"' )
        searchitems.append( temp + ' AND "BBC"' )
elif usernews == "N" or usernews == "n":
    for item in searchtermsinit:
        searchitems.append( "'" + item + "'" )
# define search function
def search( searchitem, start, end, workbook ):
    """
    Search Twitter for `searchitem` between `start` and `end` and write all
    original (non-retweet) results into a new worksheet of `workbook`.
    Relies on the module-level authenticated `api` object.
    NOTE(review): `api.search` with since/until is the tweepy v3 interface;
    confirm the installed tweepy version before upgrading.
    """
    # add worksheet to workbook
    worksheet = workbook.add_worksheet()
    # create titles for columns
    worksheet.write("A1", "Number of Tweets")
    worksheet.write("B1", "Date")
    worksheet.write("C1", "Time")
    worksheet.write("D1", "Username")
    worksheet.write("E1", "News Org")
    worksheet.write("F1", "Source")
    worksheet.write("G1", "Code")
    worksheet.write("H1", "Tweet")
    #define empty list/count
    totaldata = []
    #run search
    itsasearch = tweepy.Cursor(api.search,q=searchitem, since=start, until=end, lang="en").items()
    #write search into spreadsheet
    while True:
        try:
            # define initial parameters
            NewsOrg = "Random Person"
            retweet = "N"
            # data from tweet defined
            tweetdata = itsasearch.next()
            # tweet data split into a list
            tweetnums = str(tweetdata.created_at).split(" ")
            # date taken from list
            date = tweetnums[0]
            # time taken from list
            tweettime = tweetnums[1]
            # username, display name and tweet taken from data
            username = tweetdata.user.screen_name
            Source = tweetdata.user.name
            tweettext = tweetdata.text
            # code define as search terms
            Code = searchitem
            # check if source is a news organization or a random person
            if Source.find("CNN") != -1 or Source.find("FoxNews") != -1 or Source.find("BBC") != -1 or Source.find("MSNBC") != -1:
                NewsOrg = "News Org"
            # check if tweet is original or not
            if hasattr(tweetdata, 'retweeted_status'):
                retweet = "Y"
            # if tweet is not a retweet, create a list of relevant info
            if retweet == "N":
                inddata = [ date, tweettime, username, NewsOrg, Source, Code, tweettext ]
                totaldata.append( inddata )
        # if search quota met, wait 15 min and continue search
        except tweepy.TweepError:
            print("Waiting to continue search...")
            time.sleep(60 * 15)
            print("Continuing Search!")
            print("\n")
            continue
        # break loop when search ends
        except StopIteration:
            break
    # write data from every tweet into worksheet
    # Bug fix: the loop previously stopped at len(totaldata) exclusive and so
    # silently dropped the last collected tweet.
    for i in range(1, len(totaldata) + 1):
        worksheet.write("A" + str(i+1), i)
        worksheet.write("B" + str(i+1), totaldata[i-1][0])
        worksheet.write("C" + str(i+1), totaldata[i-1][1])
        worksheet.write("D" + str(i+1), totaldata[i-1][2])
        worksheet.write("E" + str(i+1), totaldata[i-1][3])
        worksheet.write("F" + str(i+1), totaldata[i-1][4])
        worksheet.write("G" + str(i+1), totaldata[i-1][5])
        worksheet.write("H" + str(i+1), totaldata[i-1][6])
# run searches
for word in searchitems:
    search( word, searchdates[0], searchdates[1], openworkbook )
# Bug fix: `output_file` was never defined in this script; the xlsxwriter
# workbook must be closed so the spreadsheet is actually written to disk.
openworkbook.close()
|
# Copyright 2013-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This class contains the command line options specific to failbot
from os.path import exists
from sys import argv
from dbbot import CommandLineOptions
class WriterOptions(CommandLineOptions):
    """Command line options specific to failbot's HTML report writer."""

    @property
    def output_file_path(self):
        """Path of the html file to be generated."""
        return self._options.output_file_path

    @property
    def report_title(self):
        """Title displayed on the generated report."""
        return self._options.report_title

    def _add_parser_options(self):
        # Extend the base parser with the writer-specific flags.
        super(WriterOptions, self)._add_parser_options()
        self._add_output_option()
        self._add_title_option()

    def _add_output_option(self):
        self._parser.add_option(
            '-o', '--output',
            dest='output_file_path',
            help='path to the resulting html file')

    def _add_title_option(self):
        self._parser.add_option(
            '-n', '--name',
            dest='report_title',
            help='the report title')

    def _get_validated_options(self):
        # With no arguments at all, show usage instead of an error.
        if len(argv) < 2:
            self._exit_with_help()
        options = super(WriterOptions, self)._get_validated_options()
        if not options.output_file_path:
            self._parser.error('output html filename is required')
        return options
|
import pytest
from datetime import datetime
from src.inspetor.model.inspetor_item import InspetorItem
from src.inspetor.exception.model_exception.inspetor_item_exception import InspetorItemException
class TestInspetorItem:
    def get_default_item(self):
        """Build an InspetorItem populated with valid default test values."""
        item = InspetorItem()
        defaults = {
            "id": "123",
            "event_id": "123",
            "session_id": "123",
            "seating_option": "Seating Option Test",
            "price": "10",
            "quantity": "123",
        }
        for attribute, value in defaults.items():
            setattr(item, attribute, value)
        return item

    def test_if_is_valid(self):
        """A fully populated item passes validation (is_valid returns None)."""
        assert self.get_default_item().is_valid() is None
from triple_triple_etl.load.postgres.nbastats_postgres_etl import NBAStatsPostgresETL
from triple_triple_etl.constants import (
BASE_URL_PLAY,
BASE_URL_BOX_SCORE_TRADITIONAL,
BASE_URL_BOX_SCORE_PLAYER_TRACKING
)
if __name__ == '__main__':
    game_id = '0021500568'
    season = '2015-16'
    # Query parameters shared by all three NBA-stats endpoints.
    params = {
        'EndPeriod': '10',  # default by NBA stats (acceptablevalues: 1, 2, 3, 4)
        'EndRange': '55800',  # not sure what this is
        'GameID': game_id,
        'RangeType': '2',  # not sure what this is
        'Season': season,
        'SeasonType': 'Regular Season',
        'StartPeriod': '1',  # acceptable values: 1, 2, 3, 4
        'StartRange': '0',  # not sure what this is
    }
    # Each ETL job differs only in its endpoint URL and in which part of the
    # response payload ('data_content') gets loaded; run them in sequence.
    etl_jobs = [
        (BASE_URL_PLAY, 0),
        (BASE_URL_BOX_SCORE_TRADITIONAL, 1),
        (BASE_URL_BOX_SCORE_PLAYER_TRACKING, 2),
    ]
    for base_url, data_content in etl_jobs:
        etl = NBAStatsPostgresETL(
            base_url=base_url,
            params=params,
            data_content=data_content,
            schema_file='nbastats_tables.yaml',
        )
        etl.run()
|
import csv
from urllib.parse import urlparse, parse_qs
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from apps.articles.models import Section
from apps.utils.converters import perl_to_python_dict
class Command(BaseCommand):
    """Management command migrating partner video channels from a legacy csv export."""
    help = 'Migrate partners channels from csv'

    def add_arguments(self, parser):
        """Register the --path argument pointing at the source csv file."""
        parser.add_argument('--path', help='/path/to/file.csv')

    def handle(self, *args, **kwargs):
        """Read the csv rows and bulk-create video Section objects."""
        self.stdout.write('Start...')
        path = kwargs.get('path')
        if not path:
            raise CommandError('Path is required')
        with open(path, 'r', encoding=settings.MIGRATE_FILE_ENCODING) as csvfile:
            reader = csv.reader(csvfile)
            sections = []
            for row in reader:
                try:
                    data = perl_to_python_dict(row[8])
                except Exception as e:
                    # Bug fix: OutputWrapper.write expects a string, not an
                    # exception instance.
                    self.stderr.write(str(e))
                    continue
                # The channel name is carried in the 'user' query parameter
                # of the legacy RSS url.
                query_string = urlparse(data.get('rss')).query
                query_dict = parse_qs(query_string)
                channel = query_dict.get('user')
                if isinstance(channel, (list, tuple)):
                    channel = channel[0]
                sections.append(
                    Section(
                        name=row[7], slug=data.get('section_alias'), is_video=True,
                        channel=channel, is_active=bool(int(row[5])), ext_id=row[0]
                    )
                )
            Section.objects.bulk_create(sections, batch_size=100)
        self.stdout.write('End...')
|
import json

# Load the JSON array of positive examples; the context manager guarantees the
# file handle is closed (the original script leaked it until a manual close).
with open("positive_examples_titles.json", "r") as file_object:
    j = json.load(file_object)

# l holds the 'title' value of every example, in file order.
l = [entry['title'] for entry in j]
|
from Graph import *
import os
class TPGenerator:
    """Generate test points (.in input files plus .out reference answers) for a judge problem."""

    def GenerateTP(self, path, file_name, exe_name, ty, count=10, node_num=100, edge_num=100, weight_limit=20):
        """
        Generate `count` random test cases and their reference outputs.
        :param path: Directory holding the test files and the solver executable
        :param file_name: Common prefix of the generated .in/.out files
        :param exe_name: Name of the reference-solution executable
        :param ty: Graph type; only 'DAG' is currently handled
        :param count: Number of test cases to generate
        :param node_num: Upper bound on the node count
        :param edge_num: Upper bound on the edge count
        :param weight_limit: Currently unused; kept for interface compatibility
        """
        # NOTE(review): Windows-style separators kept for compatibility;
        # os.path.join would be more portable.
        self.file_path = path + "\\" + file_name
        self.exe_path = path + "\\" + exe_name
        if ty == 'DAG':
            for i in range(1, count+1):
                # `random` is assumed to come from Graph's wildcard import -- verify.
                n = random.randint(int(node_num/2), node_num)
                m = random.randint(n-1, min(n*2, edge_num))
                obj = Graph(n, m)
                input_filename = self.file_path + str(i) + ".in"
                output_filename = self.file_path + str(i) + ".out"
                # Write "n m" followed by one edge per line; the context
                # manager closes the handle deterministically.
                with open(input_filename, "w") as f:
                    f.write(str(obj.n) + " " + str(obj.m) + "\n")
                    for j in range(obj.m):
                        f.write(obj.GetEdge(j) + "\n")
                # Create (truncate) the output file before the solver fills it.
                open(output_filename, "w").close()
                self.run(self.exe_path, input_filename, output_filename)

    def run(self, exe_path, input_filename, output_filename):
        """Run the solver on the .in file, redirecting its output to the .out file."""
        # NOTE(review): os.system builds a shell command from raw paths; paths
        # containing spaces or shell metacharacters would break or be unsafe.
        command = exe_path + "<" + input_filename + ">" + output_filename
        os.system(command)
|
"""
tcp_client套接字编程 : 客户端流程
思路:逐步骤完成操作
重点代码
"""
from socket import * # 调用 套接字模块
# 创建套接字对象 tcp套接字
sockfd = socket() # tcp套接字参数默认值即为tcp套接字
# 链接服务端程序
server_addr = ("172.40.74.151", 8888) # 服务端IP地址,端口号
sockfd.connect(server_addr)
# 消息发送接收
while True:
data = input("Msg>>")
# 如果什么不输入,直接回车,退出
if not data:
break
sockfd.send(data.encode()) # 字符串转换字节串发送
data = sockfd.recv(1024)
print("啥东东",data)
# 关闭套接字,断开服务端连接
sockfd.close() |
from __future__ import with_statement
import numpy as np
import sys
from PySide import QtCore, QtGui
from equalibria import Ui_MainWindow
class DesignerMainWindow(QtGui.QMainWindow, Ui_MainWindow):
    """Main window wiring the Qt-Designer-generated Ui_MainWindow to behaviour."""
    def __init__(self, parent=None):
        super(DesignerMainWindow, self).__init__(parent)
        self.setupUi(self)
        # NOTE(review): setContentsMargins returns None, so this print is a
        # leftover debug statement that always prints None.
        foo = self.tabWidget.setContentsMargins(0,0,0,0)
        print(foo)
    def calculate_state(self, info=None):
        # Placeholder slot: currently only logs the received info.
        print("Calculating state with {}".format(info))
# Create the Qt application, show the main window and enter the event loop.
app = QtGui.QApplication(sys.argv)
dmw = DesignerMainWindow()
dmw.show()
sys.exit(app.exec_())
|
#!/usr/bin/ipython
from ROOT import TH1F,TGraph
from dice import *
from numpy import mean,asarray
def counthits(pool,limit=None,edge=False):
    """
    Roll `pool` six-sided dice and count hits (rolls of 5 or 6).
    :param limit: optional cap on the number of hits (ignored when edge is used)
    :param edge: when True, 6s "explode" (are re-rolled for extra hits)
    :return: number of hits
    """
    nhits=0
    for i in range(pool):
        roll=die.roll1dX(6)
        if roll>4:
            nhits+=1
        # Exploding sixes: keep re-rolling as long as a 6 comes up.
        while roll==6 and edge:
            roll=die.roll1dX(6)
            if roll>4:
                nhits+=1
    # Bug fix: test `limit` first -- the old order evaluated `nhits > None`,
    # which raises TypeError on Python 3 when no limit is given.
    if limit and nhits>limit and not edge:
        nhits=limit
    return nhits
def extendedTest(pool,threshold,limit=None):
    # Simulate an extended test: roll the (shrinking) pool each round and
    # accumulate hits until `threshold` is reached or the pool is exhausted.
    # Returns the number of rounds needed, or False on failure.
    # NOTE(review): nesting reconstructed from a whitespace-mangled source;
    # confirm `pool-=1` runs every iteration in the original.
    iRound=0
    hits=0
    while pool>0:
        if(hits<threshold):
            hits+=counthits(pool,limit)
            iRound+=1
        pool-=1
    if hits>=threshold:
        return iRound
    else:
        return False
def main():
    """Simulate 1e4 attack-vs-evade exchanges and return the normalized damage histogram."""
    hits=TH1F("nhits","nhits",20,-0.5,19.5)
    evade=TH1F("nevade","nevade",20,-0.5,19.5)
    nethits=TH1F("net","net",41,-20.5,20.5)
    # Bug fix: the damage histogram was created twice with the same name,
    # which makes ROOT replace the first instance and emit a warning.
    damage=TH1F("damage","damage",31,-0.5,30.5)
    attackpool=9+6+2+2-4+5
    accuracy=6
    dv=13
    ap=18
    evadepool=11-3
    armorpool=35
    for i in range(int(1e4)):
        attack=counthits(attackpool,accuracy,edge=True)
        eva=counthits(evadepool-5)
        hits.Fill(attack)
        evade.Fill(eva)
        nethits.Fill(attack-eva)
        if((attack-eva)>0):
            damage.Fill(attack-eva+dv-counthits(armorpool-ap))
        else:
            damage.Fill(0)
    # Enable error propagation once before scaling (the second Sumw2 call in
    # the original was a redundant no-op that only triggered a ROOT warning).
    damage.Sumw2()
    hits.Scale(1./hits.GetEntries())
    evade.Scale(1./evade.GetEntries())
    nethits.Scale(1./nethits.GetEntries())
    damage.Scale(1./damage.GetEntries())
    damage.Draw()
    return damage
def MeanHits(pool,limit):
    """Estimate the mean hit count for a dice pool via 1e4 Monte-Carlo rolls."""
    samples = [counthits(pool, limit) for _ in range(int(1e4))]
    return mean(samples)
def HitsVsPool():
    """Build and draw a TGraph of mean hits versus dice-pool size (0..39)."""
    pools = [float(ipool) for ipool in range(40)]
    meanhits = [MeanHits(int(pool), 6) for pool in pools]
    myGraph = TGraph(len(pools), asarray(pools), asarray(meanhits))
    myGraph.Draw("ALP")
    return myGraph
def when(pool,threshold):
    """Histogram of the round count in which an extended test succeeds (10000 trials)."""
    rounds = TH1F("rounds", "rounds", 20, -0.5, 19.5)
    for _ in range(10000):
        finished_round = extendedTest(pool, threshold)
        # Failed tests return False and are not filled.
        if finished_round:
            rounds.Fill(finished_round)
    return rounds
# Run the simulation, fit a Gaussian to the damage distribution and draw it.
damage=main()
damage.Fit("gaus")
damage.Draw()
#myGraph=HitsVsPool()
#rounds=when(pool=4,threshold=2)
|
from flask import Flask
from config import my_config
from config import Config
from flask_oauthlib.client import OAuth
from . import my_constants
from celery import Celery
oauth = OAuth()
yammer_rank_oauth = oauth.remote_app(
'Yammer Rank',
consumer_key=my_constants.CLIENT_ID,
consumer_secret=my_constants.CLIENT_SECRET,
base_url='https://www.yammer.com/oauth2/authorize',
request_token_url=None,
#request_token_params={'scope': 'get_user_info'},
request_token_params=None,
access_token_url='https://www.yammer.com/oauth2/access_token'
#authorize_url= my_constants.AUTH_URL
)
#celery
'''
my_celery = Celery(__name__,
broker=Config.CELERY_BROKER_URL,
backend=Config.CELERY_RESULT_BACKEND)
'''
def create_app(config_name):
    """Application factory: build and configure the Flask app for `config_name`."""
    app = Flask(__name__)
    #print("DEBUG __name__ :{}".format(__name__))
    print("DEBUG Flask app created at yammer_rank/__init__.py, __name__: {}".format(__name__))
    #print("DEBUG app.name: {}".format(app.name))
    # Configuration must be applied before blueprints/extensions are attached.
    app.config.from_object(my_config[config_name])
    my_config[config_name].init_app(app)
    # Blueprints: main site routes, then auth routes mounted under /auth.
    from .main import main as main_blueprint
    app.register_blueprint(main_blueprint)
    from .auth import auth_bp
    app.register_blueprint(auth_bp, url_prefix="/auth")
    #oauth authentication
    oauth.init_app(app)
    #my_celery.conf.update(app.config)
    return app
import numpy as np
import random
def shuffle(x):
    """Return a new, randomly shuffled list built from iterable `x`."""
    shuffled = list(x)
    random.shuffle(shuffled)
    return shuffled
class setClass:
    """Order-preserving set-like wrapper supporting intersection and difference."""
    def __init__(self, oldSet):
        self.oldSet = oldSet

    def print1(self):
        """Return the wrapped collection unchanged."""
        return self.oldSet

    def intersect(self, newSet):
        """Elements of the wrapped set also present in `newSet`, in original order."""
        return [element for element in self.oldSet if element in newSet]

    def subtract(self, newSet):
        """Elements of the wrapped set absent from `newSet`, in original order."""
        return [element for element in self.oldSet if element not in newSet]
def checkValidity(oldSetIndex,newSetIndex,relation):
    """Return 1 if `relation` holds between the two ground-truth sets, else 0.

    Reads the module-level `setNamesTruth` mapping. Unknown relations fall
    through and (like the original) implicitly return None.
    """
    focal = setClass(setNamesTruth[oldSetIndex])
    other_elements = setNamesTruth[newSetIndex]
    overlap = focal.intersect(other_elements)
    if relation == 'someAre':
        # At least one shared element.
        return 1 if overlap else 0
    if relation == 'allAre':
        # Every element of the focal set is shared.
        return 1 if set(overlap) == set(focal.print1()) else 0
    if relation == 'noAre':
        # No shared elements at all.
        return 1 if not overlap else 0
def createBinarySet(leftSet,rightSet,relation):
alphabets=['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
capitals=['A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z']
alphabets=alphabets+capitals
if len(leftSet)==0:
leftSet=list(np.random.choice(alphabets,random.choice([4,5,6,7]),replace=False))
if relation=='all':
newAlphabets=[x for x in alphabets if x not in leftSet]
rightSet=leftSet+list(np.random.choice(newAlphabets,random.choice([3,4]),replace=False))
elif relation=='some':
commonAlphabets=list(np.random.choice(leftSet,random.choice([2,3,4]),replace=False))
newAlphabets=[x for x in alphabets if x not in leftSet]
rightSet=commonAlphabets+list(np.random.choice(newAlphabets,random.choice([3,4]),replace=False))
elif relation=='no':
newAlphabets=[x for x in alphabets if x not in leftSet]
rightSet=list(np.random.choice(newAlphabets,random.choice([4,5,6,7]),replace=False))
else:
pass
return leftSet,rightSet
def createIndex():
    """Build a random spanning-tree-like list of [anchor, fresh] index pairs over 0..4.

    Each new index is attached to a randomly chosen already-connected index.
    """
    pairs = []
    pending = shuffle(range(5))
    connected = [pending.pop()]
    while pending:
        anchor = random.choice(connected)
        fresh = pending.pop()
        connected.append(fresh)
        pairs.append([anchor, fresh])
    return pairs
def createQuestionIndex(indexList):
    """Map each index to the list of indices it is directly paired with."""
    questionIndex = {}
    for pair in indexList:
        for position, member in enumerate(pair):
            # Mirror position: the other element of the pair.
            partner = pair[abs(len(pair) - position - 1)]
            questionIndex.setdefault(member, []).append(partner)
    return questionIndex
def createQuestion(questionIndex):
itemRange=range(len(questionIndex))
item=random.choice(itemRange)
unusedRelation=[x for x in itemRange if x not in questionIndex[item] and x != item]
return [item,random.choice(unusedRelation),random.choice(['allAre','someAre','noAre'])]
def main():
    # Build a random chain of set relations (Python 2 script), generate letter
    # sets satisfying them, print them, and produce three distinct questions
    # about index pairs that were NOT directly related.
    indexList=createIndex()
    questionIndex=createQuestionIndex(indexList)
    global setNames
    setNames={0:[],1:[],2:[],3:[],4:[]}
    relationList=[]
    for i in range(len(indexList)):
        # One random relation per pair, realised immediately as concrete sets.
        relationList.append(random.choice(['all','some','no']))
        setNames[indexList[i][0]],setNames[indexList[i][1]]=createBinarySet(setNames[indexList[i][0]],setNames[indexList[i][1]],relationList[i])
    for key in setNames.keys():
        print key,'--->',setNames[key]
    questionList=[]
    # Keep sampling until three distinct questions are collected.
    while len(questionList)!=3:
        tempMain=createQuestion(questionIndex)
        if tempMain not in questionList:
            questionList.append(tempMain)
    return indexList,relationList,questionList
def createTruthTable(indexList,relationList,questionList,truthTable):
    """Re-sample fresh ground-truth sets and tally, per question, whether it held.

    Mutates/returns `truthTable`, one accumulating counter per question.
    """
    global setNamesTruth
    setNamesTruth = {0: [], 1: [], 2: [], 3: [], 4: []}
    # Realise the same relation chain with freshly sampled concrete sets.
    for pair, relation in zip(indexList, relationList):
        setNamesTruth[pair[0]], setNamesTruth[pair[1]] = createBinarySet(
            setNamesTruth[pair[0]], setNamesTruth[pair[1]], relation)
    for position, question in enumerate(questionList):
        truthTable[position] += checkValidity(question[0], question[1], question[2])
    return truthTable
# Script driver (Python 2): build the puzzle once, then Monte-Carlo the truth
# value of each question over 10000 re-sampled ground-truth realisations.
indexList,relationList,questionList=main()
truthTable=[0,0,0]
for i in range(10000):
    truthTable=createTruthTable(indexList,relationList,questionList,truthTable)
print indexList,relationList,questionList
for i in range(3):
    print truthTable[i]
|
#!/usr/bin/python
from unicodedata import normalize
import re
from sklearn.feature_extraction.text import CountVectorizer
from sklearn import svm
import nltk
import numpy
# Download the RSLP (Portuguese) stemmer data used by transform_text.
nltk.download('rslp')
# Portuguese articles and prepositions stripped out as stop words.
_ARTICLES = ['a', 'as', 'o', 'os']
_PREPOSITIONS = ['a', 'ante', 'ate', 'apos', 'com', 'contra', 'para', 'per', 'por',\
                 'perante', 'sem', 'sob', 'sobre', 'tras', 'na', 'no', 'nas', 'nos']
# Shared bag-of-words vectorizer and SVM classifier, trained in load_classifier().
vectorizer = CountVectorizer()
classifier = svm.SVC(probability=True,kernel='linear')
def remove_accentuation(text):
    """Strip accents: decompose to NFKD, then drop the non-ASCII combining marks."""
    decomposed = normalize('NFKD', text)
    return decomposed.encode('ASCII', 'ignore').decode('ASCII')
def remove_useless_words(word_list):
    """Drop $-prefixed parameter tokens, articles, and prepositions from the word list."""
    return [word for word in word_list
            if word[0] != '$'
            and word not in _ARTICLES
            and word not in _PREPOSITIONS]
def replace_nonalphanumeric_by_space(text):
    """Replace every char that is not a digit, lowercase letter, or '$' with a space."""
    pattern = re.compile(r'[^0-9a-z\$]')
    return pattern.sub(' ', text)
'''
transform_text:
1. Converts to lower case
2. replaces characters with accentuation by its pure corresponding char
3. replaces all chars that are not letters or numbers or $ by space
4. splits the text into a list of words. The separator is any whitespace char
5. removes all whitespace chars in the borders of each word
6. removes all articles and prepositions of the list of words
7. stems the remaining words
'''
def transform_text(text):
    """Normalize `text` into a list of stemmed, cleaned, lowercase tokens."""
    stemmer = nltk.stem.RSLPStemmer()
    normalized = replace_nonalphanumeric_by_space(remove_accentuation(text.lower()))
    tokens = remove_useless_words([token.strip() for token in normalized.split()])
    return [stemmer.stem(token) for token in tokens]
def load_classifier():
    """Train the module-level vectorizer and classifier from the sample file.

    Each line of samples/training_input.txt is `<label> <words...>`; blank
    lines are skipped. Debug prints are kept intentionally.
    """
    # Context manager guarantees the file handle is closed even on error.
    with open('samples/training_input.txt', 'r') as fsample:
        raw_rows = fsample.readlines()
    labels = []
    samples = []
    for row in raw_rows:
        splt = row.split()
        if splt:
            labels.append(splt[0])
            samples.append(' '.join(splt[1:]))
    transformed_samples = [' '.join(transform_text(el)) for el in samples]
    print(transformed_samples)
    matrix = vectorizer.fit_transform(transformed_samples)
    print(matrix.toarray())
    print(labels)
    classifier.fit(matrix, labels)
    print(classifier)
def classify(sentence):
    """Return the predicted label for `sentence`, or None when confidence is low."""
    features = vectorizer.transform([' '.join(transform_text(sentence))])
    probabilities = classifier.predict_proba(features)[0]
    peak_index = numpy.argmax(probabilities)
    print(probabilities)
    print(probabilities[peak_index])
    # Empirically, predictions below 0.56 probability are unreliable.
    if probabilities[peak_index] >= 0.56:
        return classifier.classes_[peak_index]
    return None
load_classifier()
|
#!/usr/bin/env python3
import unittest
import pandas as pd
from pandas.util.testing import assert_series_equal
from unittest.mock import patch
from datetime import datetime, timedelta
import time
#from location_data_tests import getSiteNums, clean_up_sites, pullinfo, getDateInput
import os
from campsiteFinder import source
class test_UserInputs(unittest.TestCase):
    """Exercise the interactive input loops of campsiteFinder.source via mocked input()."""

    def test_user_inputs_correct_dates(self):
        """getDateInput rejects malformed/out-of-range dates until a valid pair is given."""
        user_input = [
            'iAMaString', #string entry
            '1234', #int entry
            '12-August-2018', #bad format #1
            '12 Aug 2018', #bad format #2
            '12-08-2018', #bad format #3
            '08-12-2018', #bad format #4
            str(datetime.today() - timedelta(days=1)), #edge case day before
            str(datetime.today()), #edge case day of
            '2018-03-12', #date in the past
            str(datetime.today() - timedelta(days=30*4+1)), #far edge of search window
            '2018-08-12', #good start entry
            #rerun test for second input loop
            'iAMaString', #string entry
            '1234', #int entry
            '14-August-2018', #bad format #1
            '14 Aug 2018', #bad format #2
            '14-08-2018', #bad format #3
            '08-14-2018', #bad format #4
            str(datetime.today() - timedelta(days=1)), #edge case day before
            str(datetime.today()),
            '2018-08-11', #day before search start
            '2018-08-12', #entry same date as search start
            '2018-08-14' #good end entry
        ]
        expected_dates = ('2018-08-12', '2018-08-14')
        with patch('builtins.input', side_effect=user_input):
            stacks = source.getDateInput()
        self.assertEqual(stacks, expected_dates)

    def test_user_inputs_correct_site_numbers(self):
        """getSiteNums rejects everything but a comma-separated list of sites."""
        user_input = [
            'iAMaString',
            '1234',
            '12 34',
            '1 2 3 4 1 2 3 4',
            # Bug fix: a missing comma here previously concatenated the next
            # two entries into the single string '1 3 91-2-3'.
            '1 3 9',
            '1-2-3',
            '1, 2, 6'
        ]
        expected_site_list = [0, 1, 5]
        #incorrect = len(user_input)-1
        #count = 0
        with patch('builtins.input', side_effect=user_input):
            output = source.getSiteNums()
        self.assertEqual(output, expected_site_list)

    def test_user_site_list_are_ints(self):
        """Every returned site number is an integer."""
        user_input = ['1, 3, 4']
        with patch('builtins.input', side_effect=user_input):
            output = source.getSiteNums()
        for s in output:
            self.assertEqual(s, int(s))

    def test_input_email(self):
        """getEmailaAddress rejects malformed addresses until a valid one arrives."""
        user_input = [
            'notaGoodaddress',
            '@gmail.com',
            'mike@',
            'john@.com',
            'john.smith@gmail.com'
        ]
        expected_email = 'john.smith@gmail.com'
        with patch('builtins.input', side_effect=user_input):
            output = source.getEmailaAddress()
        self.assertEqual(output, expected_email)

    def test_yes_response_for_repeater(self):
        """A 'y' answer yields the (interval, duration, True) repeat settings."""
        user_input = [
            '123',
            'fox',
            'y',
            '5',
            '10'
        ]
        expected_output = (5, 10, True)
        with patch('builtins.input', side_effect=user_input):
            output = source.asktoRepeat()
        self.assertEqual(output, expected_output)

    def test_no_response_for_repeater(self):
        """An 'n' answer yields the disabled (0, 0, False) repeat settings."""
        user_input = [
            '123',
            'fox',
            'n'
        ]
        expected_output = (0, 0, False)
        with patch('builtins.input', side_effect=user_input):
            output = source.asktoRepeat()
        self.assertEqual(output, expected_output)
class TestBasicFuncs(unittest.TestCase):
    """Direct unit tests for the small pure helpers in campsiteFinder.source."""
    def setUp(self):
        pass
    def test_clean_up_sites_commas(self):
        """Commas (including a trailing one) collapse to single spaces."""
        cleaned = source.clean_up_sites('1, 3, 4,')
        self.assertEqual(cleaned, '1 3 4')
    def test_clean_up_sites_extra_spaces(self):
        """Already-clean space-separated input passes through unchanged."""
        cleaned = source.clean_up_sites('1 3 4 5 2')
        self.assertEqual(cleaned, '1 3 4 5 2')
    def test_clean_up_sites_spacesandcommas(self):
        """Mixed comma/space separators are normalized."""
        cleaned = source.clean_up_sites('1, 3, 4 5,')
        self.assertEqual(cleaned, '1 3 4 5')
    def test_int_list(self):
        """String site numbers become zero-based int indices."""
        expected = [0, 1, 2]
        self.assertEqual(source.intList(['1', '2', '3']), expected)
    def test_int_list_finds_duplicate_values(self):
        """Duplicate entries are dropped."""
        expected = [0, 1, 3]
        self.assertEqual(source.intList(['1', '2', '2', '2', '4']), expected)
    def test_dates_are_added_to_payload_dict(self):
        """addDates() formats arrival/departure into the request payload."""
        start = datetime.strptime('2018-08-12', '%Y-%m-%d')
        end = datetime.strptime('2018-09-01', '%Y-%m-%d')
        expected = {'arrivalDate': 'Sun Aug 12 2018',
                    'departureDate': 'Sat Sep 01 2018',
                    'camping_common_3012': "4"}
        self.assertEqual(source.addDates(start, end), expected)
class DFTests(unittest.TestCase):
    """Placeholder for DataFrame-level tests; no cases implemented yet."""
    def setUp(self):
        pass
# Run the suite when executed directly.
if __name__ == '__main__':
    unittest.main()
|
import AddressBook # noqa: F401
from PyObjCTools.TestSupport import TestCase, min_sdk_level
class TestABPersonPickerDelegate(TestCase):
    """Verify the AddressBook bindings expose the ABPersonPickerDelegate
    protocol (API available from macOS 10.9)."""
    @min_sdk_level("10.9")
    def test_protocols(self):
        self.assertProtocolExists("ABPersonPickerDelegate")
|
import matplotlib.pyplot as plt
from random_walk import RandomWalk
# Keep generating and plotting random walks until the user declines.
while True:
    walk = RandomWalk()
    walk.fill_walk()
    # Color each point by its position in the walk (light -> dark blue).
    point_numbers = list(range(walk.num_points))
    plt.scatter(walk.x_values, walk.y_values, c=point_numbers,
                cmap=plt.cm.Blues, edgecolor='none', s=15)
    plt.show()
    keep_running = input("Make another walk? (y/n)")
    if keep_running == 'n':
        break
import getopt
import os
import re
import sys
dupsFile = "dups.txt"
filepath = None
toRemoveDupFile = True
toDryRun = False
# -p <path>: directory to scan (required)
# -r: keep the generated dups file afterwards
# -d: dry run -- report deletions without removing anything
opts, args = getopt.getopt(sys.argv[1:], "p:rd")
for o, a in opts:
    if o == "-p":
        filepath = a
    elif o == "-r":
        toRemoveDupFile = False
    elif o == "-d":
        toDryRun = True
if filepath is None:  # idiom fix: identity test for None, not ==
    print("Error: Please provide path using the -p option")
    sys.exit(1)  # fix: explicit non-zero exit code on usage error
# NOTE(review): the path is interpolated into a shell command line; a path
# containing double quotes would break (or inject into) the command.
os.system("findimagedupes -R \"{}\" > \"{}\"".format(filepath, dupsFile))
def deleteAllButLargestAndOldest(filepaths):
    """Delete duplicate files, keeping the largest; among files tied for
    largest, keep only those sharing the oldest (earliest) mtime.

    Honors the module-level ``toDryRun`` flag: deletions are printed but
    skipped during a dry run.
    """
    if not filepaths:
        return
    # Pass 1: largest size in the group.
    maxSize = max(os.stat(p).st_size for p in filepaths)
    # Pass 2: delete smaller files, collect the ties for largest.
    maxFilepaths = []
    for path in filepaths:
        if os.stat(path).st_size < maxSize:
            print("D:", path)
            if not toDryRun:
                os.remove(path)
        else:
            maxFilepaths.append(path)
    # Pass 3: among equally-sized survivors delete everything newer than
    # the oldest one.
    if len(maxFilepaths) > 1:
        # BUG FIX: the original stat'ed the stale loop variable `filepath`
        # instead of each candidate, and initialized the "oldest" time to 0,
        # so every max-size file (mtime > 0) got deleted -- including the
        # one that should have been kept.
        oldestModifiedTime = min(os.stat(p).st_mtime for p in maxFilepaths)
        for maxFilepath in maxFilepaths:
            if os.stat(maxFilepath).st_mtime > oldestModifiedTime:
                print("D:", maxFilepath)
                if not toDryRun:
                    os.remove(maxFilepath)
# Parse findimagedupes output: each line lists the paths of one duplicate
# group, separated by whitespace.
with open(dupsFile, 'r') as fp:
    for cnt, line in enumerate(fp):
        # NOTE(review): this regex assumes every path ends in jpg/png/gif and
        # contains no whitespace -- a path with spaces would be split apart.
        matches = re.findall("(?:(.*?(?:jpg|png|gif))[\s]{0,1})+?", line)
        deleteAllButLargestAndOldest(matches)
if toRemoveDupFile:
    os.remove(dupsFile)
|
#!/usr/bin/env python
import os
import shutil
import subprocess
import time
import click
from qgreenland.constants import (INPUT_DIR,
RELEASES_DIR,
TaskType,
WIP_DIR,
ZIP_TRIGGERFILE)
# Literal 'True'/'False' strings; parsed back to bool by _validate_boolean_choice.
BOOLEAN_CHOICE = click.Choice(['True', 'False'], case_sensitive=False)
def _rmtree(directory, *, retries=3):
"""Add robustness to shutil.rmtree.
Retries in case of intermittent issues, e.g. with network storage.
"""
if os.path.isdir(directory):
for i in range(retries):
try:
shutil.rmtree(directory)
return
except OSError as e:
print(f'WARNING: shutil.rmtee failed for path: {directory}')
print(f'Exception: {e}')
print(f'Retrying in {i} seconds...')
time.sleep(i)
# Allow caller to receive exceptions raised on the final try
shutil.rmtree(directory)
def cleanup_intermediate_dirs():
    """Delete all intermediate data, except maybe 'fetch' dir."""
    if os.path.isfile(ZIP_TRIGGERFILE):
        os.remove(ZIP_TRIGGERFILE)
    # Remove every task-type working dir except the (expensive) fetch cache.
    for task_type in TaskType:
        if task_type != TaskType.FETCH:
            _rmtree(task_type.value)
    if os.path.isdir(WIP_DIR):
        for x in os.listdir(WIP_DIR):
            if x.startswith('tmp'):
                # BUG FIX: os.listdir yields bare names; join with WIP_DIR so
                # we delete the tmp dir itself, not a cwd-relative path.
                _rmtree(os.path.join(WIP_DIR, x))
def _validate_boolean_choice(_ctx, _param, value):
if value == 'True':
return True
if value == 'False':
return False
raise click.BadParameter(
f'Expected "True" or "False"; Received "{value}"'
)
def _validate_ambiguous_command(kwargs):
"""Validate for conflicting options and suggest a fix."""
msg = (
'Ambiguous command! You have requested both to delete all'
' {resource}s _and_ to delete {resource}s by layer ID. Please choose'
' only one.'
)
if kwargs['delete_all_wip'] and kwargs['delete_wips_by_pattern']:
raise click.UsageError(msg.format(resource='WIP'))
if kwargs['delete_all_input'] and kwargs['delete_inputs_by_pattern']:
raise click.UsageError(msg.format(resource='input'))
return kwargs
def print_and_run(cmd, *, dry_run):
    """Echo *cmd*, then execute it via bash unless *dry_run* is set."""
    print(cmd)
    if dry_run:
        return
    subprocess.run(
        cmd,
        shell=True,
        check=True,
        executable='/bin/bash'  # /bin/sh doesn't support brace expansion
    )
@click.command(context_settings={'help_option_names': ['-h', '--help']})
@click.option('dry_run', '--dry-run', '-d',
              help="Print commands, but don't actually delete anything.",
              is_flag=True)
@click.option('delete_inputs_by_pattern', '--delete-inputs-by-pattern', '-i',
              help=(
                  'Bash glob/brace pattern used to delete input datasources by'
                  ' `<dataset_id>.<source_id>`'
              ),
              multiple=True)
@click.option('delete_wips_by_pattern', '--delete-wips-by-pattern', '-w',
              help=(
                  'Pattern used to delete WIP layers by layer ID'
              ), multiple=True)
@click.option('delete_all_input', '--delete-all-input', '-I',
              help=(
                  'Delete _ALL_ input-cached layers, ignoring LAYER_ID_PATTERN'
              ),
              type=BOOLEAN_CHOICE, callback=_validate_boolean_choice,
              default='False', show_default=True)
@click.option('delete_all_wip', '--delete-all-wip', '-W',
              help=(
                  'Delete _ALL_ WIP layers, ignoring LAYER_ID_PATTERN'
              ),
              type=BOOLEAN_CHOICE, callback=_validate_boolean_choice,
              default='False', show_default=True)
@click.option('delete_compiled', '--delete-compiled', '-C',
              help=(
                  'Delete compiled (but not zipped) QGreenland datapackage'
              ),
              type=BOOLEAN_CHOICE, callback=_validate_boolean_choice,
              default='True', show_default=True)
# TODO: delete_all_wip_tmp: Deletes dirs like `transform-luigi-tmp-4765361527/`
# from wip
# TODO: delete_all_dev_releases?
@click.option('delete_all_releases', '--delete-all-releases', '-R',
              help=(
                  'Delete all zipped QGreenland releases'
              ),
              type=BOOLEAN_CHOICE, callback=_validate_boolean_choice,
              default='False', show_default=True)
# NOTE: Complexity check (C901) is disabled because this function is just a big
# set of switches by design!
def cleanup_cli(**kwargs):  # noqa: C901
    """Clean up input, WIP, and/or output data created by QGreenland.
    By default, clean up the compiled (but not zipped) datapackage.
    """
    _validate_ambiguous_command(kwargs)
    if kwargs['dry_run']:
        print('WARNING: In DRY RUN mode. Nothing will be deleted.')
        print()
    # Targeted deletions first: each pattern maps to one rm -rf invocation.
    if wip_patterns := kwargs['delete_wips_by_pattern']:
        for p in wip_patterns:
            print_and_run(
                f'rm -rf {TaskType.WIP.value}/{p}',
                dry_run=kwargs['dry_run']
            )
    if inp_patterns := kwargs['delete_inputs_by_pattern']:
        for p in inp_patterns:
            print_and_run(
                f'rm -rf {INPUT_DIR}/{p}',
                dry_run=kwargs['dry_run']
            )
    # Bulk deletions (mutually exclusive with the pattern options above,
    # enforced by _validate_ambiguous_command).
    if kwargs['delete_all_input']:
        print_and_run(
            f'rm -rf {INPUT_DIR}/*',
            dry_run=kwargs['dry_run']
        )
    if kwargs['delete_all_wip']:
        print_and_run(
            f'rm -rf {TaskType.WIP.value}/*',
            dry_run=kwargs['dry_run']
        )
    if kwargs['delete_compiled']:
        print_and_run(
            f'rm -rf {TaskType.FINAL.value}/*',
            dry_run=kwargs['dry_run']
        )
        # The triggerfile tells Luigi tasks to zip the compiled data. Can't do
        # that if we just deleted it!
        if os.path.isfile(ZIP_TRIGGERFILE):
            print_and_run(
                f'rm {ZIP_TRIGGERFILE}',
                dry_run=kwargs['dry_run']
            )
    if kwargs['delete_all_releases']:
        print_and_run(
            f'rm -rf {RELEASES_DIR}/*',
            dry_run=kwargs['dry_run']
        )
# Entry point when run as a script.
if __name__ == '__main__':
    cleanup_cli()
|
'''
Created on Nov 14, 2012
@author: io
'''
import socket
import threading
import SocketServer
from SocketServer import ThreadingMixIn
from Queue import Queue
import threading, socket
class ThreadPoolMixIn(ThreadingMixIn):
    '''
    use a thread pool instead of a new thread on every request
    code from : http://code.activestate.com/recipes/574454-thread-pool-mixin-class-for-use-with-socketservert/
    '''
    allow_reuse_address = True # seems to fix socket.error on portal2 restart
    numThreads = None
    def __init__(self, numThreads):
        ''' Sets up the threadPool and "fills" it with the threads. '''
        self.numThreads = numThreads
        # Bounded queue: at most numThreads requests held pending at once.
        self.requests = Queue(self.numThreads)
        for n in range(self.numThreads):
            t = threading.Thread(target = self.process_request_thread)
            # Daemon workers so the process can exit without joining them.
            t.setDaemon(True)
            t.start()
    def process_request(self, request, client_address):
        ''' Simply collect requests and put them on the queue for the workers. '''
        self.requests.put((request, client_address))
    def process_request_thread(self):
        ''' Obtains request and client_address from the queue instead of directly from a call '''
        # The thread starts and stays on this loop.
        # The method call hangs waiting until something is inserted into self.requests
        # and .get() unblocks
        while True:
            ThreadingMixIn.process_request_thread(self, *self.requests.get())
    # http://docs.python.org/tut/node6.html#SECTION006740000000000000000
    '''
    def handle_request(self):
        simply collect requests and put them on the queue for the workers.
        try:
            request, client_address = self.get_request()
        except socket.error:
            return
        if self.verify_request(request, client_address):
            self.requests.put((request, client_address))
    '''
class ThreadedTCPRequestHandler(SocketServer.BaseRequestHandler):
    # Minimal HTTP-like responder used to exercise the thread pool: reads one
    # chunk of the request and answers with the handling thread's name.
    def handle(self):
        data = self.request.recv(1024)
        cur_thread = threading.current_thread()
        #response = "{}: {}".format(cur_thread.name, data)
        response = []
        response.append("HTTP/1.0 200 OK\r\n")
        # NOTE(review): Content-Length is hard-coded to 12 but the body below
        # ('[%s] - ' % thread) varies in length -- confirm intended.
        response.append("Content-Length: 12\r\n")
        response.append("\r\n")
        response.append("[%s] - " % (cur_thread))
        self.request.sendall(''.join(response))
class ThreadedTCPServer(ThreadPoolMixIn, SocketServer.TCPServer):
    """TCP server backed by the thread pool mixin (no extra behavior)."""
    pass
class ThreadingPoolTCPServer(ThreadPoolMixIn, SocketServer.TCPServer):
    """Calls the __init__ from both super."""
    # Explicitly invokes both base initializers: the mixin spawns the worker
    # pool, then TCPServer binds and activates the listening socket.
    def __init__(self, server_address, RequestHandlerClass, numThreads, bind_and_activate=True):
        ThreadPoolMixIn.__init__(self, numThreads)
        SocketServer.TCPServer.__init__(self, server_address, RequestHandlerClass)
def client(ip, port, message):
    # Send *message* to (ip, port), print up to 1 KiB of the reply, and
    # always close the socket.
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect((ip, port))
    try:
        sock.sendall(message)
        response = sock.recv(1024)
        print "Received: {}".format(response)
    finally:
        sock.close()
def server():
    # Create a pooled TCP server on localhost:8000 and run its
    # serve_forever loop on a background thread; returns the server object.
    # Port 0 means to select an arbitrary unused port
    HOST, PORT = "localhost", 8000
    numThreads = 100
    server_obj = ThreadingPoolTCPServer((HOST, PORT), ThreadedTCPRequestHandler, numThreads)
    ip, port = server_obj.server_address
    # Start a thread with the portal2 -- that thread will then start one
    # more thread for each request
    server_thread = threading.Thread(target=server_obj.serve_forever)
    # Exit the portal2 thread when the main thread terminates
    #server_thread.daemon = True
    server_thread.start()
    print "Server loop running in thread:", server_thread.name
    return server_obj
# Start the server; the string block below shows how to exercise it with
# a few clients (kept disabled).
if __name__ == "__main__":
    server_obj = server()
'''
ip, port = "localhost", 8000
client(ip, port, "Hello World 1")
client(ip, port, "Hello World 2")
client(ip, port, "Hello World 3")
client(ip, port, "Hello World 3")
client(ip, port, "Hello World 3")
client(ip, port, "Hello World 3")
client(ip, port, "Hello World 3")
'''
#server_obj.shutdown()
|
from socket import *
import json
# Line-oriented TCP sender: forwards each line typed on stdin to a server on
# localhost:8881.  NOTE(review): nothing is received or closed; the loop only
# ends via EOF/KeyboardInterrupt -- confirm that is acceptable.
s = socket(AF_INET, SOCK_STREAM)
s.connect(('localhost', 8881))
while True:
    msg = input('')
    s.send(msg.encode())
from django.contrib import admin
from . import models
# class ELement():
# pass
# class Address():
# fk = Element()
class AddressInline(admin.TabularInline):
    """Tabular inline for editing ElementAddress rows on the Element page."""
    model = models.ElementAddress
class MenuInline(admin.TabularInline):
    """Tabular inline for editing MenuCat rows on the Element page."""
    model = models.MenuCat
class FoodInline(admin.TabularInline):
    """Tabular inline for editing Food rows on the MenuCat page."""
    model = models.Food
class MenuAdmin(admin.ModelAdmin):
    """Admin for MenuCat with its foods editable inline."""
    inlines = [
        FoodInline,
    ]
class ElementAdmin(admin.ModelAdmin):
    """Admin for Element with addresses and menu categories inline."""
    inlines = [
        AddressInline, MenuInline,
    ]
# Register models with the admin site; Element and MenuCat use the custom
# admins defined above, the rest use the default ModelAdmin.
admin.site.register(models.Element, ElementAdmin)
# admin.site.register(models.ElementAddress)
admin.site.register(models.Category)
admin.site.register(models.SubCategory)
admin.site.register(models.MenuCat, MenuAdmin)
admin.site.register(models.Food)
admin.site.register(models.Account)
admin.site.register(models.Supplier)
admin.site.register(models.Product)
|
"""
Let us say your expense for every month are listed below,
January - 2200
February - 2350
March - 2600
April - 2130
May - 2190
Create a list to store these monthly expenses and using that find out,
"""
exp1=[2200,2350,2600,2130,2190]
print('Your list has\n',exp1)
#1. In Feb, how many dollars you spent extra compare to January?
print('1.1 No of dollars you spent more in Feb compare to Jan:',exp1[1]-exp1[0],'$')
#2. Find out your total expense in first quarter (first three months) of the year.
print('\n1.2 Total expenses in first quarter:',exp1[0]+exp1[1]+exp1[2])
#3. Find out if you spent exactly 2000 dollars in any month
spent=2000
try:
    # Consistency fix: search for the `spent` variable, not a literal 2000.
    result=exp1.index(spent)
except ValueError as e:
    result=-1
if result ==-1:
    print('\n1.3 No match found with ',spent,'$ in the list')
else:
    print('\n1.3 Match found with ',spent,'$ in the list')
print('\n1.3 Smart Solution is ')
print('Did i spend 2000$ in any month?',2000 in exp1)
#4. June month just finished and your expense is 1980 dollar. Add this item to our monthly expense list
exp1.append(1980) #Append will add at end of ur list
print('\n1.4 After june month expenses, ur list has\n',exp1)
#5. U returnd an item that u bought in April and got refund of 200$. Make correction to ur monthly expense list based on this
exp1[3]=exp1[3]-200
print('\n1.5 After April month refund, your list has\n',exp1)
"""
You have a list of your favourite marvel super heros.
heros=['spider man','thor','hulk','iron man','captain america']
Using this find out below,
"""
heros=['spider man','thor','hulk','iron man','captain america']
print('\n#### 2 Qos')
print('List has',heros)
#1. Length of the list
len1= len(heros)
# Fix: use the previously-unused len1 (same value as len(heros) here).
print('2.1 Length of the list',str(len1))
#2. Add 'black panther' at the end of this list
heros.append('black panther')
print('\n2.2 After adding BLACK PANTHER to list',heros)
#3. You realize that you need to add 'black panther' after 'hulk',so remove it from the list first and then add it after 'hulk'
hulk_index=heros.index('hulk')
heros.remove('black panther')
heros.insert(hulk_index+1,'black panther')
print('\n2.3. After updating BLACK PANTHER position in a list',heros)
#4. Now you don't like thor and hulk because they get angry easily :)
# So you want to remove thor and hulk from list and replace them with doctor strange (because he is cool).
# Do that with one line of code.
heros[1:3]=['doctor strange']
print('\n2.4 thor and hulk to doctor strange\n',heros)
#5. Sort the heros list in alphabetical order (Hint. Use dir() functions to list down all functions available in list)
heros.sort()
print('\n2.5 sorted',heros)
import datetime
import pickle
import sys
import os
import bs4 as bs
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import numpy
import requests
# from matplotlib import style
import plotly.graph_objs as go
# from matplotlib.finance import candlestick_ohlc
import plotly.plotly as py
import plotly.graph_objs as go
import pandas as pd
from Utils import read_tickers
# Local working directory and well-known file names for the stock tooling.
filepath = 'C:\\Users\\Tom\\OneDrive\\Dokumente\\Thomas\\Aktien\\'
stock_list_name = "stockList.txt"
stocks_to_buy_name = "StocksToBuy.CSV"
excel_file_name = '52W-HochAutomatisch_Finanzen.xlsx'
tickers_file_name = "tickers.pickle"
# Full path of the pickled ticker cache.
tickers_file = filepath + tickers_file_name
def read_and_save_sp500_tickers(tickers_file):
    """
    read the sp500 tickers and saves it to given file
    :param tickers_file: file to save the sp500 tickers
    :return: nothing
    """
    # NOTE(review): despite the name, this scrapes the German CDAX listing
    # from de.wikipedia.org, not the S&P 500 -- confirm which is intended.
    resp = requests.get('https://de.wikipedia.org/wiki/Liste_der_im_CDAX_gelisteten_Aktien')
    soup = bs.BeautifulSoup(resp.text, 'lxml')
    table = soup.find('table', {'class': 'wikitable sortable zebra'})
    tickers = []
    # Skip the header row; the ticker symbol sits in the third column.
    for row in table.findAll('tr')[1:]:
        ticker = row.findAll('td')[2].text
        tickers.append(ticker)
    with open(tickers_file, "wb") as f:
        pickle.dump(tickers, f)
#read_and_save_sp500_tickers(tickers_file)
# Load (and, with True, refresh) the cached tickers from disk.
read_tickers(tickers_file, True)
import unittest
from cgi.member import Member
class PersonClassTest(unittest.TestCase):
    """Basic construction tests for cgi.member.Member."""
    # BUG FIX: the method was named 'member_exist', which unittest never
    # discovers -- test methods must be prefixed with 'test'.
    def test_member_exist(self):
        m = Member()
        self.assertIsNotNone(m)
# TODO make test for add_to_member_table
# TODO make test for create_member
# Run the suite when executed directly.
if __name__ == '__main__':
    unittest.main()
import pymongo
# Connect to the local MongoDB instance and select the acm_aminer database.
client = pymongo.MongoClient("localhost", 27017)
# db name - aminer
db = client.acm_aminer
# collection
db.shortened
print "DB name: ", db.name
print "DB collection: ", db.publications
print "[INFO] Processing papers"
# Load the whole Aminer dump into memory; records are separated by blank lines.
file = open("../data/ACM_Aminer.txt")
lines = file.readlines()
file.close()
papers = {}
i = 0
# Outer loop: one paper record per pass; inner loop: parse its tagged lines.
while i < len(lines) :
    paper = {}
    paper['references'] = []
    while lines[i] != '\n' :
        line = lines[i].strip()
        '''
        #index ---- index id of this paper
        #* ---- paper title
        #@ ---- authors (separated by semicolons)
        #o ---- affiliations (separated by semicolons, and each affiliaiton corresponds to an author in order)
        #t ---- year
        #c ---- publication venue
        #% ---- the id of references of this paper (there are multiple lines, with each indicating a reference)
        #! ---- abstract
        '''
        if line.startswith('#index') : paper['index'] = line[len('#index'):]
        # Titles, affiliations and abstracts are deliberately skipped.
        if line.startswith('#*') :
            i += 1
            continue #paper['title'] = line[len('#*'):]
        if line.startswith('#@') : paper['authors'] = line[len('#@'):].split(',')
        if line.startswith('#o') :
            i += 1
            continue #paper['affiliations'] = line[len('#o'):]
        if line.startswith('#t') : paper['year'] = line[len('#t'):]
        if line.startswith('#c') : paper['publication'] = line[len('#c'):]
        if line.startswith('#!') :
            i += 1
            continue#paper['abstract'] = line[len('#!'):]
        if line.startswith('#%') : paper['references'].append( line[len('#%'):] )
        print "line",i+1,"done"
        i += 1
    db.publications.insert_one(paper)
    print "[INFO] inserted into db paper", paper['index']
    # Skip the blank separator line before the next record.
    i += 1
#file.close()
|
import os
import sys
from stve.log import LOG as L
from stve.cmd import run
from stve.script import StveTestCase
from stve.exception import *
from nose.tools import with_setup, raises, ok_, eq_
try:
import configparser
except:
import ConfigParser as configparser
LIB_PATH = os.path.dirname(os.path.abspath(__file__))
if not LIB_PATH in sys.path:
sys.path.insert(0, LIB_PATH)
from runner import TestStveTestRunner as TSTR
class TestAndroidTestRunner(TSTR):
    """End-to-end runs of the stve test runner against the bundled Android
    scripts (data/config.ini may pin the target device serial)."""
    def get_serial(self):
        """Return the target device serial: from data/config.ini when present
        and valid, otherwise from `adb get-serialno`."""
        conf = os.path.join(self.root, "data", "config.ini")
        print(conf)
        if not os.path.exists(conf):
            serial = run("adb get-serialno")[1].splitlines()[-1]
        else:
            try:
                config = configparser.ConfigParser()
                config.read(conf)
                serial = config.get("adb", "serial")
            except Exception as e:
                print(str(e))
                # BUG FIX: `serial` was left unbound on config errors,
                # raising UnboundLocalError at the return below; fall back
                # to querying adb directly instead.
                serial = run("adb get-serialno")[1].splitlines()[-1]
        return serial
    def get_apk_path(self):
        """Path of the bundled test APK directory."""
        return os.path.join(
            self.bin_path, "apk", "aura")
    def get_jar_path(self):
        """Path of the bundled automation jar directory."""
        return os.path.join(
            self.bin_path, "jar", "aubs")
    @with_setup(TSTR.setup, TSTR.teardown)
    def test_library_execute_android_success_01(self):
        self.script_path = os.path.join(self.script_path, "android")
        self.base_library_execute_success("android_01.py")
    @with_setup(TSTR.setup, TSTR.teardown)
    def test_library_execute_android_success_02(self):
        self.script_path = os.path.join(self.script_path, "android")
        self.base_library_execute_success("android_02.py")
    @with_setup(TSTR.setup, TSTR.teardown)
    def test_library_execute_android_success_03(self):
        serial = self.get_serial()
        StveTestCase.set("android.serial", serial)
        self.script_path = os.path.join(self.script_path, "android")
        self.base_library_execute_success("android_03.py")
    @with_setup(TSTR.setup, TSTR.teardown)
    def test_library_execute_android_success_04(self):
        serial = self.get_serial()
        StveTestCase.set("android.serial", serial)
        self.script_path = os.path.join(self.script_path, "android")
        self.base_library_execute_success("android_04.py")
    @with_setup(TSTR.setup, TSTR.teardown)
    def test_library_execute_android_success_05(self):
        serial = self.get_serial()
        StveTestCase.set("android.serial", serial)
        self.script_path = os.path.join(self.script_path, "android")
        self.base_library_execute_success("android_05.py")
    @with_setup(TSTR.setup, TSTR.teardown)
    def test_library_execute_android_success_06(self):
        serial = self.get_serial()
        StveTestCase.set("android.serial", serial)
        StveTestCase.set("android.apk", self.get_apk_path())
        self.script_path = os.path.join(self.script_path, "android")
        self.base_library_execute_success("android_06.py")
    @with_setup(TSTR.setup, TSTR.teardown)
    def test_library_execute_android_success_07(self):
        serial = self.get_serial()
        StveTestCase.set("android.serial", serial)
        StveTestCase.set("android.jar", self.get_jar_path())
        self.script_path = os.path.join(self.script_path, "android")
        self.base_library_execute_success("android_07.py")
|
from itertools import chain, repeat
import pprint
from textwrap import dedent
import unittest
from clojure import requireClj
from py.clojure.lang.compiler import Compiler
from py.clojure.lang.fileseq import StringReader
from py.clojure.lang.globals import currentCompiler
from py.clojure.lang.lispreader import read
import py.clojure.lang.rt as RT
from py.clojure.lang.symbol import Symbol
from py.clojure.util.byteplay import Code, Label, SetLineno
requireClj('./clj/clojure/core.clj')
class NonOverloadedFunctions(unittest.TestCase):
    """Compare bytecode emitted by the clojure-py compiler for (defn ...)
    forms against bytecode CPython produces for equivalent Python."""
    def setUp(self):
        RT.init()
        self.comp = Compiler()
        currentCompiler.set(self.comp)
        self.comp.setNS(Symbol.intern('clojure.core'))
    def _assertSameCode(self, actual, expected):
        """Assert opcode-by-opcode equality, pretty-printing the pairing
        when it fails."""
        items = [(a == e, a, e) for a, e in self.zipActualExpected(actual, expected)]
        try:
            assert all(item[0] for item in items)
        except AssertionError:
            # BUG FIX: the original swallowed the AssertionError after
            # printing, so a bytecode mismatch never failed the test.
            pprint.pprint(items)
            raise
    def testZeroArguments(self):
        actual = self.compileActual('(defn abc [] 2)')
        expected = self.compileExpected('''
            def abc():
                return 2''')
        self._assertSameCode(actual, expected)
    def testOneArgument(self):
        actual = self.compileActual('(defn abc ([x] x))')
        expected = self.compileExpected('''
            def abc(x):
                return x''')
        self._assertSameCode(actual, expected)
    def testMultipleArguments(self):
        actual = self.compileActual('(defn abc ([x] x) ([x y] y))')
        expected = self.compileExpected('''
            def abc(*__argsv__):
                if __argsv__.__len__() == 1:
                    x = __argsv__[0]
                    return x
                elif __argsv__.__len__() == 2:
                    x = __argsv__[0]
                    y = __argsv__[1]
                    return y
                raise Exception()''')
        # There's a slight different between clojure-py's compiled code and
        # Python's: clojure-py produces (LOAD_CONST, <type 'exceptions.Exception'>)
        # while Python produces (LOAD_CONST, 'Exception'). Just ignore it; it's
        # not what we're testing here.
        # Also the last to two bytecodes generated by Python are to load None
        # and return it, which isn't necessary after raising an exception.
        items = [(a == e, a, e) for a, e in self.zipActualExpected(actual, expected[:-2]) if e[1] != 'Exception']
        try:
            assert all(item[0] for item in items)
        except AssertionError:
            # BUG FIX: re-raise so the mismatch actually fails the test.
            pprint.pprint(items)
            raise
    def zipActualExpected(self, actual, expected):
        """Zip the two opcode lists, padding the shorter side with None."""
        difference = len(expected) - len(actual)
        return zip(chain(actual, repeat(None, difference)),
                   chain(expected, repeat(None, -difference)))
    def compileActual(self, code):
        """Compile *code* with the clojure-py compiler and return its bytecode
        (SetLineno pseudo-ops stripped)."""
        r = StringReader(code)
        s = read(r, True, None, True)
        res = self.comp.compile(s)
        fn = self.comp.executeCode(res)
        return [c for c in Code.from_code(fn.func_code).code[:] if c[0] is not SetLineno]
    def compileExpected(self, code):
        """exec() the dedented Python source and return the bytecode of the
        resulting `abc` function (SetLineno stripped)."""
        codeobject = compile(dedent(code), 'string', 'exec')
        globs = {}
        result = eval(codeobject, {}, globs)
        return [c for c in Code.from_code(globs['abc'].func_code).code[:] if c[0] is not SetLineno]
class TruthinessTests(unittest.TestCase):
    # Clojure truthiness: only nil and false are falsey; empty collections
    # are truthy (unlike Python).
    def setUp(self):
        RT.init()
        self.comp = Compiler()
        currentCompiler.set(self.comp)
        self.comp.setNS(Symbol.intern('clojure.core'))
    def testTrue(self):
        self.assertTrue(self.eval('(if true true false)'))
    def testList(self):
        self.assertTrue(self.eval('(if \'() true false)'))
        self.assertTrue(self.eval('(if \'(1) true false)'))
    def testVector(self):
        self.assertTrue(self.eval('(if [] true false)'))
        self.assertTrue(self.eval('(if [1] true false)'))
    def testMap(self):
        self.assertTrue(self.eval('(if {} true false)'))
        self.assertTrue(self.eval('(if {1 2} true false)'))
    # NOTE(review): bare @unittest.skip (no reason string) passes the method
    # itself as the reason -- confirm the test is actually skipped here.
    @unittest.skip # hash sets aren't implemented yet
    def testSet(self):
        self.assertTrue(self.eval('(if #{} true false)'))
        self.assertTrue(self.eval('(if #{1} true false)'))
    def testNil(self):
        self.assertFalse(self.eval('(if nil true false)'))
        self.assertFalse(self.eval('(if None true false)'))
    def testFalse(self):
        self.assertFalse(self.eval('(if false true false)'))
    def eval(self, code):
        # Read, compile and execute a single Clojure form; returns its value.
        r = StringReader(code)
        s = read(r, True, None, True)
        res = self.comp.compile(s)
        return self.comp.executeCode(res)
class PyNamespaceTests(unittest.TestCase):
    # Python builtins must be reachable only through the py/ namespace,
    # never as bare names.
    def setUp(self):
        RT.init()
        self.comp = Compiler()
        currentCompiler.set(self.comp)
        self.comp.setNS(Symbol.intern('clojure.core'))
    def testBuiltinsNamespaced(self):
        self.assertEqual(self.eval('(py/str [1 2 3])'), '[1 2 3]')
        self.assertEqual(self.eval('(py/list "abc")'), ['a', 'b', 'c'])
        self.assertEqual(self.eval('((py/getattr "namespace" "__len__"))'), 9)
    def testBuiltinsNotIncluded(self):
        self.assertRaises(NameError, self.eval, '(str [1 2 3])')
        self.assertRaises(NameError, self.eval, '(getattr [1 2 3] "pop")')
    def eval(self, code):
        # Read, compile and execute a single Clojure form; returns its value.
        r = StringReader(code)
        s = read(r, True, None, True)
        res = self.comp.compile(s)
        return self.comp.executeCode(res)
|
from django import forms
from django.core.validators import validate_email, FileExtensionValidator
from .validators import validate_filesize, validate_phone_number
from django.conf import settings
class AuthenticationForm(forms.Form):
    """Login form (login.html): username and password."""
    # This form is used in login.html page. Takes input username and password.
    user_name = forms.CharField(strip=True)
    password = forms.CharField(widget=forms.PasswordInput)
class DetailsForm(forms.Form):
    """Job-application details form (details.html)."""
    # This form is used in details.html page.
    # Takes all the necessary inputs specified in the assignment description.
    # Handles specified restrictions. Custom validators are written in validators.py
    name = forms.CharField(max_length=256, strip=True)
    email = forms.EmailField(max_length=256, widget=forms.EmailInput, validators=[validate_email])
    # Format enforced by the custom validate_phone_number validator.
    phone = forms.CharField(max_length=14, validators=[validate_phone_number])
    full_address = forms.CharField(max_length=512)
    name_of_university = forms.CharField(max_length=256)
    # NOTE(review): the 2015-2020 graduation window is hard-coded -- confirm
    # it shouldn't track the current year.
    graduation_year = forms.IntegerField(max_value=2020, min_value=2015)
    cgpa = forms.FloatField(max_value=4.0, min_value=2.0, required=False)
    experience_in_months = forms.IntegerField(max_value=100, min_value=0, required=False)
    current_work_place_name = forms.CharField(max_length=256, required=False)
    applying_in = forms.ChoiceField(choices=settings.APPLY_CHOICES)
    expected_salary = forms.IntegerField(max_value=60000, min_value=15000)
    field_buzz_reference = forms.CharField(max_length=256, required=False)
    # PDF only; size limit enforced by the custom validate_filesize validator.
    cv_file = forms.FileField(widget=forms.FileInput, allow_empty_file=False, validators=[FileExtensionValidator(allowed_extensions=['pdf']), validate_filesize])
# Public API of move.core, re-exported by the imports below.
__all__ = ["logging", "set_global_seed", "typing"]
from move.core import logging, typing
from move.core.seed import set_global_seed
|
# -*- coding: utf-8 -*-
import StringIO
import json
import logging
import random
import urllib
import urllib2
# for sending images
from PIL import Image
import multipart
from google.appengine.api import urlfetch
from google.appengine.ext import ndb
import webapp2
import splitter
import sender
import dictionary
# Telegram bot token, read once at import time from a private file.
TOKEN = open('private/token','rb').read().strip()
URL = 'https://api.telegram.org/bot'
# Maximum length of text message in Telegram
MAX = 4000
# Webster's 1913 dictionary backing store.
d = dictionary.Dic('web1913')
# chat_ids that issued /feedback and whose next message will be forwarded.
feedqueue = []
class EnableStatus(ndb.Model):
    # key name: str(chat_id)
    # Datastore flag: whether the bot should answer this chat.
    enabled = ndb.BooleanProperty(indexed=False, default=False)
def setEnabled(chat_id, yes):
    # Persist the enabled flag for chat_id, creating the record on demand.
    es = EnableStatus.get_or_insert(str(chat_id))
    es.enabled = yes
    es.put()
def getEnabled(chat_id):
    # Look up the stored enabled flag for chat_id; absent record means False.
    record = EnableStatus.get_by_id(str(chat_id))
    return record.enabled if record else False
class MeHandler(webapp2.RequestHandler):
    # GET /me: proxy Telegram's getMe call and echo the JSON response.
    def get(self):
        urlfetch.set_default_fetch_deadline(60)
        self.response.write(json.dumps(json.load(urllib2.urlopen(URL + TOKEN + '/' + 'getMe'))))
class GetUpdatesHandler(webapp2.RequestHandler):
    # GET /updates: proxy Telegram's getUpdates call (manual polling aid).
    def get(self):
        urlfetch.set_default_fetch_deadline(60)
        self.response.write(json.dumps(json.load(urllib2.urlopen(URL + TOKEN + '/' + 'getUpdates'))))
class SetWebhookHandler(webapp2.RequestHandler):
    # GET /set_webhook?url=...: register the given URL with Telegram.
    def get(self):
        urlfetch.set_default_fetch_deadline(60)
        url = self.request.get('url')
        if url:
            self.response.write(json.dumps(json.load(urllib2.urlopen(URL + TOKEN + '/' + 'setWebhook', urllib.urlencode({'url': url})))))
class WebhookHandler(webapp2.RequestHandler):
    # POST /webhook: entry point for Telegram updates. Dispatches
    # /-commands, forwards /feedback messages to the maintainer, and
    # otherwise looks the text up in the Webster's 1913 dictionary.
    def post(self):
        urlfetch.set_default_fetch_deadline(60)
        body = json.loads(self.request.body)
        logging.info('Request body: \n\n %s' % body)
        self.response.write(json.dumps(body))
        update_id = body['update_id']
        message = body['message']
        message_id = message.get('message_id')
        date = message.get('date')
        # NOTE(review): `text` is None for non-text updates (photos, stickers,
        # ...); text.startswith below would then raise -- confirm only text
        # messages reach this webhook.
        text = message.get('text')
        fr = message.get('from')
        chat = message['chat']
        chat_id = chat['id']
        def reply(text=None):
            # Send `text` back to the originating chat, split into
            # Telegram-sized (MAX) chunks.
            if text:
                resp = ''
                for part in splitter.split(text, MAX):
                    resp += part
                    check = sender.send(
                        'POST',
                        URL + TOKEN + '/' + 'sendMessage',
                        {
                            'text': part.encode('utf-8'),
                            'chat_id': chat_id
                        }
                    )
                    if check:
                        logging.error(check.read())
                    else:
                        logging.info('Sent response: \n\n%s' % resp)
            else:
                sender.send(
                    'POST',
                    URL + TOKEN + '/' + 'sendMessage',
                    {
                        'text': 'Bot tries to send you an empty message.',
                        'chat_id': chat_id
                    }
                )
        def feedback():
            # Forward the user's message to the maintainer chat (926288).
            check = sender.send(
                'POST',
                URL + TOKEN + '/' + 'forwardMessage',
                {
                    'chat_id': 926288,
                    'from_chat_id': chat_id,
                    'message_id': message_id
                }
            )
            if check:
                logging.error(check.read())
            else:
                logging.info('Forwarded as a feedback:\n\n%s' % text)
        if text.startswith('/'): # Commands start with /
            if text == '/start':
                reply('Bot is serving.')
                setEnabled(chat_id, True)
            elif text == '/stop':
                reply('Bot ceased serving.')
                setEnabled(chat_id, False)
            elif text == '/info':
                reply(u'@WebstersBot defines English words for you.\n\nDefinitions are taken from Webster’s Revised Unabridged Dictionary (1913).\n\nTry to look up an ordinary word to unravel finer, vivid and sometimes rare (or obsolete) definitions.')
            elif text == '/help':
                reply(u'Try sending an English word to get it’s weakly formated definition.')
            elif text.startswith('/feedback ') or text.startswith('/feedback'):
                if text == '/feedback':
                    # Bare /feedback: queue this chat; its next message gets
                    # forwarded by the elif branch below.
                    global feedqueue
                    reply(u'Write your message below.')
                    feedqueue += [chat_id]
                else:
                    feedback()
                    reply(u'Thank you. Your message has been sent.')
            else:
                reply('Not a command yet.')
        elif chat_id in feedqueue:
            reply(u'Thank you for your feedback. Your message has been sent.')
            feedback()
            feedqueue.remove(chat_id)
        else:
            if getEnabled(chat_id):
                try:
                    # Dictionary entries are keyed with a capitalized headword.
                    response = d[text.capitalize()]
                    if response:
                        reply(response)
                    else:
                        reply(u'Definition of “%s” cannot be found.' % text)
                except Exception, e:
                    reply('Error.')
                    logging.error('Error: %s' % e)
            else:
                logging.info('Not enabled for chat_id %s' % chat_id)
# URL routes of the App Engine WSGI application.
app = webapp2.WSGIApplication([
    ('/me', MeHandler),
    ('/updates', GetUpdatesHandler),
    ('/set_webhook', SetWebhookHandler),
    ('/webhook', WebhookHandler),
], debug=True)
#!/home/jupyter/py-env/python2.7.13/bin/python2.7
import theano
import theano.tensor as T
from theano import pp
x = T.dmatrix('x') # declare variable
y = T.sum(1 / (1 + T.exp(-x)))
dy = T.grad(y,x)
f = theano.function([x], dy) # compile function
print f([[-1,0],[1,2]])
|
'''
Implementation of ZMP-based walking pattern generation.
However, the offset tracking error of ZMP in a long distance walking pattern is observed in this method.
For more details, refer to "Introduction to Humanoid Robotics" by Shuuji Kajita.
Background:
ZMP (Zero Moment Point) is a method for making a walking robot keep balance. The goal of this implementation
is to deduce the trajectory of CoM with given ZMPs.
'''
import numpy as np
import matplotlib.pyplot as plt
from scipy.linalg import solve_discrete_are
class WalkPattern3DGenerator(object):
    """ZMP preview-control walking pattern generator (Kajita-style).

    Builds a reference footstep ZMP pattern and, through an LQ preview
    controller on the cart-table model, computes the CoM trajectory whose
    resulting ZMP tracks that reference.
    """
    def __init__(self, sx = 0.3, sy = 0.1, N_step = 5, T_step = 1, dT = 5e-3, zc = 0.8, q = 1, r = 1e-6, g = 9.81, N = 320):
        '''
        Parameters for generating ZMP patterns:
            sx: The step length in x-axis.
            sy: The step length in y-axis.
            N_step: Total number of steps (Taking one step by either left or right foot).
            T_step: Duration (seconds) of each step.
            dT: Sampling time (seconds).
        '''
        self.sx = sx
        self.sy = sy
        self.N_step = N_step
        self.T_step = T_step
        self.dT = dT
        '''
        Parameters for the preview control system:
            dT: Sampling time (seconds).
            zc: The height of CoM.
            q, r: Positive weights.
            g: Gravity.
            N: Number of look-ahead samples in preview controller.
        '''
        self.zc = zc
        self.q = q
        self.r = r
        self.g = g
        self.N = N
    def generate(self, x0 = 0, dx0 = 0, d2x0 = 0, y0 = 0, dy0 = 0, d2y0 = 0):
        '''
        Input:
            x0, dx0, d2x0: The state vector [distance, velocity, acceleration] on x-axis.
            y0, dy0, d2y0: The state vector [distance, velocity, acceleration] on y-axis.
        Generate the trajectory of CoM that keeps the robot walking. The steps are:
            1. Create the parameter control system.
            2. Generate the ZMPs.
            3. Compute the trajectory of CoM and verify the ZMPs.
        Returns (Xs, Ys, Pxs, Pys, prefx, prefy): CoM positions, predicted
        ZMPs and reference ZMPs for both axes, one sample per dT.
        '''
        # Create the controller.
        A, B, C = self.create_system(self.dT, self.zc, self.g)
        K, Fs = self.create_controller(A, B, C, self.r, self.q, self.N)
        # Generate the ZMPs.
        pref = self.create_ZMP_pattern(self.sx, self.sy, self.N_step, self.T_step, self.dT)
        prefx, prefy = pref[:, 0], pref[:, 1]
        # With given ZMPs, compute the trajectory of CoM.
        # The x and y axes are decoupled, so the same gains serve both.
        Xs, Pxs = self.solve_system(A, B, C, K, Fs, pref[:, 0], self.N, x0, dx0, d2x0)
        Ys, Pys = self.solve_system(A, B, C, K, Fs, pref[:, 1], self.N, y0, dy0, d2y0)
        return Xs, Ys, Pxs, Pys, prefx, prefy
    def create_ZMP_pattern(self, sx, sy, N_step, T_step, dT):
        '''
        Generate ZMP positions with given parameters.
        Returns an array of shape [(N_step + 2) * samples_per_step, 2]
        holding (x, y) per sample.
        The trajectories:
        X-axis:
                  |----
             |----|
        ----|
        Y-axis:
           |--|
        --|   |  |   |--
           |--|  |--|
        '''
        # Number of samples per step.
        N_sample = int(T_step / dT)
        # Idle in the first and last step.
        patterns = np.empty([(N_step + 2) * N_sample, 2], dtype = np.float32)
        # Idle in the first step
        patterns[:N_sample, :] = 0, 0
        # Walk: x advances one step length per step; y alternates sides.
        xd, yd = 0, 0
        for s in range(1, N_step + 1):
            xd = (s - 1) * sx
            yd = (-1) ** s * sy
            patterns[s * N_sample:(s + 1) * N_sample, :] = xd, yd
        # Idle in the last step
        patterns[(N_step + 1) * N_sample:, :] = xd, 0
        return patterns
    def create_system(self, dT, zc, g):
        '''
        Build the discretized cart-table model.
        Output:
            A, B, C: The matrices for the digital controller.
            State is [position, velocity, acceleration]; input is jerk;
            output C @ X is the ZMP.
        '''
        A = np.array([
            [1, dT, dT**2/2],
            [0, 1, dT],
            [0, 0, 1]
        ])
        B = np.array([[dT**3/6, dT**2/2, dT]]).T
        C = np.array([[1, 0, -zc/g]])
        return A, B, C
    def create_controller(self, A, B, C, r, q, N):
        '''
        The controller is considered as a tracking control problem minimizing the performance
        J = \sum_j^\infty {Q (pref_j - p_j)**2 + R u_j**2}
        Output:
            K and Fs: For creating inputs that minimize J.
            K is the state-feedback gain; Fs holds the N preview gains.
        '''
        # P: Solution of the Riccati equation.
        R = r * np.eye(1)
        Q = q * C.T @ C
        P = solve_discrete_are(A, B, Q, R)
        # Create K and Fs
        tmp = np.linalg.inv(R + B.T @ P @ B) @ B.T
        K = tmp @ P @ A
        Fs = []
        pre = np.copy(tmp)
        AcT = (A - B @ K).T
        for n in range(N):
            # Each entry is a 1x1 matrix; flattened into scalar gains below.
            Fs.append(pre @ C.T * q)
            pre = pre @ AcT
        Fs = np.array([Fs]).flatten()
        return K, Fs
    def update_state(self, A, B, C, X, U):
        '''
        Advance the state one sampling period.
        Output:
            X_next: The next state vector.
            P_k: The current ZMP position.
        '''
        X_next = A @ X + B @ U
        P_k = C @ X
        return X_next, P_k
    def solve_system(self, A, B, C, K, Fs, pref, N, x0, dx0, d2x0):
        '''
        Simulate the closed loop along one axis with given reference ZMPs.
        Output:
            Xs: The state vector and ZMP in all sampling time.
            ZMPs: The prediction of ZMPs.
        '''
        # The initial state vector (all zeros by default).
        X = np.array([x0, dx0, d2x0]).T
        n_zmps = len(pref)
        # Pad the reference so the N-sample preview window never runs out.
        pref_tmp = np.append(pref, [pref[-1]] * (N - 1))
        # Go over all samples.
        Xs, ZMPs = np.zeros(n_zmps), np.zeros(n_zmps)
        for i in range(n_zmps):
            # Input = state feedback + preview of the next N references.
            U = -np.dot(K, X) + np.dot(Fs, pref_tmp[i:i + N])
            X, ZMP = self.update_state(A, B, C, X, U)
            Xs[i], ZMPs[i] = X[0], ZMP
        return Xs, ZMPs
if __name__ == '__main__':
    # Generate a walking pattern with default parameters and plot each axis:
    # CoM trajectory, predicted ZMP and reference ZMP.
    generator = WalkPattern3DGenerator()
    Xs, Ys, Pxs, Pys, zmprefx, zmprefy = generator.generate()
    axis_series = (
        ((Xs, 'CoM X'), (Pxs, 'Pred ZMP x'), (zmprefx, 'Ref ZMP x')),
        ((Ys, 'CoM Y'), (Pys, 'Pred ZMP y'), (zmprefy, 'Ref ZMP y')),
    )
    for series in axis_series:
        for data, curve_label in series:
            plt.plot(range(len(data)), data, label = curve_label)
        plt.legend()
        plt.show()
def is_leap_year(year):
    """Return True when *year* is a Gregorian leap year.

    A year is a leap year when it is divisible by 4, except century years,
    which are leap years only when divisible by 400.
    """
    return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)


if __name__ == '__main__':
    temp = input("请输入一个年份:")
    # isdigit() returns True only when the whole input is digits.
    while not temp.isdigit():
        temp = input("抱歉,您的输入有误,请重新输入:")
    year = int(temp)
    # Bug fix: the original only tested divisibility by 400, so ordinary
    # leap years such as 2024 were never reported; it also printed nothing
    # for non-leap years.
    if is_leap_year(year):
        print(temp + '是闰年!')
    else:
        print(temp + '不是闰年!')
|
def read_file_return_list(name):
    """Read text file *name* and return its lines without trailing newlines.

    Reading stops early (after appending it) on a line whose content is the
    literal string 'str' — sentinel behaviour preserved from the original
    implementation; TODO confirm it is still wanted.
    """
    lines = []  # renamed: the original shadowed the builtin `list`
    with open(name, 'r') as f:
        for raw_line in f:
            # Equivalent to raw_line.split('\n')[0]: drop the newline.
            content = raw_line.split('\n')[0]
            lines.append(content)
            if content == 'str':
                break
    return lines
def triangle_to_dict(triangle):
    """Map row index -> list of space-separated tokens for each triangle row.

    Splits on single spaces, exactly like the original implementation.
    """
    return {row: line.split(' ') for row, line in enumerate(triangle)}
def find_max_path_sum(tri_dict):
    """Collapse the triangle bottom-up so each cell holds the best path sum
    through it.

    Mutates tri_dict in place and returns tri_dict[0], a one-element list
    containing the maximum top-to-bottom path sum.
    """
    last_row = max(tri_dict.keys())
    for row in range(last_row - 1, -1, -1):
        current = tri_dict[row]
        below = tri_dict[row + 1]
        for index, value in enumerate(current):
            # Best continuation is the larger of the two children beneath.
            current[index] = int(value) + max(int(below[index]), int(below[index + 1]))
    return tri_dict[0]
if __name__ == "__main__":
    # Project Euler 67: maximum top-to-bottom path sum through the triangle
    # stored in 67_input.dat (Python 2 print statement below).
    triangle = read_file_return_list('67_input.dat')
    tri_dict = triangle_to_dict(triangle)
    # Result is a one-element list holding the maximum sum.
    print find_max_path_sum(tri_dict)
|
import os,sys
import pandas as pd
import numpy as np
import tempfile
import re
from glob import glob
import mdtraj as md
import prody
import getopt
sys.path.append(os.path.dirname(os.path.dirname(sys.path[0])))
from util.createfolder import try_create_chain_parent_folder
from av4.av4_atomdict import atom_dictionary
def get_similar_crystal_file(crystalFolder,ligandPath):
'''
compare the ligand with all the crystal ligand for the target receptor
return the path of the crystal ligand which have a high similarity
:param crystalFolder: folder store crystal ligands
:param ligandPath: path for docking result
:return: list of path
'''
file_name= os.path.basename(ligandPath)
temp_file_path = os.path.join(FLAGS.tempPath,file_name)
print 'obabel convert ',file_name
obabel_cmd = 'obabel -ipdb {} -l 1 -f 1 -opdb -O {}'.format(ligandPath,temp_file_path)
os.system(obabel_cmd)
crystalList = glob(os.path.join(crystalFolder,'*.pdb'))
records = []
for crystal_ligand in crystalList:
command = os.popen('babel -d {} {} -ofpt -xfFP4'.format(crystal_ligand,temp_file_path))
ls = command.read()
try:
tanimoto_similarity = re.split('=|\n', ls)[2]
if tanimoto_similarity>FLAGS.tanimoto_cutoff:
records.append(crystal_ligand)
except:
records.append(crystal_ligand)
return records
def parsePDB(file_path):
    '''
    Parse a multi-frame docking-result PDB file.
    :param file_path: path of the docking result
    :return: (coords, elements, affinity) — one coordinate frame per docked
             pose and one affinity score per frame, read from REMARK lines.
    '''
    lig = prody.parsePDB(file_path)
    coords = lig.getCoordsets()
    elements = lig.getElements()
    remarks = [line for line in open(file_path) if line[:6] == 'REMARK']
    # Bug fixes: 'reamrk' typo renamed, and the decimal point escaped so the
    # pattern matches a real float instead of any character between digits.
    affinity = [float(re.search(r'(-?\d+\.\d+)', remark).group()) for remark in remarks]
    if coords.shape[0] != len(affinity):
        message = "{} parse error, frame {}, affinity {}".format(file_path,coords.shape[0],len(affinity))
        # Bug fix: `raise(message)` tried to raise a plain string, which is
        # itself a TypeError; wrap the message in a real exception.
        raise Exception(message)
    return coords,elements,affinity
def overlap_filter(crystal_list, ligand_coords, ligand_affinity):
    """Drop docked poses that clash with any similar crystal ligand, then
    sort the survivors by affinity (ascending).

    ligand_coords: pose coordinates, shape [poses, atoms, 3].
    ligand_affinity: one score per pose; must support boolean-array
    indexing — presumably a numpy array, TODO confirm against parsePDB
    which returns a plain list.
    """
    for crystal_ligand_path in crystal_list:
        crystal_ligand = prody.parsePDB(crystal_ligand_path)
        # the shape of crystal coords is [1,n,3]
        # coordinate read by mdtraj is 10 time less than
        # literally so we times it by 10
        # [n,3]
        crysta_coords = crystal_ligand.getCoords()
        # Broadcast every pose atom against every crystal atom.
        # [x,y,1,1,3]
        exp_ligand_coord = np.expand_dims(np.expand_dims(ligand_coords, -2), -2)
        print "exp_ligand_coord ",exp_ligand_coord.shape
        # [x,y,1,n,3]
        diff = exp_ligand_coord - crysta_coords
        print "diff ",diff.shape
        # Pairwise Euclidean distances.
        # [x,y,n]
        distance = np.squeeze(np.sqrt(np.sum(np.square(diff),-1)))
        print "distance ",distance.shape
        # 1.0 where a pose atom lies within clash_cutoff_A of any crystal atom.
        # [x,y]
        atom_overlap =(np.sum((distance<FLAGS.clash_cutoff_A).astype(np.float32),-1)>0).astype(np.float32)
        print "atom_overlap ",atom_overlap.shape
        # Keep poses whose clashing-atom fraction is below the cutoff.
        # [x]
        ligand_not_overlap = np.mean(np.squeeze(atom_overlap),-1)<FLAGS.clash_size_cutoff
        print "ligand_not_overlap num",np.sum(ligand_not_overlap)
        print crystal_ligand_path
        ligand_coords = ligand_coords[ligand_not_overlap]
        ligand_affinity = ligand_affinity[ligand_not_overlap]
    # Order surviving poses by affinity (best binding energies first).
    sorted_index = np.argsort(ligand_affinity)
    ligand_coords = ligand_coords[sorted_index]
    ligand_affinity = ligand_affinity[sorted_index]
    return ligand_coords, ligand_affinity
def save_av4(filepath,labels,elements,multiframe_coords):
    """Serialize one ligand into the binary av4 format.

    Record layout: int32 frame count, int32 labels (affinity * 100),
    int32 element codes, then the raw coordinate frames.

    :param filepath: destination file path
    :param labels: per-frame affinities (numpy array; scaled by 100 and
        truncated to int32)
    :param elements: per-atom element codes
    :param multiframe_coords: coordinate frames, shape [frames, atoms, 3]
    """
    # Stack the frames side by side only to validate the dimensions below.
    concatenated_coords = multiframe_coords[0]
    for coords in multiframe_coords[1:]:
        concatenated_coords = np.hstack((concatenated_coords,coords))
    # Affinities are stored as integers scaled by 100.
    labels = np.asarray(labels*100,dtype=np.int32)
    elements = np.asarray(elements,dtype=np.int32)
    concatenated_coords = np.asarray(concatenated_coords,dtype=np.float32)
    if len(concatenated_coords[:,0]) != len(elements):
        raise Exception('Number of atom elements is not equal, elements num {}, coords size {}'.format(len(elements),len(concatenated_coords[:,0])))
    if len(concatenated_coords[0,:]) != 3*len(labels):
        raise Exception('Number labels is not equal to the number of coordinate frames')
    number_of_examples = np.array([len(labels)],dtype=np.int32)
    av4_record = number_of_examples.tobytes()
    av4_record += labels.tobytes()
    av4_record += elements.tobytes()
    # NOTE(review): the raw multiframe_coords buffer is written, not the
    # float32 concatenated_coords validated above, so readers see the
    # original dtype of multiframe_coords — TODO confirm intended.
    av4_record += multiframe_coords.tobytes()
    # Bug fix: write in binary mode ('w' breaks on Python 3 and mangles
    # bytes on Windows) and close the file deterministically.
    with open(filepath,'wb') as f:
        f.write(av4_record)
def convert(fast_path):
    """Convert one docked-pose PDB file into the av4 binary format.

    Finds similar crystal ligands for the receptor, removes clashing poses,
    and writes the filtered, affinity-sorted result next to the receptor
    under FLAGS.super_dest_path.
    """
    file_name = os.path.basename(fast_path)
    dest_name = file_name.replace('.pdb', '.av4')
    # NOTE(review): file_id is computed but unused.
    file_id = re.search('(^[a-zA-Z0-9]{4}_\d+)', file_name).group()
    receptor = file_name.split('_')[0]
    crystalFolder = os.path.join(FLAGS.crystalPath, receptor)
    similar_crystal = get_similar_crystal_file(crystalFolder, fast_path)
    # Bug fix: parsePDB requires the file path; it was called with no
    # arguments, which raised a TypeError on every conversion.
    lig_coords,lig_elements,lig_affinity = parsePDB(fast_path)
    filtered_coords, filtered_affinity = overlap_filter(similar_crystal,lig_coords,lig_affinity)
    av4_dest_path = os.path.join(FLAGS.super_dest_path,receptor,dest_name)
    try_create_chain_parent_folder(av4_dest_path)
    save_av4(av4_dest_path, filtered_affinity, lig_elements, filtered_coords)
def run():
    """Convert every docked PDB under FLAGS.super_source, optionally as an
    array job that handles only its own slice of the file list."""
    FLAGS.tempPath = tempfile.mkdtemp()
    fastFileList = glob(os.path.join(FLAGS.super_source, '*', '*.pdb'))
    index = range(len(fastFileList))
    # Array-job mode: each cluster task converts only a subset of the files.
    if hasattr(FLAGS,'arrayjob') and FLAGS.arrayjob:
        if hasattr(FLAGS,'offset'):
            # One file per task, offset into the job's shard
            # (Python 2 integer division).
            index = [len(index)/FLAGS.jobsize*FLAGS.offset+FLAGS.jobid]
        else:
            # Strided partition: task `jobid` handles every jobsize-th file.
            index = range(FLAGS.jobid-1,len(index),FLAGS.jobsize)
    for i in index:
        convert(fastFileList[i])
class FLAGS:
    """Global configuration (cluster paths and cutoffs) held as class
    attributes; parse_FLAG() overrides some of them from the command line."""
    # Folder of docked poses to convert.
    super_source = '/n/scratch2/xl198/data/Superposition/dock'
    fast_path = '/n/scratch2/xl198/data/pdbs'
    rigor_path = '/n/scratch2/xl198/YI/rigor/final'
    rigor_so_path = '/n/scratch2/xl198/YI/rigor_so/final'
    mix_path = '/n/scratch2/xl198/data/fusion/dock'
    # Crystal ligands (hydrogens added), grouped by receptor.
    crystalPath = '/n/scratch2/xl198/data/H/addH'
    # NOTE(review): read eagerly at class-definition (import) time.
    dataframe =pd.read_csv("/n/scratch2/xl198/data/fusion/forms/simple_mix.csv")
    # Output folder for av4 files ('Syperposition' looks like a typo — confirm
    # before "fixing", downstream jobs may depend on the misspelled path).
    super_dest_path = '/n/scratch2/xl198/data/Syperposition/dock_av4'
    # Minimum FP4 Tanimoto similarity for a crystal ligand to be considered.
    tanimoto_cutoff = 0.75
    # Distance (Angstrom) under which two atoms count as clashing.
    clash_cutoff_A = 4
    # Fraction of clashing atoms above which a pose is discarded.
    clash_size_cutoff = 0.9
def parse_FLAG():
try:
opts,args = getopt.getopt(sys.argv[1:],None,["offset=","jobsize=","jobid=","cores=","scan"])
except getopt.GetoptError as err:
# print help information and exit:
print str(err) # will print something like "option -a not recognized"
sys.exit(2)
for name,value in opts:
if name == '--offset':
FLAGS.joboffset = int(value)
if name == '--jobsize':
FLAGS.jobsize = int(value)
print "--jobsize ",value
if name == '--jobid':
FLAGS.jobid = int(value)
print "--jobid", value
if name == '--cores':
FLAGS.cores = int(value)
if name == '--scan':
FLAGS.scan = True
if hasattr(FLAGS,"jobsize") and hasattr(FLAGS,"jobid"):
FLAGS.arrayjob = True
print "orchestra job ",FLAGS.arrayjob
if hasattr(FLAGS,'cores'):
print "cores num ",FLAGS.cores
if __name__ == '__main__':
    # Parse CLI options into FLAGS, then run the batch conversion.
    parse_FLAG()
    run()
|
# String indexing demo: report the character stored at position 38.
cad = "No te preocupes por los fracasos, preocúpate con las posibilidades que pierdes cuando ni siquiera lo intentas"
msg = "el valor del indice 38 es {}"
print(msg.format(cad[38]))
|
from enum import Enum
class ExpenseCategory(Enum):
    """Closed set of expense categories, keyed by lowercase value names."""
    GAS = "gas"
    OTHER = "other"
    HORMIGA = "hormiga"
    Pharmacy = "pharmacy"
    # Consistency fix: uppercase alias matching the other members' naming;
    # same value, so Enum makes it an alias of Pharmacy (backward compatible).
    PHARMACY = "pharmacy"
    SUPERMARKET = "supermarket"
    TRANSPORTATION = "transportation"

    @classmethod
    def of(cls, name):
        """Look up a category by its value, case-insensitively.

        Raises KeyError when *name* is falsy or matches no category.
        """
        if name:
            name = name.lower()
        # Consistency fix: iterate cls instead of the hardcoded class name.
        for category in cls:
            if name == category.value:
                return category
        raise KeyError(f"Invalid category name: {name}")
import cv2
import numpy as np

# Read the butterfly image as single-channel grayscale (flag 0).
image = cv2.imread("kelebek.jpg", 0)

# Apply each global thresholding mode at threshold 127 / max value 255:
# pixels are compared against 127; 255 is the value used by the binary modes.
ret, thres1 = cv2.threshold(image, 127, 255, cv2.THRESH_BINARY)
ret, thres2 = cv2.threshold(image, 127, 255, cv2.THRESH_BINARY_INV)
ret, thres3 = cv2.threshold(image, 127, 255, cv2.THRESH_TRUNC)
ret, thres4 = cv2.threshold(image, 127, 255, cv2.THRESH_TOZERO)
ret, thres5 = cv2.threshold(image, 127, 255, cv2.THRESH_TOZERO_INV)

# Show the original image next to each thresholded variant.
for window_title, window_image in (
    ("orjinal resim", image),
    ("thres1 resim", thres1),
    ("thres2 resim", thres2),
    ("thres3 resim", thres3),
    ("thres4 resim", thres4),
    ("thres5 resim", thres5),
):
    cv2.imshow(window_title, window_image)
cv2.waitKey()
cv2.destroyAllWindows()
import numpy as np

# Demonstration of numerical overflow in a naive softmax.
a = np.array([1010,1000,990])
# Naive softmax: exp(1010) overflows float64, yielding inf/nan.
np.exp(a) / np.sum(np.exp(a))
# Stabilised variant: shift the scores by their maximum before exponentiating.
c = np.max(a)
a - c
# Mathematically identical softmax, but numerically safe.
np.exp(a-c) / np.sum(np.exp(a-c))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.