text stringlengths 38 1.54M |
|---|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-04-13 23:27
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the lists app: List, ListItem and Pledge models."""

    # First migration for this app.
    initial = True

    dependencies = [
        # List.user / ListItem.user point at the swappable user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        # Pledge.profile points at userprofiles.UserProfile.
        ('userprofiles', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='List',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=150)),
                ('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
                ('modified_at', models.DateTimeField(auto_now=True)),
                ('expired', models.BooleanField(default=False)),
                ('expiration_date', models.DateTimeField()),
                # Owner is optional: a list may exist without a user.
                ('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='ListItem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=150)),
                ('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
                ('modified_at', models.DateTimeField(auto_now=True)),
                ('image', models.ImageField(blank=True, default='item_picture/default.jpg', null=True, upload_to='item_picture/')),
                ('amazon_link', models.URLField()),
                # Up to 9999.99 (6 digits, 2 decimal places).
                ('price', models.DecimalField(decimal_places=2, max_digits=6)),
                # Reverse accessor: some_list.items.all()
                ('list', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='items', to='lists.List')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Pledge',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
                ('amount', models.DecimalField(decimal_places=2, max_digits=6)),
                # Reverse accessors: item.pledges / profile.pledges
                ('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='pledges', to='lists.ListItem')),
                ('profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='pledges', to='userprofiles.UserProfile')),
            ],
        ),
    ]
|
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from models import Category, Base, Item, User
# import authorization

engine = create_engine("sqlite:///catalog.db")
# Bind the engine to the metadata of the Base class so that the
# declaratives can be accessed through a DBSession instance
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
# A DBSession() instance establishes all conversations with the database.
# Changes are not persisted until session.commit(); session.rollback()
# reverts any uncommitted changes.
session = DBSession()


def add_and_commit(obj):
    """Add *obj* to the session and commit immediately.

    Committing per object (as the original script did with repeated
    session.add()/session.commit() pairs) guarantees each row has its
    primary key assigned before it is referenced by later rows.
    Returns the object so creation and persistence read as one step.
    """
    session.add(obj)
    session.commit()
    return obj


# Category plan:
# 1. Beverages  2. Bread/Bakery  3. Canned/Jarred  4. Dairy
# 5. Dry/Baking Goods  6. Frozen  7. Meat/Fish  8. Produce  9. Other

# Create dummy user
User1 = add_and_commit(User(
    name="Charlene Wroblewski", email="cwroblew+catalog@gmail.com",
    picture="https://www.facebook.com/photo.php?fbid=10204105998147240"))

# Add categories and their initial items.
category1 = add_and_commit(Category(name="Beverages", description="coffee, tea, juice, soda"))
item1 = add_and_commit(Item(
    name="Berres Brothers Sumatra",
    description="12 oz. Sumatra Dark Coffee",
    category=category1))
item2 = add_and_commit(Item(
    name="Good Earth Organic Herbal Tea Caffeine Free Sweet & Spicy",
    description="Naturally caffeine free rooibos red tea infused with spicy cinnamon and sweet orange",
    category=category1))

category2 = add_and_commit(Category(name="Bread/Bakery", description="bread, rolls, tortillas, bagels"))
item3 = add_and_commit(Item(
    name="Pepperidge Farm Whole Grain Bread",
    description="""
Always crafted with 100% Whole Grain flour, Pepperidge Farm Whole Grain breads are delicious, with a good source of
fiber... our way of helping you maintain a balanced, healthy lifestyle.
""",
    category=category2))
item4 = add_and_commit(Item(
    name="David's Deli: Plain Bagels",
    description="David's Deli: Plain Bagels, 5 ct, 14.25 oz",
    category=category2))

category3 = add_and_commit(Category(name="Canned/Jarred", description="vegetable, spaghetti, mayonaise, Ketchup"))
item5 = add_and_commit(Item(
    name="Libby's Cream Style Sweet Corn",
    description="8.5 Oz. Can Cream Style Sweet Corn",
    category=category3))

category4 = add_and_commit(Category(name="Dairy", description="cheese, eggs, milk, yogurt, butter"))
item6 = add_and_commit(Item(
    name="Kemps 1% Low Fat Milk (Plastic Gallon)",
    description="Rich, protein and nutrient rich milk with only 1% fat.",
    category=category4))
item7 = add_and_commit(Item(
    name="Simple Truth Organic Plain Greek Nonfat Yogurt",
    description="32 Oz tub of Plain Greek Nonfat Yogurt",
    category=category4))

category5 = add_and_commit(Category(name="Dry/Baking Goods", description="cereal, flour, sugar, pasta, mixes"))
item8 = add_and_commit(Item(
    name="bear naked original cinnamon granola",
    description="""
Non-GMO project verified whole grain oats, honey, dried cranberries, sunflower seeds, and cinnamon make it a nature
lover's delight with 6g of protein per serving
""",
    category=category5))

category6 = add_and_commit(Category(name="Frozen", description="waffles, vegatables, ice cream, tv dinners"))
item9 = add_and_commit(Item(
    name="FLAV-R-PAC Brussel Sprouts",
    description="Tender sprouts with mild, distinctive flavor",
    category=category6))

category7 = add_and_commit(Category(name="Meat/Fish", description="lunch meat, poultry, beef, port, fish"))
item10 = add_and_commit(Item(
    name="Kroger Turkey Breast, Honey, Deli Thin Sliced",
    description="16 Oz honey roasted Turkey, deli thin sliced",
    category=category7))

category8 = add_and_commit(Category(name="Produce", description="fruits, vegetables"))
item11 = add_and_commit(Item(
    name="Ruby Frost Apple",
    description="""
RubyFrost apples are the perfect balance of sweet and tart, deep and rich with a hearty crunch and ideal crisp
texture.
""",
    category=category8))

category9 = add_and_commit(Category(name="Other", description="baby, cat, dog"))
item12 = add_and_commit(Item(
    name="Dave's Naturally Healthy Grain Free Canned Cat Food Chicken and Herring Dinner Formula",
    description="""
Dave's Naturally Healthy Grain Free Canned Cat Food 5.5oz is the best value for a good quality Grain-Free canned cat
food you can buy.
""",
    category=category9))
|
import json
import os
import traceback
from glob import glob
from typing import Dict, List
from colorama import Fore
from mysql.connector import MySQLConnection
from mysql.connector.cursor import MySQLCursor
from console import console
# Column schema of the poems table, in insertion order. "string" columns are
# inserted verbatim; "json" columns are serialized with json.dumps first
# (see importData below, which iterates this dict to build the INSERT).
fields = {
    "author": {"type": "string"},
    "dynasty": {"type": "string"},
    "title": {"type": "string"},
    "rhythmic": {"type": "string"},
    "chapter": {"type": "string"},
    "paragraphs": {"type": "json"},
    "notes": {"type": "json"},
    "collection": {"type": "string"},
    "section": {"type": "string"},
    "content": {"type": "json"},
    "comment": {"type": "json"},
    "tags": {"type": "json"},
}
# Import the JSON files of one collection into the MySQL table.
def importData(connect: MySQLConnection, source: str, table: str, info: Dict):
    """Insert every poem found under ``info["path"]`` into *table*.

    :param connect: open MySQL connection (a cursor is created and closed here)
    :param source: root directory the glob patterns are resolved against
    :param table: destination table; columns must match the ``fields`` schema
    :param info: dict with ``path`` (one glob pattern or a list of them),
        ``dynasty``, ``collection`` and optional ``author`` overrides
    :return: summary dict with the collection name, row count and elapsed time
    """
    path = info["path"]
    dynasty = info["dynasty"]
    collection = info["collection"]
    author = info.get("author")
    # "path" may be a single glob pattern or a list of patterns.
    if isinstance(path, str):
        names = list(glob(f"{source}/{path}"))
    else:
        names = []
        for v in path:
            names += list(glob(f"{source}/{v}"))
    console.log()
    begin = console.info(f"正在處理 {collection}")
    cursor: MySQLCursor = connect.cursor()
    success = 0
    error = 0
    for name in names:
        filename = os.path.basename(name)
        relName = os.path.relpath(name, source)
        try:
            console.log(f"正在處理文件 {relName}")
            # was: file handle opened and never closed (descriptor leak on
            # every file); a context manager closes it even on parse errors.
            with open(name, "r", encoding="utf-8") as file:
                data = json.loads(file.read())
            # The Three Hundred Tang Poems file has a nested layout that
            # must be flattened first.
            if filename == "tangshisanbaishou.json":
                data = shisanbai(data)
            if not isinstance(data, list):
                data = [data]
            for poet in data:
                values = []
                for field in fields:
                    if fields[field]["type"] == "string":
                        value = poet.get(field, "")
                        # Collection and dynasty always come from `info`,
                        # overriding whatever the file says.
                        if field == "collection":
                            value = collection
                        elif field == "dynasty":
                            value = dynasty
                        elif field == "author":
                            if author:
                                value = author
                            if not value:
                                value = "不詳"
                        values.append(value)
                    else:
                        value = poet.get(field, None)
                        if field == "tags":
                            # Normalize tags to a list; was `[value]` even for
                            # None, which injected a null into the tags array.
                            if not isinstance(value, list):
                                value = [] if value is None else [value]
                            if collection not in value:
                                value.append(collection)
                        values.append(json.dumps(value, ensure_ascii=False))
                # One "%s" placeholder per column; the driver escapes values.
                placeholders = ",".join(["%s"] * len(values))
                sql = f"INSERT INTO `{table}` VALUES (null,{placeholders})"
                cursor.execute(sql, values)
                success += 1
        except Exception:
            console.error(traceback.format_exc())
            end = console.error(f"{relName} 處理出错")
            error += 1
    if error == 0:
        end = console.success(f"{collection} 處理完畢", begin)
    else:
        end = console.warning(f"{collection} 處理完畢,共有{error}个错误", begin)
    cursor.close()
    console.log()
    return {
        "collection": collection,
        "count": success,
        "time": f"{console.round(end - begin)}s",
    }
# Import author records.
def importAuthors(
    connect: MySQLConnection,
    source: str,
    table: str,
    paths: List[str],
):
    """Insert every author found in the JSON files matched by *paths*.

    :param connect: open MySQL connection (a cursor is created and closed here)
    :param source: root directory the glob patterns are resolved against
    :param table: destination table with (name, desc, short_description) columns
    :param paths: list of glob patterns relative to *source*
    :return: summary dict with the collection label, row count and elapsed time
    """
    names = ()
    for path in paths:
        names += tuple(glob(f"{source}/{path}"))
    console.log()
    begin = console.log("正在處理 作者", Fore.CYAN)
    cursor = connect.cursor()
    success = 0
    error = 0
    for name in names:
        name = os.path.normpath(name)
        filename = os.path.basename(name)
        relName = os.path.relpath(name, source)
        try:
            console.log(f"正在處理文件 {relName}")
            # was: file handle opened and never closed; close it via `with`.
            with open(name, "r", encoding="utf-8") as file:
                data = json.loads(file.read())
            # was: `type(data).__name__ != "list"` — isinstance is the idiom.
            if not isinstance(data, list):
                data = [data]
            for author in data:
                sql = f"INSERT INTO `{table}` VALUES (null,%s,%s,%s)"
                cursor.execute(
                    sql,
                    (
                        author.get("name") or "",
                        # Some source files use "desc", others "description".
                        author.get("desc") or author.get("description") or "",
                        author.get("short_description") or "",
                    ),
                )
                success += 1
        except Exception:
            console.error(traceback.format_exc())
            console.error(f"{relName} 處理出错")
            error += 1
    if error == 0:
        end = console.success("作者 處理完畢", begin)
    else:
        end = console.warning(f"作者 處理完畢,共有{error}个错误", begin)
    cursor.close()
    console.log()
    return {
        "collection": "作者",
        "count": success,
        "time": f"{console.round(end - begin)}s",
    }
# Special-case flattening for the "Three Hundred Tang Poems" collection.
def shisanbai(data: Dict):
    """Flatten tangshisanbaishou.json into a plain list of poem dicts.

    Each poem gets a ``title`` (its subchapter when set, else its chapter),
    loses the ``chapter``/``subchapter`` keys, and is tagged with the
    collection name and its group type. Also marks *data* as Tang dynasty.
    """
    data["dynasty"] = "唐"
    flattened: List[dict] = []
    for group in data["content"]:
        group_type = group["type"]
        for poem in group["content"]:
            subchapter = poem.pop("subchapter")
            chapter = poem.pop("chapter")
            poem["title"] = subchapter if subchapter else chapter
            poem["tags"] = ["唐詩三百首", group_type]
            flattened.append(poem)
    return flattened
|
from celery import app
# BUG FIX: the task definitions below reuse the service-function names, so the
# original `from .service import send_order_to_restaurant, ...` was shadowed by
# the task defs — each task called *itself*, recursing until the stack blew.
# Aliasing the imports keeps the task names (the public interface) unchanged
# while delegating to the real service functions.
from .service import (
    send_order_to_restaurant as _send_order_to_restaurant,
    send_delivery_notification_to_customer as _send_delivery_notification_to_customer,
)


@app.shared_task
def send_order_to_restaurant(restaurant_email, meal):
    """Async task: forward an order to the restaurant via the service layer."""
    _send_order_to_restaurant(restaurant_email, meal)


@app.shared_task
def send_delivery_notification_to_customer(user_email, meal, order_id):
    """Async task: notify the customer that their order is being delivered."""
    _send_delivery_notification_to_customer(user_email, meal, order_id)
|
import Tkinter
import tkMessageBox
import numpy as np
import caffe
import glob
import pylab as pl
from tkFileDialog import askdirectory
import cv2
import sys
# Mapping from model output index to distracted-driver class label
# (StateFarm distracted-driver classes c0-c9).
classes = {0: "safe driving", 1: "texting right", 2: "talking on the phone right",
           3: "texting (left)", 4: "talking on the phone (left)", 5: "operating the radio",
           6: "drinking", 7: "reaching behind", 8: "hair and makeup", 9: "talking to passenger"}

# Command-line parsing: op "1" = classify the live webcam feed,
# op "2" = classify a directory of extracted video frames (second argument).
if len(sys.argv) > 1:
    op = sys.argv[1]
    if op == "2" and len(sys.argv) > 2:
        filename = sys.argv[2]
    elif op == "2" and len(sys.argv) <= 2:
        print("Error: Usage python interface 2 [video_frames_path]")
        exit()
else:
    print("Error: Usage python interface [op] ")
    exit()
def initialize_transformer(image_mean, is_flow):
    """Build a caffe.io.Transformer for single 227x227 BGR images.

    :param image_mean: iterable of 3 per-channel mean values to subtract
    :param is_flow: NOTE(review): accepted but never used — presumably kept
        for parity with an optical-flow variant; confirm before removing.
    :return: configured caffe.io.Transformer for blob name 'data'
    """
    shape = (1, 3, 227, 227)
    transformer = caffe.io.Transformer({'data': shape})
    channel_mean = np.zeros((3, 227, 227))
    # Broadcast each scalar channel mean across the full 227x227 plane.
    for channel_index, mean_val in enumerate(image_mean):
        channel_mean[channel_index, ...] = mean_val
    transformer.set_mean('data', channel_mean)
    transformer.set_raw_scale('data', 255)            # inputs arrive in [0,1]; scale to [0,255]
    transformer.set_channel_swap('data', (2, 1, 0))   # RGB -> BGR
    transformer.set_transpose('data', (2, 0, 1))      # HxWxC -> CxHxW
    return transformer
# Per-channel mean pixel values (UCF101-style constants) used to centre
# the network input.
ucf_mean_RGB = np.zeros((3, 1, 1))
ucf_mean_RGB[0, :, :] = 103.939
ucf_mean_RGB[1, :, :] = 116.779
ucf_mean_RGB[2, :, :] = 128.68
transformer_RGB = initialize_transformer(ucf_mean_RGB, False)
# Extract list of frames in video
RGB_images = []
# classify images with singleFrame model
def singleFrame_classify_images(net, transformer):
    """Classify driver images with the single-frame caffe model.

    Behavior depends on the module-level `op` set from sys.argv:
      op == "1": classify the live webcam feed frame by frame until 'q';
      op == "2": classify every .jpg under the module-level `filename` dir.
    The top-3 class labels/probabilities are drawn on each frame, and the
    mean of the per-frame argmax class indices is returned.

    NOTE(review): written for Python 2 — `frame.shape[1]/2`, `/9` and the
    final `mean_val/_` rely on integer division; under Python 3 cv2.putText
    would receive float coordinates. Also `mean_val/_` divides by zero if
    the very first webcam read is quit immediately — confirm acceptable.
    """
    if op == "1":
        mean_val = 0
        _ = 0  # processed-frame counter; divides mean_val at the end
        cap = cv2.VideoCapture(0)
        while(True):
            # Capture frame-by-frame
            ret, frame = cap.read()
            # NOTE(review): resized to 277x277 but the transformer was built
            # for 227x227 — likely a typo; preprocess resizes again so it runs.
            input_im = caffe.io.resize_image(frame, (277, 277))
            caffe_in = transformer.preprocess('data', input_im)
            net.blobs['data'].data[...] = caffe_in
            out = net.forward()
            # getting the probabilities for the 10 classes
            val = out['probs'][0][:10]
            sorted_ = np.sort(val)
            # Our operations on the frame come here
            # NOTE(review): `gray` is computed but never used.
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            font = cv2.FONT_HERSHEY_SIMPLEX
            # Overlay the top-3 labels and probabilities (highest first).
            cv2.putText(frame, str(classes[np.where(val == sorted_[-1])[0][0]] + ":" + str(val[np.where(val == sorted_[-1])[0][0]])),
                        (frame.shape[1]/2, frame.shape[0]-100), font, 0.5, (0, 255, 0), 2, cv2.LINE_AA)
            cv2.putText(frame, str(classes[np.where(val == sorted_[-2])[0][0]] + ":" + str(val[np.where(val == sorted_[-2])[0][0]])),
                        (frame.shape[1]/2, frame.shape[0]-80), font, 0.5, (0, 255, 0), 2, cv2.LINE_AA)
            cv2.putText(frame, str(classes[np.where(val == sorted_[-3])[0][0]] + ":" + str(val[np.where(val == sorted_[-3])[0][0]])),
                        (frame.shape[1]/2, frame.shape[0]-60), font, 0.5, (0, 255, 0), 2, cv2.LINE_AA)
            # Accumulate the argmax class index for the running mean.
            mean_val += np.where(val == sorted_[-1])[0][0]
            _ += 1
            frame = cv2.resize(frame, (640, 480))
            # Display the resulting frame; 'q' quits the capture loop.
            cv2.imshow('frame', frame)
            cv2.resizeWindow('frame', 400, 400)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        mean_val = mean_val/_
        cap.release()
        cv2.destroyAllWindows()
        return mean_val
    elif op == "2":
        frames = glob.glob('%s/*.jpg' % (filename))
        output_predictions = np.zeros((len(frames), 10))
        mean_val = 0
        c = 0  # frame index into output_predictions
        for im in frames:
            # reading the image (BGR, for drawing the overlay)
            frame = cv2.imread(im, 1)
            input_im = caffe.io.load_image(im)
            # resizing if it's necessary (see 277 vs 227 note above)
            imageResized = caffe.io.resize_image(input_im, (277, 277))
            caffe_in = transformer.preprocess('data', imageResized)
            net.blobs['data'].data[...] = caffe_in
            out = net.forward()
            # getting the probabilities
            val = out['probs'][0][:10]
            output_predictions[c] = val
            sorted_ = np.sort(val)
            #gray = cv2.cvtColor(input_im, cv2.COLOR_BGR2GRAY)
            font = cv2.FONT_HERSHEY_SIMPLEX
            # Overlay the top-3 labels and probabilities (highest first).
            cv2.putText(frame, str(classes[np.where(val == sorted_[-1])[0][0]] + ":" + str(val[np.where(val == sorted_[-1])[0][0]])),
                        (frame.shape[1]/9, frame.shape[0]-100), font, 0.5, (0, 255, 0), 2, cv2.LINE_AA)
            cv2.putText(frame, str(classes[np.where(val == sorted_[-2])[0][0]] + ":" + str(val[np.where(val == sorted_[-2])[0][0]])),
                        (frame.shape[1]/9, frame.shape[0]-80), font, 0.5, (0, 255, 0), 2, cv2.LINE_AA)
            cv2.putText(frame, str(classes[np.where(val == sorted_[-3])[0][0]] + ":" + str(val[np.where(val == sorted_[-3])[0][0]])),
                        (frame.shape[1]/9, frame.shape[0]-60), font, 0.5, (0, 255, 0), 2, cv2.LINE_AA)
            mean_val += np.where(val == sorted_[-1])[0][0]
            cv2.imshow('frame', frame)
            if cv2.waitKey(5) & 0xFF == ord('q'):
                break
            del input_im
            c += 1
        mean_val = mean_val/len(frames)
        return mean_val
def classifyVideo():
    """Load the single-frame caffe model and classify per the global `op`."""
    # Models and weights (paths relative to the working directory).
    singleFrame_model = 'deploy_singleFrame.prototxt'
    RGB_singleFrame = 'no_data_augm_iter_1000.caffemodel'
    RGB_singleFrame_net = caffe.Net(singleFrame_model, RGB_singleFrame, caffe.TEST)
    output = singleFrame_classify_images(RGB_singleFrame_net, transformer_RGB)
    print(output)  # mean predicted class index over all processed frames
    del RGB_singleFrame_net  # release the network before exiting
    #tkMessageBox.showinfo( "Hello Python", filename)

if __name__ == '__main__':
    classifyVideo()
# Find line numbers for specific lat/lon locations using the compare.prn file.
# (Python 2 script: uses raw_input and byte-mode file handles.)
from numpy import *

# was: bare string literals abused as comments, and all three files left open
# (leaked on any exception). Real comments + context managers fix both.
with open('compare.prn', 'rb') as compareFile:
    compareArr = compareFile.readlines()

inputfile = raw_input('Input filename: ')
with open(inputfile, 'rb') as infile:
    infileArr = infile.readlines()

with open('lineNums.prn', 'wb') as outfile:
    # Build the list of lat/lon keys from the input file; both files are
    # assumed to use the same fixed-width layout (8.3f lat, 8.3f lon) and
    # the same location ordering.
    locList = []
    for line in infileArr:
        locList.append('%8.3f%8.3f' % (float(line[0:8]), float(line[8:16])))
    # For each compare row, find its position in the input file and record it.
    # NOTE(review): locList.index raises ValueError if a location is missing
    # from the input file — confirm the two files really cover the same set.
    for row in compareArr:
        loc = locList.index(str(row[0:16]))
        outfile.write('%d\n' % loc)
|
import sys
from PyQt5.QtWidgets import *
class QLineEditEchoMode(QWidget):
    """Demo widget showing QLineEdit's four echo modes.

    The echo mode controls what appears as the user types:
      Normal             - characters are echoed exactly as typed
      NoEcho             - nothing is displayed at all
      Password           - platform password mask characters are shown
      PasswordEchoOnEdit - characters are visible while editing, then masked
    """
    def __init__(self):
        super(QLineEditEchoMode, self).__init__()
        self.initUI()

    def initUI(self):
        # Build a form with one line edit per echo mode.
        # (Window title, Chinese: "Learning the line-edit echo modes".)
        self.setWindowTitle('学习 文本框的回显模式')
        formLayout = QFormLayout()
        normalLineEdit = QLineEdit()
        noEchoLineEdit = QLineEdit()
        passwordLineEdit = QLineEdit()
        passwordEchoOnLineEdit = QLineEdit()
        formLayout.addRow("请我喝一杯咖啡 Normal", normalLineEdit)
        formLayout.addRow("请我喝2杯咖啡 noEcho", noEchoLineEdit)
        formLayout.addRow("请我喝3杯咖啡 password", passwordLineEdit)
        formLayout.addRow("请我喝4杯咖啡 passwordEchoOnEdit", passwordEchoOnLineEdit)
        # Placeholder text: the grey hint shown inside an empty line edit.
        normalLineEdit.setPlaceholderText("input Normal")
        noEchoLineEdit.setPlaceholderText("input noEcho")
        passwordLineEdit.setPlaceholderText("input password")
        passwordEchoOnLineEdit.setPlaceholderText("input passwordEchoOnEdit")
        # Configure one echo mode per field so the differences are visible.
        normalLineEdit.setEchoMode(QLineEdit.Normal)
        noEchoLineEdit.setEchoMode(QLineEdit.NoEcho)
        passwordLineEdit.setEchoMode(QLineEdit.Password)
        passwordEchoOnLineEdit.setEchoMode(QLineEdit.PasswordEchoOnEdit)
        self.setLayout(formLayout)
if __name__ == '__main__':
    # Standard Qt bootstrap: create the application, show the demo widget,
    # then hand control to the event loop until the window is closed.
    app = QApplication(sys.argv)
    main = QLineEditEchoMode()
    main.show()
    sys.exit(app.exec_())
|
# Generated by Django 2.2.6 on 2020-11-02 14:10
import datetime
from django.db import migrations, models
import django.db.models.deletion
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Add Answer.poll FK and refresh the default of Poll.pub_date."""

    dependencies = [
        ('p_pols', '0009_poll_pub_date'),
    ]

    operations = [
        migrations.AddField(
            model_name='answer',
            name='poll',
            # default=0 lets the NOT NULL column be added to existing rows;
            # NOTE(review): pre-existing answers will point at poll pk 0 —
            # confirm such a poll exists (or that the table was empty).
            field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='p_pols.Poll'),
        ),
        migrations.AlterField(
            model_name='poll',
            name='pub_date',
            # The default is the timestamp captured when makemigrations ran
            # (2020-11-02), not "now" — a typical auto-generated artifact.
            field=models.DateTimeField(default=datetime.datetime(2020, 11, 2, 14, 10, 24, 937363, tzinfo=utc), help_text='Дата публикации', verbose_name='Дата публикации'),
        ),
    ]
|
# SQL templates for the planner/order module.
# NOTE(review): every template embeds quoted '%s' placeholders, i.e. callers
# fill them with Python's '%' string operator rather than driver parameter
# binding — the values are NOT escaped by the driver, so any untrusted input
# is an SQL-injection risk. Prefer cursor.execute(sql, params) in callers.

# Query the list of orders a given planner has completed (OrderStatus=7).
select_planner_complete_order_list = "SELECT du.`Id`,du.`DemandServiceId`,du.`UserId`,du.`OrderStatus`,du.`CreateUserID`,du.`CreateTime`, " \
                                     "ds.PriceStart,ds.PriceEnd, " \
                                     "ds.TimeStart,ds.TimeEnd,sa.Name AS ServiceAreaName,st.Name AS ServiceTypeName, " \
                                     "ds.Description,ui.Name,u.Phone,plannerU.Name AS PlannerName,plannerUser.Phone AS plannerPhone , " \
                                     "du.`OrderStatus`,ui.`HeadImage`, du.EvaluateContent , " \
                                     "CASE du.`OrderStatus` " \
                                     " WHEN 2 THEN '客服回访' " \
                                     " WHEN 3 THEN '拟定合同' " \
                                     " WHEN 4 THEN '线下签约' " \
                                     " WHEN 5 THEN '平台审查' " \
                                     " WHEN 6 THEN '付款确认' " \
                                     " WHEN 7 THEN '服务完成' " \
                                     " ELSE '订单有误' " \
                                     "END AS OrderStatusStr " \
                                     "FROM `DS_Order` du " \
                                     "JOIN DS_DemandService ds ON ds.Id=du.DemandServiceId " \
                                     "JOIN Base_ServiceArea sa ON sa.Id=ds.ServiceAreaId " \
                                     "JOIN Base_ServiceType st ON st.Id=ds.ServiceTypeId " \
                                     "JOIN U_UserInfo plannerU ON du.`PlannerUserId`=plannerU.UserId " \
                                     "JOIN U_UserInfo ui ON ui.UserId=du.`UserId` " \
                                     "JOIN U_User u ON u.Id=du.`UserId` " \
                                     "JOIN U_User plannerUser ON plannerUser.Id=du.`PlannerUserId` " \
                                     "WHERE du.IsDelete= FALSE " \
                                     "AND plannerU.`UserId`='%s' " \
                                     "AND du.`OrderStatus`=7 " \
                                     "ORDER BY du.CreateTime DESC " \
                                     "LIMIT %s , %s"

# Query the full order list for a given planner (with evaluate/finished flags).
select_planner_order_list = "SELECT du.`Id`,du.`DemandServiceId`,du.`UserId`,du.`OrderStatus`,du.`CreateUserID`,du.`CreateTime`, " \
                            "ds.PriceStart,ds.PriceEnd, " \
                            "ds.TimeStart,ds.TimeEnd,sa.Name AS ServiceAreaName,st.Name AS ServiceTypeName, " \
                            "ds.Description,ui.Name,u.Phone,plannerU.Name AS PlannerName,plannerUser.Phone AS plannerPhone , " \
                            "du.`OrderStatus`,ui.`HeadImage`, " \
                            "CASE du.`OrderStatus` " \
                            " WHEN 2 THEN '客服回访' " \
                            " WHEN 3 THEN '拟定合同' " \
                            " WHEN 4 THEN '线下签约' " \
                            " WHEN 5 THEN '平台审查' " \
                            " WHEN 6 THEN '付款确认' " \
                            " WHEN 7 THEN '服务完成' " \
                            " ELSE '订单有误' " \
                            "END AS OrderStatusStr ,IF(ee.Id IS NULL,0,1) AS isEvaluate,IF(du.OrderStatus=7,1,0) AS isFished " \
                            "FROM `DS_Order` du " \
                            "JOIN DS_DemandService ds ON ds.Id=du.DemandServiceId " \
                            "JOIN Base_ServiceArea sa ON sa.Id=ds.ServiceAreaId " \
                            "JOIN Base_ServiceType st ON st.Id=ds.ServiceTypeId " \
                            "JOIN U_UserInfo plannerU ON du.`PlannerUserId`=plannerU.UserId " \
                            "JOIN U_UserInfo ui ON ui.UserId=du.`UserId` " \
                            "JOIN U_User u ON u.Id=du.`UserId` " \
                            "JOIN U_User plannerUser ON plannerUser.Id=du.`PlannerUserId` " \
                            "LEFT JOIN `U_Evaluate` ee ON ee.`OrderId`=du.`Id` AND ee.IsFirst=1 " \
                            "WHERE du.IsDelete= FALSE " \
                            "AND plannerU.`UserId`='%s' " \
                            "ORDER BY du.CreateTime DESC " \
                            "LIMIT %s , %s"

# Query the detail of a single order (by order id).
select_planner_order_detail = "SELECT du.`Id`,du.`DemandServiceId`,du.`UserId`,du.`OrderStatus`,du.`CreateUserID`,du.`CreateTime`,du.`ModifTime`, " \
                              "ds.PriceStart,ds.PriceEnd, " \
                              "ds.TimeStart,ds.TimeEnd,sa.Name AS ServiceAreaName,st.Name AS ServiceTypeName, " \
                              "ds.Description,ui.Name,u.Phone,plannerU.Name AS PlannerName,plannerUser.Phone AS plannerPhone , " \
                              "du.`OrderStatus`,ui.`HeadImage`,'' as OrderFlowing, " \
                              "CASE du.`OrderStatus` " \
                              " WHEN 2 THEN '客服回访' " \
                              " WHEN 3 THEN '拟定合同' " \
                              " WHEN 4 THEN '线下签约' " \
                              " WHEN 5 THEN '平台审查' " \
                              " WHEN 6 THEN '付款确认' " \
                              " WHEN 7 THEN '服务完成' " \
                              " ELSE '订单有误' " \
                              "END AS OrderStatusStr " \
                              "FROM `DS_Order` du " \
                              "JOIN DS_DemandService ds ON ds.Id=du.DemandServiceId " \
                              "JOIN Base_ServiceArea sa ON sa.Id=ds.ServiceAreaId " \
                              "JOIN Base_ServiceType st ON st.Id=ds.ServiceTypeId " \
                              "JOIN U_UserInfo plannerU ON du.`PlannerUserId`=plannerU.UserId " \
                              "JOIN U_UserInfo ui ON ui.UserId=du.`UserId` " \
                              "JOIN U_User u ON u.Id=du.`UserId` " \
                              "JOIN U_User plannerUser ON plannerUser.Id=du.`PlannerUserId` " \
                              "WHERE du.IsDelete= FALSE " \
                              "AND du.`Id`='%s' "

# Fetch an order's status-change history with timestamps.
get_order_status = "SELECT `Id`,`OrderId`,`StartStatus`,`EndStatus`,`ChangeTime` FROM `DS_OrderFlowingWater` " \
                   "WHERE `OrderId`='%s' AND `IsDelete`=FALSE " \
                   "ORDER BY `CreateTime` DESC"

# Fetch the first-level comments of a given order (paginated).
select_order_evaluate = "SELECT e.`OrderId`,e.`Content`,e.`CreateTime` ,ui.`Name` " \
                        "FROM `U_Evaluate` e " \
                        "LEFT JOIN `U_UserInfo` ui ON e.`UserId` = ui.`UserId` " \
                        "WHERE e.`OrderId`='%s' AND e.`IsFirst`=1 " \
                        "ORDER BY e.`CreateTime` " \
                        "LIMIT %s , %s"

# Create a new order (id generated with UUID()).
insert_order = "INSERT INTO `DS_Order` (`Id`,`PlannerUserId`,`UserId`,`ContractId`,`Type`,`DemandServiceId`,`DemandServiceDescription`," \
               "`Description`,`ServiceAreaId`,`ServiceTypeId`,`PriceStart`,`PriceEnd`,`TimeStart`,`TimeEnd`," \
               "`CreateUserID`,`CreateTime`) " \
               "VALUES (UUID(),'%s','%s',%s,%s,'%s','%s','%s',%s,%s,%s,%s,'%s','%s','%s',NOW())"

# Rate-limit check: 0 if the creator's latest order is younger than 30 seconds.
select_order_createtime = "SELECT IF(DATE_ADD(`CreateTime`,INTERVAL 30 SECOND) >= NOW(),0,1) AS isCanInsert FROM `DS_Order` WHERE `CreateUserID`='%s' ORDER BY `CreateTime` DESC LIMIT 1"

# Whether an order already has any evaluation rows.
select_order_is_evaluate="SELECT count(0) as total FROM U_Evaluate WHERE OrderId='%s';"

# Add a review: a multi-statement script that (1) stores the review text and
# scores on the order, (2) updates the planner's praise/bad-review counters,
# (3) inserts the first U_Evaluate row, and (4) upserts the review label
# counter — each guarded so it only applies to a completed (status 7),
# not-yet-reviewed order.
insert_evaluate = "UPDATE `DS_Order` SET `EvaluateContent`='%s',`Synthesis`='%s',`Quality`='%s',`Efficiency`='%s',`Lable`='%s' " \
                  "WHERE `Id`='%s' AND `UserId`='%s' AND `OrderStatus`=7 " \
                  "AND NOT EXISTS (SELECT Id FROM U_Evaluate WHERE OrderId='%s');" \
                  "UPDATE `U_PlannerStatistics` ps,`DS_Order` o SET ps.`NewEvaluate` = o.`EvaluateContent`" \
                  ",ps.`PraiseCount`=`PraiseCount`+(CASE o.`Synthesis`>=3 WHEN TRUE THEN 1 ELSE 0 END)" \
                  ",`BadReviewCount`=`BadReviewCount`+(CASE o.`Synthesis`>=3 WHEN TRUE THEN 0 ELSE 1 END) " \
                  "WHERE ps.`UserId`=o.`PlannerUserId` AND o.`Id`='%s' AND o.`UserId`='%s' AND o.`OrderStatus`=7 " \
                  "AND NOT EXISTS (SELECT Id FROM U_Evaluate WHERE OrderId='%s');" \
                  "INSERT INTO `U_Evaluate` ( `OrderId`, `UserId`, `Content`, `Sort`, `IsFirst`, `CreateUserID`, `CreateTime`, `ModifUserID`, `ModifTime`, `IsDelete`) " \
                  "SELECT '%s', '%s', '%s', '%s',1, '%s', NOW(), '%s', NOW(), FALSE " \
                  "FROM DS_Order WHERE `Id`='%s' AND `UserId`='%s' AND `OrderStatus`=7 " \
                  "AND NOT EXISTS (SELECT Id FROM U_Evaluate WHERE OrderId='%s') LIMIT 0,1;" \
                  "UPDATE `U_UserLable` ul,`DS_Order` o SET ul.`Count`=ul.`Count`+1,ul.`ModifUserID`='%s',ul.`ModifTime`=NOW() " \
                  "WHERE o.`Id`='%s' AND o.`UserId`='%s' AND o.`OrderStatus`=7 AND ul.`UserId`=o.`PlannerUserId` AND ul.`LableName`='%s' ; " \
                  "INSERT INTO `U_UserLable` (`UserId`,`LableName`,`Count`,`Sort`,`CreateUserID`,`CreateTime`,`ModifUserID` ,`ModifTime`,`IsDelete`) " \
                  "SELECT o.`PlannerUserId`,'%s',1,0,'%s',NOW(),'%s',NOW(),FALSE FROM `DS_Order` o " \
                  "WHERE o.`Id`='%s' AND o.`UserId`='%s' AND o.`OrderStatus`=7 " \
                  "AND NOT EXISTS (SELECT Id FROM U_UserLable WHERE UserId=o.PlannerUserId AND LableName='%s') LIMIT 0,1;"

# Reply to a review (only when a first review already exists).
replay_evaluate = "INSERT INTO `U_Evaluate` ( `OrderId`, `UserId`, `Content`, `Sort`, `IsFirst`, `CreateUserID`, `CreateTime`, `ModifUserID`, `ModifTime`, `IsDelete`) " \
                  "SELECT '%s', '%s', '%s', '%s',0, '%s', NOW(), '%s', NOW(), FALSE " \
                  "FROM DS_Order WHERE `Id`='%s' AND (`UserId`='%s' OR PlannerUserId='%s') AND `OrderStatus`=7 " \
                  "AND EXISTS (SELECT Id FROM U_Evaluate WHERE OrderId='%s') LIMIT 0,1;"

# Query the full review thread of a given order (paginated).
select_evaluate_info = "SELECT e.`OrderId`,e.`Content`,e.`CreateTime` ,ui.`Name`, ui.`HeadImage` " \
                       "FROM `U_Evaluate` e " \
                       "LEFT JOIN `U_UserInfo` ui ON e.`UserId` = ui.`UserId` " \
                       "WHERE e.`OrderId`='%s' " \
                       "ORDER BY e.`IsFirst` DESC,e.`Sort` DESC,e.`CreateTime` DESC " \
                       "LIMIT %s , %s"

# Insert a row into the order status-change ("flowing water") log.
insert_order_flowing = "insert into `DS_OrderFlowingWater` (`OrderId`,`UserId`,`StartStatus`,`EndStatus`,`Remarks`,`ChangeTime`,`CreateUserID`,`CreateTime`) " \
                       "values('%s','%s',%s,%s,'',now(),'%s',now())"

# Update an order's status (guarded by the expected current status).
update_order_status = "UPDATE `DS_Order` SET OrderStatus=%s,ModifUserID='%s',ModifTime=NOW() WHERE Id='%s' AND OrderStatus=%s"

# When an order completes (status 7), bump the planner's customer and
# order counters in the statistics table.
update_planner_statistics = "UPDATE `U_PlannerStatistics` ps JOIN `DS_Order` o SET ps.CustomerCount=ps.CustomerCount+1,ps.OrderCount=ps.OrderCount+1,ps.ModifUserID='%s',ps.ModifTime=NOW() WHERE o.Id='%s' AND ps.UserId=o.`PlannerUserId` AND o.OrderStatus= %s AND %s=7"
|
import logging
from flask import request
from flask_restplus import Resource
from api.serializers import wishlist, wishlist_response, wishlist_item, wishlist_item_response, wishlist_items_list_response, response
from api.parsers import pagination_arguments
from api.restplus import api, token_required
from services.wishlist_service import *
log = logging.getLogger(__name__)
ns = api.namespace(
'wishlists', description='Operations related to customers wishlists')
@ns.route('/')
class WishlistPost(Resource):
    """Collection endpoint: create a new wishlist for a customer."""

    @api.doc(security='apikey')
    @token_required
    @api.response(201, 'Wishlist successfuly created')
    @api.expect(wishlist)
    @api.marshal_with(wishlist_response)
    def post(self):
        """Create a wishlist from the JSON request body."""
        payload = request.json
        result = create_wishlist(payload)
        return result, result.status_code
@ns.route('/<int:id>')
class WishlistOperations(Resource):
    """Item endpoints for one wishlist: add items, list items, delete it."""

    @api.doc(security='apikey')
    @token_required
    @api.response(201, 'Wishlist item successfully created')
    @api.expect(wishlist_item)
    @api.marshal_with(wishlist_item_response)
    def post(self, id):
        """Add an item (from the JSON body) to wishlist *id*."""
        payload = request.json
        result = add_wishlist_item(id, payload)
        return result, result.status_code

    @api.doc(security='apikey')
    @token_required
    @api.expect(pagination_arguments)
    @api.response(200, 'Wishlist items successfully fetched')
    @api.marshal_with(wishlist_items_list_response)
    def get(self, id):
        """Return a page of the items in wishlist *id*."""
        args = pagination_arguments.parse_args(request)
        result = get_wishlist_items(
            id,
            args.get('page', 1),
            args.get('per_page', 10),
        )
        return result, result.status_code

    @api.doc(security='apikey')
    @token_required
    @api.response(200, 'Wishlist succesfully deleted')
    @api.marshal_with(response)
    def delete(self, id):
        """Delete wishlist *id* entirely."""
        result = delete_wishlist(id)
        return result, result.status_code
@ns.route('/<int:id>/product/<string:product_id>')
class WishListItemOperations(Resource):
    """Endpoint for a single product inside a wishlist."""

    @api.doc(security='apikey')
    @token_required
    @api.response(200, 'Wishlist item succesfully deleted')
    @api.marshal_with(response)
    def delete(self, id, product_id):
        """Remove product *product_id* from wishlist *id*."""
        result = delete_wishlist_item(id, product_id)
        return result, result.status_code
|
from survey import AnonymousSurvey

# Define a question and create an AnonymousSurvey instance for it.
question = "你会什么语言?\n"
my_survey = AnonymousSurvey(question)

# Show the question, then collect answers until the user enters 'q'.
my_survey.show_question()
print("输入q退出程序\n")
while True:
    response = input("会的语言是: ")
    if response == 'q':
        break
    my_survey.store_response(response)

# Display the collected survey results.
print("谢谢参与调查")
my_survey.show_results()
from django.urls import path, include
from . import views

# App URL routes; each `name` is reversible via reverse()/{% url %}.
# NOTE(review): `include` is imported but unused here.
urlpatterns = [
    path('hello', views.hello, name="hello"),
    path('schedule', views.schedule, name="schedule"),
]
def calculateArea(radius):
    """Return the area of a circle with the given *radius*.

    Uses math.pi instead of the original hand-rolled constant 3.143,
    which was already wrong in the third decimal (pi = 3.14159...).
    """
    import math
    return math.pi * radius * radius
# Needed to run the file as a CLI program from the command line
def main():
    """Prompt for a radius and print the circle's area (2 decimal places)."""
    radius = float(input("\nPlease, enter the radius here: "))
    area = calculateArea(radius)
    print(f"The Area of the circle with radius {radius:.2f} is {area:.2f}")

# was: bare `main()` — it also ran on import; guard so the prompt only
# appears when the file is executed as a script.
if __name__ == "__main__":
    main()
import random
import math
import numpy as np
from scipy.stats import multivariate_normal
def expectation_maximization(df, pca_df):
    """
    Main method for classification using Expectation Maximization.
    Uses the PCA features from part 5.
    :param df: Loaded data as a DataFrame
    :param pca_df: DataFrame with PCA generated features ("PCA 1", "PCA 2")
    """
    # Create copies to avoid modifying the original DataFrames
    em_df = pca_df.copy(deep=True)
    df = df.copy(deep=True)
    # Choose random starting clusters (1-3) for the initial statistics
    choose_random_starting_clusters(em_df)
    # Calculate initial per-cluster means/covariances from the random clusters
    data_array, mean, covariance = calculate_initial_parameters(em_df)
    # Iterate E and M steps until convergence; returns mixture probabilities
    results = iterate_expectation_maximization(data_array, mean, covariance)
    # Add classification results to df (classify is defined elsewhere in file)
    em_result_df = classify(df, em_df, results)
    # Output results to a CSV (helper defined elsewhere in this file)
    output_expectation_maximization_result(em_result_df)
def choose_random_starting_clusters(df):
    """Randomly assign each row of *df* to one of three starting clusters.

    Mutates *df* in place, adding/overwriting a 'cluster' column with the
    values 1, 2 and 3 split into (roughly) equal thirds of the shuffled
    index. Returns None.

    :param df: DataFrame with PCA generated features
    """
    # Shuffle a copy of the index so the thirds are random.
    index = df.index.tolist()
    random.shuffle(index)
    # Calculate the two split points (ceil keeps all rows covered).
    first_split = math.ceil(len(index) / 3)
    second_split = math.ceil((len(index) / 3) * 2)
    # Break the shuffled index into three parts.
    first_part = index[0:first_split]
    second_part = index[first_split:second_split]
    third_part = index[second_split:len(index)]
    # Assign cluster labels; was df.at[...] — `.at` is a scalar-label
    # accessor and rejects list indexers, so `.loc` is the correct way to
    # assign by a list of labels.
    df['cluster'] = 0
    df.loc[first_part, 'cluster'] = 1
    df.loc[second_part, 'cluster'] = 2
    df.loc[third_part, 'cluster'] = 3
def calculate_initial_parameters(df):
    """Compute the data matrix plus per-cluster means and covariances.

    :param df: DataFrame with PCA generated features and a 'cluster' column
    :return data_array: the PCA feature columns as a numpy array
    :return means: list of mean vectors, one per cluster (1..3)
    :return covariances: list of covariance matrices, one per cluster
    """
    features = ["PCA 1", "PCA 2"]
    data_array = df[features].values
    means = []
    covariances = []
    # One mean vector and covariance matrix per randomly assigned cluster
    for cluster_id in (1, 2, 3):
        cluster_points = df.loc[df['cluster'] == cluster_id, features]
        means.append(cluster_points.mean().values)
        # np.cov expects variables in rows, hence the transpose
        covariances.append(np.cov(cluster_points.values.T))
    return data_array, means, covariances
def iterate_expectation_maximization(data_array, mean, covariance):
    """Alternate E and M steps until the means stop moving (max 1000 rounds).

    :param data_array: numpy array of data
    :param mean: list of mean vectors from the initial clusters
    :param covariance: list of covariance matrices from the initial clusters
    :return mixture: final mixture probabilities per row, per cluster
    """
    # Initial responsibilities from the starting statistics
    mixture = calculate_e_step(data_array, mean, covariance)
    for _ in range(1000):
        # M-step: refit statistics to the current responsibilities
        new_mean, new_covariance, _new_probabilities = calculate_m_step(data_array, mixture)
        # Convergence measure: total absolute change of the mean-vector sums
        drift = sum(abs(new_mean[k].sum() - mean[k].sum()) for k in range(3))
        if drift <= .00000001:
            # Converged: keep the responsibilities from the last E-step
            break
        # Not converged yet: adopt the new statistics and redo the E-step
        mean = new_mean
        covariance = new_covariance
        mixture = calculate_e_step(data_array, mean, covariance)
    return mixture
def calculate_e_step(data_array, mean, covariance):
    """E-step: responsibility of each Gaussian component for every row.

    Builds a multivariate normal PDF per cluster with scipy and normalises
    the three densities row-wise into mixture probabilities.
    :param data_array: numpy array of data
    :param mean: list of current cluster mean vectors
    :param covariance: list of current cluster covariance matrices
    :return mixture: list of (n, 1) responsibility arrays, one per cluster
    """
    # Evaluate each component's density at every data point
    densities = [
        multivariate_normal(mean=mean[k], cov=covariance[k]).pdf(data_array)
        for k in range(3)
    ]
    # Normalise so the three responsibilities sum to 1 per row
    total = densities[0] + densities[1] + densities[2]
    n_rows = len(data_array)
    return [(d / total).reshape(n_rows, 1) for d in densities]
def calculate_m_step(data_array, mixture):
    """M-step: re-estimate mean, covariance and weight of each component.

    :param data_array: numpy array of data
    :param mixture: list of (n, 1) responsibility arrays, one per cluster
    :return mean: list of refitted mean vectors
    :return covariance: list of refitted covariance matrices
    :return probabilities: list of refitted mixing weights
    """
    mean = []
    covariance = []
    probabilities = []
    n_rows = len(data_array)
    for responsibilities in mixture:
        # Effective number of points claimed by this component
        weight = responsibilities.sum(axis=0)
        # Responsibility-weighted mean
        component_mean = (responsibilities * data_array).sum(axis=0) / weight
        # Responsibility-weighted covariance around that mean
        centered = data_array - component_mean
        component_cov = (responsibilities * centered).T.dot(centered) / weight
        mean.append(component_mean)
        covariance.append(component_cov)
        # Mixing weight: share of the data this component accounts for
        probabilities.append(weight / n_rows)
    return mean, covariance, probabilities
def classify(df, em_df, mixture):
    """Assign each row the cluster (1-3) with the highest final mixture weight.

    :param df: Loaded data as a DataFrame
    :param em_df: DataFrame with PCA generated features
    :param mixture: ending mixture probabilities per row, per cluster
    :return df: loaded data with PCA features and an 'em_class' column added
    """
    # Merge the PCA feature columns onto the raw data by index
    df = df.merge(em_df[["PCA 1", "PCA 2"]], left_index=True, right_index=True)
    # Bug fix: the original if/elif chain appended nothing when two mixture
    # values tied exactly, desynchronising the class list from the DataFrame
    # length and crashing the assignment below. argmax always picks a winner
    # (first cluster on a tie) and matches the original on all strict cases.
    stacked = np.hstack(mixture)            # shape (n_rows, 3)
    df['em_class'] = stacked.argmax(axis=1) + 1
    return df
def output_expectation_maximization_result(df):
    """Persist this module's final DataFrame as a CSV for the report.

    :param df: Passed in DataFrame for this module
    """
    output_path = "output/part6a_expectation_maximization_data.csv"
    df.to_csv(output_path)
|
'''
Definitions for nodes in the abstract syntax tree.
'''
from __future__ import absolute_import, print_function
from frontend import typesys
class Node(object):
    """Base class for all AST nodes.

    Provides visitor dispatch, in-place child replacement, source-position
    lookup and an optional semantic type attribute set by the type checker.
    """

    def accept(self, visitor, arg=None):
        """Visitor-pattern entry point: forward this node to *visitor*."""
        return visitor.visit(self, arg)

    @property
    def children(self):
        """Child nodes in declaration order; leaf nodes return an empty list."""
        return []

    def replace(self, child, node):
        """Replace *child* with *node* somewhere in this subtree.

        This node's own attributes are checked first, then the children are
        searched recursively. Returns True as soon as one slot was replaced.
        """
        for attr_name, attr_value in list(self.__dict__.items()):
            if attr_value == child:
                self.__dict__[attr_name] = node
                return True
        return any(c.replace(child, node) for c in filter(None, self.children))

    @property
    def position(self):
        """Source position of this node, or of the first child that has one."""
        if hasattr(self, 'pos_info'):
            return self.pos_info
        for c in filter(None, self.children):
            if not isinstance(c, Node):
                continue
            pos_info = c.position
            if pos_info is not None:
                return pos_info
        return None

    @property
    def type(self):
        """Semantic type assigned during type checking (None until set)."""
        if hasattr(self, '_type'):
            return self._type
        return None

    @type.setter
    def type(self, val):
        assert isinstance(val, typesys.Type)
        self._type = val

    def __str__(self):
        return self.name
# Root of the AST: program name, optional program parameters, and its block.
class ProgramNode(Node):
    def __init__(self, identifier, block, identifier_list=None):
        self.identifier = identifier
        self.identifier_list = identifier_list
        self.block = block
    @property
    def children(self):
        return [self.identifier,
                self.identifier_list,
                self.block]
    def __str__(self):
        return "Program"
# Left-recursive identifier list from the grammar, flattened into one list.
class IdentifierListNode(Node):
    def __init__(self, ident, ident_list=None):
        self._children = list()
        if ident_list:
            self._children.extend(ident_list._children)
        self._children.append(ident)
    @property
    def children(self):
        return self._children
    def __str__(self):
        return "Identifier list"
# A block: label/const/type/var declaration parts, nested routines, statements.
class BlockNode(Node):
    def __init__(self, label_list, const_list, type_list, var_list, func,
                 stmt):
        self.label_list = label_list
        self.const_list = const_list
        self.type_list = type_list
        self.var_list = var_list
        self.func = func
        self.stmt = stmt
    @property
    def children(self):
        return [self.label_list,
                self.const_list,
                self.type_list,
                self.var_list,
                self.func,
                self.stmt]
    def __str__(self):
        return "Block"
# The `label ...;` declaration part of a block.
class LabelDeclNode(Node):
    def __init__(self, label_list):
        self.label_list = label_list
    @property
    def children(self):
        return [self.label_list]
    def __str__(self):
        return "Label declaration"
# Flattened list of declared labels.
class LabelListNode(Node):
    def __init__(self, label, label_list=None):
        self._children = list()
        if label_list:
            self._children.extend(label_list._children)
        self._children.append(label)
    @property
    def children(self):
        return self._children
    def __str__(self):
        return "Label list"
# A single label; a leaf node holding only its name.
class LabelNode(Node):
    def __init__(self, name):
        self.name = name
    def __str__(self):
        return "Label (%s)" % self.name
# Flattened list of constant definitions.
class ConstListNode(Node):
    def __init__(self, const_def, const_def_list=None):
        self._children = list()
        if const_def_list:
            self._children.extend(const_def_list._children)
        self._children.append(const_def)
    @property
    def children(self):
        return self._children
    def __str__(self):
        return "Constant list"
# One `name = expr` constant definition.
class ConstDeclNode(Node):
    def __init__(self, identifier, expr):
        self.identifier = identifier
        self.expr = expr
    @property
    def children(self):
        return [self.identifier, self.expr]
    def __str__(self):
        return "Constant declaration"
# Binary expression: left operand, operator node, right operand.
class BinaryOpNode(Node):
    def __init__(self, op, left, right):
        self.left = left
        self.op = op
        self.right = right
    @property
    def children(self):
        return [self.left, self.op, self.right]
    def __str__(self):
        return "BinaryOp"
# Unary expression; the operator is kept as a plain name, not a child node.
class UnaryOpNode(Node):
    def __init__(self, name, expr):
        self.name = name
        self.expr = expr
    @property
    def children(self):
        return [self.expr]
    def __str__(self):
        return "UnaryOp (%s)" % self.name
# Access of a plain (entire) variable by identifier.
class VarAccessNode(Node):
    def __init__(self, identifier):
        self.identifier = identifier
    @property
    def children(self):
        return [self.identifier]
    def __str__(self):
        return "Variable access"
# Common base for literal value nodes below.
class ValueNode(Node):
    pass
# String literal; newlines are escaped for display.
class StringNode(ValueNode):
    def __init__(self, value):
        self.value = str(value)
    def __str__(self):
        return "String ('%s')" % str(self.value.replace("\n", "\\n"))
# Character literal.
class CharNode(ValueNode):
    def __init__(self, value):
        self.value = value
    def __str__(self):
        return "Char (%s)" % self.value
# Integer literal; coerced with int() on construction.
class IntegerNode(ValueNode):
    def __init__(self, value):
        self.value = int(value)
    def __str__(self):
        return "Integer (%s)" % str(self.value)
# Real (floating point) literal; coerced with float() on construction.
class RealNode(ValueNode):
    def __init__(self, value):
        self.value = float(value)
    def __str__(self):
        return "Real (%s)" % str(self.value)
# Flattened list of type definitions.
class TypeDeclListNode(Node):
    def __init__(self, typedef, typedecl_list=None):
        self._children = list()
        if typedecl_list:
            self._children.extend(typedecl_list._children)
        self._children.append(typedef)
    @property
    def children(self):
        return self._children
    def __str__(self):
        return "Type definition list"
# One `name = type` definition.
class TypeDeclNode(Node):
    def __init__(self, identifier, type_denoter):
        self.identifier = identifier
        self.type_denoter = type_denoter
    @property
    def children(self):
        return [self.identifier, self.type_denoter]
    def __str__(self):
        return "Type definition"
# Named type reference with an optional attribute (e.g. packed).
class TypeNode(Node):
    def __init__(self, identifier, attr=None):
        self.attr = attr
        self.identifier = identifier
    @property
    def children(self):
        return [self.attr,
                self.identifier]
    def __str__(self):
        return "Type"
# Enumerated type: an identifier list of its members.
class EnumTypeNode(Node):
    def __init__(self, identifier_list, attr=None):
        self.attr = attr
        self.identifier_list = identifier_list
    @property
    def children(self):
        return [self.attr,
                self.identifier_list]
    def __str__(self):
        return "Enum type"
# Subrange type: start..stop bounds.
class RangeNode(Node):
    def __init__(self, start, stop):
        self.start = start
        self.stop = stop
    @property
    def children(self):
        return [self.start, self.stop]
    def __str__(self):
        return "Range"
# Array type: index type list plus the component (element) type.
class ArrayTypeNode(Node):
    def __init__(self, index_list, component_type):
        self.index_list = index_list
        self.component_type = component_type
    @property
    def children(self):
        return [self.index_list, self.component_type]
    def __str__(self):
        return "Array type"
# Flattened list of array index types (one per dimension).
class IndexListNode(Node):
    def __init__(self, index_type, index_list=None):
        self._children = list()
        if index_list:
            self._children.extend(index_list._children)
        self._children.append(index_type)
    @property
    def children(self):
        return self._children
    def __str__(self):
        return "Index list"
# Record type: fixed field sections plus an optional variant part.
class RecordTypeNode(Node):
    def __init__(self, section_list, variant):
        self.variant = variant
        self.section_list = section_list
    @property
    def children(self):
        return [self.section_list, self.variant]
    def __str__(self):
        return "Record type"
# Flattened list of record field sections.
class RecordSectionListNode(Node):
    def __init__(self, section, section_list=None):
        self._children = list()
        if section_list:
            self._children.extend(section_list._children)
        self._children.append(section)
    @property
    def children(self):
        return self._children
    def __str__(self):
        return "Record section list"
# One field section: `ident, ident, ... : type`.
class RecordSectionNode(Node):
    def __init__(self, identifier_list, type_denoter):
        self.identifier_list = identifier_list
        self.type_denoter = type_denoter
    @property
    def children(self):
        return [self.identifier_list, self.type_denoter]
    def __str__(self):
        return "Record section"
# Variant part of a record: tag selector plus the variant alternatives.
class VariantPartNode(Node):
    def __init__(self, variant_selector, variant_list):
        self.variant_selector = variant_selector
        self.variant_list = variant_list
    @property
    def children(self):
        return [self.variant_selector, self.variant_list]
    def __str__(self):
        return "Variant part"
# Tag of a variant record: its type and optionally a named tag field.
class VariantSelectorNode(Node):
    def __init__(self, tag_type, tag_field=None):
        self.tag_type = tag_type
        self.tag_field = tag_field
    @property
    def children(self):
        return [self.tag_type, self.tag_field]
    def __str__(self):
        return "Variant selector"
# Flattened list of variant alternatives.
class VariantListNode(Node):
    def __init__(self, variant, variant_list=None):
        self._children = list()
        if variant_list:
            self._children.extend(variant_list._children)
        self._children.append(variant)
    @property
    def children(self):
        return self._children
    def __str__(self):
        return "Variant list"
# One variant alternative: its case labels, fields, and nested variant part.
class VariantNode(Node):
    def __init__(self, case_list, record_list, variant_part):
        self.case_list = case_list
        self.record_list = record_list
        self.variant_part = variant_part
    @property
    def children(self):
        return [self.case_list,
                self.record_list,
                self.variant_part]
    def __str__(self):
        return "Variant"
# Flattened list of case-label constants.
class CaseConstListNode(Node):
    def __init__(self, case_constant, case_constant_list=None):
        self._children = list()
        if case_constant_list:
            self._children.extend(case_constant_list._children)
        self._children.append(case_constant)
    @property
    def children(self):
        return self._children
    def __str__(self):
        return "Case constant list"
# A single case-label constant.
class CaseConstNode(Node):
    def __init__(self, constant):
        self.constant = constant
    @property
    def children(self):
        return [self.constant]
    def __str__(self):
        return "Case constant"
# A case-label range: first..last constants.
class CaseRangeNode(Node):
    def __init__(self, first_constant, last_constant):
        self.first_constant = first_constant
        self.last_constant = last_constant
    @property
    def children(self):
        return [self.first_constant, self.last_constant]
    def __str__(self):
        return "Case range"
# Set type over a base (element) type.
class SetTypeNode(Node):
    def __init__(self, base_type):
        self.base_type = base_type
    @property
    def children(self):
        return [self.base_type]
    def __str__(self):
        return "Set type"
# File type over a component type.
class FileTypeNode(Node):
    def __init__(self, component_type):
        self.component_type = component_type
    @property
    def children(self):
        return [self.component_type]
    def __str__(self):
        return "File type"
# Pointer type: `^domain_type`.
class PointerTypeNode(Node):
    def __init__(self, domain_type):
        self.domain_type = domain_type
    @property
    def children(self):
        return [self.domain_type]
    def __str__(self):
        return "Pointer type"
# Flattened list of variable declarations.
class VarDeclListNode(Node):
    def __init__(self, var_decl, var_decl_list=None):
        self._children = list()
        if var_decl_list:
            self._children.extend(var_decl_list._children)
        self._children.append(var_decl)
    @property
    def children(self):
        return self._children
    def __str__(self):
        return "Variable declaration list"
# One declaration: `ident, ident, ... : type`.
class VarDeclNode(Node):
    def __init__(self, identifier_list, type_denoter):
        self.identifier_list = identifier_list
        self.type_denoter = type_denoter
    @property
    def children(self):
        return [self.identifier_list, self.type_denoter]
    def __str__(self):
        return "Variable declaration"
# Flattened list of procedure/function declarations in a block.
class FunctionListNode(Node):
    def __init__(self, func, func_list=None):
        self._children = list()
        if func_list:
            self._children.extend(func_list._children)
        self._children.append(func)
    @property
    def children(self):
        return self._children
    def __str__(self):
        return "Function list"
# Procedure declaration: head plus body block, with an optional attribute.
class ProcedureNode(Node):
    def __init__(self, header, block, attr=None):
        self.attr = attr
        self.header = header
        self.block = block
    @property
    def children(self):
        return [self.attr,
                self.header,
                self.block]
    def __str__(self):
        return "Procedure"
# Procedure heading: name and optional formal parameter list.
class ProcedureHeadNode(Node):
    def __init__(self, identifier, param_list=None):
        self.identifier = identifier
        self.param_list = param_list
    @property
    def children(self):
        return [self.identifier, self.param_list]
    def __str__(self):
        return "Procedure head"
# Flattened list of formal parameters.
class ParameterListNode(Node):
    def __init__(self, param, param_list=None):
        self._children = list()
        if param_list:
            self._children.extend(param_list._children)
        self._children.append(param)
    @property
    def children(self):
        return self._children
    def __str__(self):
        return "Parameter list"
# Pass-by-value formal parameter group.
class ValueParameterNode(Node):
    def __init__(self, identifier_list, type_denoter):
        self.identifier_list = identifier_list
        self.type_denoter = type_denoter
    @property
    def children(self):
        return [self.identifier_list, self.type_denoter]
    def __str__(self):
        return "Value parameter"
# Pass-by-reference (var) formal parameter group.
class RefParameterNode(Node):
    def __init__(self, identifier_list, type_denoter):
        self.identifier_list = identifier_list
        self.type_denoter = type_denoter
    @property
    def children(self):
        return [self.identifier_list, self.type_denoter]
    def __str__(self):
        return "Reference parameter"
# Function declaration: head plus body block, with an optional attribute.
class FunctionNode(Node):
    def __init__(self, header, block, attr=None):
        self.attr = attr
        self.header = header
        self.block = block
    @property
    def children(self):
        return [self.attr,
                self.header,
                self.block]
    def __str__(self):
        return "Function"
# Function heading: return type, name, optional formal parameter list.
class FunctionHeadNode(Node):
    def __init__(self, ret, identifier=None, param_list=None):
        self.return_type = ret
        self.identifier = identifier
        self.param_list = param_list
    @property
    def children(self):
        # NOTE(review): return_type is deliberately not exposed as a child
        # here -- confirm visitors handle it separately.
        return [self.identifier, self.param_list]
    def __str__(self):
        return "Function head"
# Flattened list of statements; nested statement lists are merged in.
class StatementListNode(Node):
    def __init__(self, stmt, stmt_list=None):
        self._children = list()
        if stmt_list:
            self._children.extend(stmt_list._children)
        if isinstance(stmt, StatementListNode):
            # Splice nested lists flat rather than nesting them
            self._children.extend(stmt._children)
        else:
            self._children.append(stmt)
    @property
    def children(self):
        # Bug fix: this returned filter(None, ...), a single-use iterator in
        # Python 3 and inconsistent with every other node's list. Consumers
        # that iterate children twice (e.g. Node.replace then Node.position)
        # silently saw it empty the second time. Return a real list.
        return [c for c in self._children if c]
    def __str__(self):
        return "Statement list"
# A statement prefixed with a goto label.
class LabeledStatementNode(Node):
    def __init__(self, label, stmt):
        self.label = label
        self.stmt = stmt
    @property
    def children(self):
        return [self.label, self.stmt]
    def __str__(self):
        return "Labeled Statement"
# repeat..until loop; note children lists the condition before the body.
class RepeatNode(Node):
    def __init__(self, body, cond):
        self.body = body
        self.cond = cond
    @property
    def children(self):
        return [self.cond, self.body]
    def __str__(self):
        return "Repeat"
# while..do loop.
class WhileNode(Node):
    def __init__(self, cond, body):
        self.cond = cond
        self.body = body
    @property
    def children(self):
        return [self.cond, self.body]
    def __str__(self):
        return "While"
# for loop: control variable, start/end values, direction (to/downto), body.
class ForNode(Node):
    def __init__(self, var, init_val, dir_, end_val, body):
        self.var = var
        self.value_start = init_val
        self.direction = dir_
        self.value_end = end_val
        self.body = body
    @property
    def children(self):
        # NOTE: direction is a plain token, not a node, so it is not a child.
        return [self.var,
                self.value_start,
                self.value_end,
                self.body]
    def __str__(self):
        return "For (%s)" % self.direction
# with statement: record variable list plus the statements using them.
class WithNode(Node):
    def __init__(self, rec_var_list, statement_list):
        self.rec_var_list = rec_var_list
        self.statement_list = statement_list
    @property
    def children(self):
        return [self.rec_var_list, self.statement_list]
    def __str__(self):
        return "With"
# if/then/else; iffalse is None when there is no else branch.
class IfNode(Node):
    def __init__(self, expr, true_stmt, false_stmt=None):
        self.expr = expr
        self.iftrue = true_stmt
        self.iffalse = false_stmt
    @property
    def children(self):
        return [self.expr, self.iftrue, self.iffalse]
    def __str__(self):
        return "If"
# Assignment: target variable access and the value expression.
class AssignmentNode(Node):
    def __init__(self, var_access, expr):
        self.var_access = var_access
        self.expr = expr
    @property
    def children(self):
        return [self.var_access, self.expr]
    def __str__(self):
        return "Assignment"
# Pointer dereference applied to a variable access (`p^`).
class PointerAccessNode(Node):
    def __init__(self, var_access):
        self.var_access = var_access
    @property
    def children(self):
        return [self.var_access]
    def __str__(self):
        return "Pointer access"
# Array element access: base variable plus index expressions.
class IndexedVarNode(Node):
    def __init__(self, var_access, index_expr_list):
        self.var_access = var_access
        self.index_expr_list = index_expr_list
    @property
    def children(self):
        return [self.var_access, self.index_expr_list]
    def __str__(self):
        return "Indexed variable"
# Flattened list of expressions (e.g. array subscripts).
class ExprListNode(Node):
    def __init__(self, expr, expr_list=None):
        self._children = list()
        if expr_list:
            self._children.extend(expr_list._children)
        self._children.append(expr)
    @property
    def children(self):
        return self._children
    def __str__(self):
        return "Expression list"
# Record field access: `record.field`.
class FieldAccessNode(Node):
    def __init__(self, var_access, identifier):
        self.var_access = var_access
        self.identifier = identifier
    @property
    def children(self):
        return [self.var_access, self.identifier]
    def __str__(self):
        return "Field access"
# Function (or procedure) call with an optional argument list.
class FunctionCallNode(Node):
    def __init__(self, identifier, arg_list=None):
        self.identifier = identifier
        self.arg_list = arg_list
    @property
    def children(self):
        return [self.identifier, self.arg_list]
    def __str__(self):
        return "Function call"
# Flattened list of call arguments.
class ArgumentListNode(Node):
    def __init__(self, expr, expr_list=None):
        self._children = list()
        if expr_list:
            self._children.extend(expr_list._children)
        self._children.append(expr)
    @property
    def children(self):
        return self._children
    def __str__(self):
        return "Argument list"
# A single call argument wrapping its expression.
class ArgumentNode(Node):
    def __init__(self, expr):
        self.expr = expr
    @property
    def children(self):
        return [self.expr]
    def __str__(self):
        return "Argument"
# goto statement targeting a label.
class GotoNode(Node):
    def __init__(self, label):
        self.label = label
    @property
    def children(self):
        return [self.label]
    def __str__(self):
        return "Goto"
# case statement: selector expression, branches, optional otherwise part.
class CaseStatementNode(Node):
    def __init__(self, case_index, case_list_element_list, otherwise=None):
        self.case_index = case_index
        self.case_list_element_list = case_list_element_list
        self.otherwise = otherwise
    @property
    def children(self):
        return [self.case_index, self.case_list_element_list, self.otherwise]
    def __str__(self):
        return "Case statement"
# Flattened list of case branches.
class CaseListElementListNode(Node):
    def __init__(self, case_list_element, case_list_element_list=None):
        self._children = list()
        if case_list_element_list:
            self._children.extend(case_list_element_list._children)
        self._children.append(case_list_element)
    @property
    def children(self):
        return self._children
    def __str__(self):
        return "Case list element list"
# One case branch: its label constants and the guarded statement.
class CaseListElementNode(Node):
    def __init__(self, case_constant_list, statement):
        self.case_constant_list = case_constant_list
        self.statement = statement
    @property
    def children(self):
        return [self.case_constant_list, self.statement]
    def __str__(self):
        return "Case list element"
# Load (read) of a variable's value, as opposed to a store target.
class VarLoadNode(Node):
    def __init__(self, var_access):
        self.var_access = var_access
    @property
    def children(self):
        return [self.var_access]
    def __str__(self):
        return "Variable load"
# The nil/null literal.
class NullNode(Node):
    def __str__(self):
        return "Null"
# Set constructor with a member list.
class SetNode(Node):
    def __init__(self, member_list):
        self.member_list = member_list
    @property
    def children(self):
        return [self.member_list]
    def __str__(self):
        return "Set"
# The empty set constructor `[]`.
class SetEmptyNode(Node):
    def __str__(self):
        return "Set empty"
# Flattened list of set members.
class SetMemberListNode(Node):
    def __init__(self, member, member_list=None):
        self._children = list()
        if member_list:
            self._children.extend(member_list._children)
        self._children.append(member)
    @property
    def children(self):
        return self._children
    def __str__(self):
        return "Set member list"
# A member range inside a set constructor: `member..expr`.
class SetMemberRangeNode(Node):
    def __init__(self, member, expr):
        self.member = member
        self.expr = expr
    @property
    def children(self):
        return [self.member, self.expr]
    def __str__(self):
        return "Set member range"
# Operator leaf used inside BinaryOpNode.
class OpNode(Node):
    def __init__(self, name):
        self.name = name
    def __str__(self):
        return "Operator (%s)" % str(self.name)
# Identifier leaf: a bare name.
class IdentifierNode(Node):
    def __init__(self, name):
        self.name = name
    def __str__(self):
        return "Identifier (%s)" % self.name
# Implicit type conversion wrapper inserted around an expression node.
class TypeConvertNode(Node):
    def __init__(self, child):
        assert isinstance(child, Node)
        self.child = child
    @property
    def children(self):
        return [self.child]
    def __str__(self):
        return "Type convert"
class VarReferenceNode(Node):
    """Pass-by-reference wrapper around a variable access."""

    def __init__(self, var_access):
        self.var_access = var_access

    @property
    def children(self):
        """The wrapped variable access is the only child."""
        return [self.var_access]

    def __str__(self):
        return "Variable reference"
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 3 22:26:06 2020
@author: jakerabinowitz
"""
import numpy as np
import pandas as pd
import statistics as stat
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
# Problem 2a
def read_sat_data():
    """Read the satellite and sunspot data and convert sunspots to mean number per year.
    Returns n_sunspots, n_satellites as numpy arrays.
    """
    # Raw daily sunspot counts; the files are whitespace-separated and use
    # -1 as the "no data" marker.
    sunspots = pd.read_csv('SunspotNumber.dat.txt',
                           names=['year', 'month', 'day', 'sunspots'],
                           delim_whitespace=True,
                           na_values=-1,
                           )
    reentries = pd.read_csv('SatelliteReentry.dat.txt',
                            names=['year', 'reentries'],
                            delim_whitespace=True,
                            )
    # Mean sunspot count per year, restricted to the years that also appear
    # in the satellite reentry data so the two arrays line up.
    yearly = sunspots.groupby('year')['sunspots'].mean().reset_index()
    matched = yearly[yearly['year'].isin(list(reentries["year"]))]
    return matched["sunspots"].values, reentries["reentries"].values
def sat_likelihood(data, theta):
    """Compute the likelihood of our data given a set of parameters theta.
    In our case, data is a tuple of numpy arrays: (n_sunspots, n_reentries) and theta is the tuple (a,b).
    The likelihood is a Gaussian, assuming the uncertainty in n_reentries is sqrt(n_reentries).
    Constant factors independent of theta are ignored, since only relative
    likelihoods between different theta values matter.
    Returns the likelihood for this choice of theta.
    """
    n_sunspots, n_reentries = data[0], data[1]
    a, b = theta[0], theta[1]
    # Poisson-style uncertainty on the reentry counts
    sigma = np.sqrt(n_reentries)
    # Chi-squared of the linear model n_reentries = a + b * n_sunspots
    residuals = (n_reentries - a - b * n_sunspots) / sigma
    return np.exp(-0.5 * np.sum(residuals ** 2))
# Problem 2a (continued)
class MCMC(object):
    """Run an MCMC chain using the Metropolis-Hastings algorithm.

    Create an instance with:
        mcmc = MCMC(likelihood, data, theta, step_size)
    where likelihood(data, theta) -> p(data|theta) is supplied by the caller,
    data is whatever that function expects (fixed over the chain), theta is
    the starting parameter vector and step_size gives the Gaussian proposal
    width in each dimension.

    Then:
        mcmc.burn(nburn)        -- advance the chain without saving samples
        mcmc.run(nsteps)        -- advance the chain, saving each sample
        mcmc.accept_fraction()  -- fraction of candidate steps accepted
        mcmc.get_samples()      -- sampled theta values as a 2d numpy array
        mcmc.plot_hist()        -- histogram of samples per parameter
        mcmc.plot_samples()     -- trace of samples over the chain
    """
    def __init__(self, likelihood, data, theta, step_size, names=None, seed=314159):
        """Store the chain configuration and evaluate the starting likelihood."""
        self.likelihood = likelihood
        self.data = data
        self.theta = np.array(theta)
        self.nparams = len(theta)
        self.step_size = np.array(step_size)
        self.rng = np.random.RandomState(seed)
        self.naccept = 0
        self.current_like = likelihood(self.data, self.theta)
        self.samples = []
        if names is None:
            # Typo fix: "Paramter" -> "Parameter" in the default plot labels.
            names = ["Parameter {:d}".format(k+1) for k in range(self.nparams)]
        self.names = names
    def step(self, save=True):
        """Take a single Metropolis-Hastings step in the chain."""
        # Gaussian proposal centered on the current position
        theta_new = self.theta + self.rng.normal(0, self.step_size)
        # Bug fix: the original called the module-level sat_likelihood here,
        # silently ignoring the likelihood function handed to the constructor.
        # Also reuse the cached current likelihood instead of recomputing it
        # for every candidate step.
        like_new = self.likelihood(self.data, theta_new)
        ratio = like_new / self.current_like
        # Accept if the candidate is more likely; otherwise accept with
        # probability `ratio`. The uniform draw only happens when ratio < 1,
        # matching the original RNG stream.
        if ratio >= 1 or self.rng.uniform() < ratio:
            self.theta = theta_new
            self.current_like = like_new
            self.naccept += 1
        if save:
            self.samples.append(self.theta)
    def burn(self, nburn):
        """Take nburn steps, but don't save the results."""
        for i in range(nburn):
            self.step(save=False)
    def run(self, nsteps):
        """Take nsteps steps, saving each sampled theta."""
        for i in range(nsteps):
            self.step()
    def accept_fraction(self):
        """Return the fraction of candidate steps that were accepted so far.

        NOTE(review): naccept also counts acceptances during burn(), while
        the denominator only counts saved samples -- confirm this is the
        intended definition.
        """
        if len(self.samples) > 0:
            return float(self.naccept) / len(self.samples)
        else:
            return 0.
    def clear(self, step_size=None, theta=None):
        """Clear the stored samples from any runs so far.

        Optionally set a new step_size, and/or restart the chain from a new
        theta (which re-evaluates the cached likelihood).
        """
        if step_size is not None:
            assert len(step_size) == self.nparams
            self.step_size = np.array(step_size)
        if theta is not None:
            assert len(theta) == self.nparams
            self.theta = np.array(theta)
            self.current_like = self.likelihood(self.data, self.theta)
        self.samples = []
        self.naccept = 0
    def get_samples(self):
        """Return the sampled theta values at each step in the chain as a 2d numpy array."""
        return np.array(self.samples)
    def plot_hist(self):
        """Plot a histogram of the sample values for each parameter in the theta vector."""
        all_samples = self.get_samples()
        for k in range(self.nparams):
            theta_k = all_samples[:, k]
            plt.hist(theta_k, bins=100)
            plt.xlabel(self.names[k])
            plt.ylabel("N Samples")
            plt.show()
    def plot_samples(self):
        """Plot the sample values over the course of the chain so far."""
        all_samples = self.get_samples()
        for k in range(self.nparams):
            theta_k = all_samples[:, k]
            plt.plot(range(len(theta_k)), theta_k)
            plt.xlabel("Step in chain")
            plt.ylabel(self.names[k])
            plt.show()
# --- Driver: fit the linear sunspot/reentry model with the MCMC chain ---
# data is the (n_sunspots, n_reentries) tuple the likelihood expects;
# theta is the (a, b) starting guess and step_size the proposal widths.
sun, sat = read_sat_data()
data = (sun, sat)
theta = (13.11, 0.110)
step_size = (1.1, 0.005)
# Likelihood at the starting point (computed for inspection; not used below)
likelihood = sat_likelihood(data, theta)
#Create an MCMC object
mcmc = MCMC(sat_likelihood, data, theta, step_size)
# Advance the chain 100k steps, saving every sample
mcmc.run(int(1e5))
mcmc.get_samples()
# One row per step, columns are the (a, b) samples
ab_array = mcmc.get_samples()
#np.cov(ab_array)
# Split the 2-d sample array into separate a and b arrays
a, b = zip(*ab_array)
a = np.asarray(a)
b = np.asarray(b)
# Problem 2b
def calculate_mean(mcmc):
    """Calculate the mean of each parameter according to the samples in the MCMC object.

    Returns the mean values as a numpy array.
    """
    samples = mcmc.get_samples()
    # Column 0 holds the a samples, column 1 the b samples
    a_vals = samples[:, 0]
    b_vals = samples[:, 1]
    return np.array([a_vals.mean(), b_vals.mean()])
# Sanity check: report the posterior means of (a, b) from the chain above
calculate_mean(mcmc)
def calculate_cov(mcmc):
    """Calculate the covariance matrix of the parameters according to the samples in the MCMC object.

    Returns the covariance matrix as a 2d numpy array.
    """
    samples = mcmc.get_samples()
    # np.cov expects variables in rows, so hand it the transposed samples
    return np.cov(samples.T)
# Problem 2c
def plot_corner(mcmc):
    """Make a corner plot for the parameters a and b with contours corresponding to the same
    delta chisq contours we drew in homework 4.

    Returns the matplotlib figure produced by corner.corner.
    """
    # Bug fix: the unconditional NotImplementedError made everything below
    # unreachable, and `ax` was never defined. Import corner locally (it is
    # only needed for this plot) and title the figure itself.
    import corner
    samples = mcmc.get_samples()
    # levels are the fractions of the sample mass each contour encloses
    fig = corner.corner(samples, labels=mcmc.names, levels=(.1, .5,))
    fig.suptitle(r"$\chi^2(a,b)$ contours ('one-sigma' level)")
    return fig
|
# -*- coding: utf-8 -*-
# Bootstrap for the XQilla XQuery/XPath2 bindings on top of Xerces.
import Xerces
from __XQilla import *
# Initialize XQilla
# NOTE(review): must run before any XQilla API calls; presumably paired with
# a matching terminate elsewhere -- confirm.
XQillaPlatformUtils.initialize()
# XQillaImplementation
# Feature-name string used to look up the XPath2 3.0 DOM implementation
gXPath2_3_0 = Xerces.XMLString('XPath2 3.0')
|
#!/usr/bin/env python2
# -*-coding:utf-8-*-
# Author:SesameMing <blog.v-api.cn>
# Email:admin@v-api.cn
# Time:2017-03-28 16:45
def fact(x):
    """Return x! computed recursively.

    Bug fix: the original base case tested x == 1 only, so fact(0) (and any
    negative input) recursed until the stack overflowed. 0! is 1, so treat
    every x <= 1 as the base case.
    """
    if x <= 1:
        return 1
    else:
        return x * fact(x-1)
# Compatibility fix: `print fact(6)` is Python-2-only syntax; the call form
# prints the same single value under both Python 2 and 3.
print(fact(6))
# Import CSV to Postgresql
import psycopg2
import pandas as pd

# SECURITY NOTE(review): database credentials are hard-coded in source; move
# them to environment variables or a config file kept out of version control.
conn = psycopg2.connect("host=localhost dbname=homework_users user=postgres password=Winchester110283")
cur = conn.cursor()
df_users = pd.read_csv('predefined_users.csv', index_col=0)
try:
    # Parameterized INSERT per row: placeholders handle quoting/escaping
    for idx, u in df_users.iterrows():
        cur.execute('''INSERT INTO users (username, first_name, last_name, prog_lang, experience_yr, age, hw1_hrs) VALUES (%s,%s,%s,%s,%s,%s,%s)''', (u.username, u.first_name, u.last_name, u.prog_lang, u.experience_yr, u.age, u.hw1_hrs))
    conn.commit()
finally:
    # Bug fix: close the cursor and connection even when an insert fails,
    # instead of leaking them on the first exception.
    cur.close()
    conn.close()
import boto3
# ec2を立ち上げてelbに紐付ける
# INSTANCE_ID, ARNはlambdaの環境変数で設定
# エラーハンドリングもできてないし、ログも出ない
def lambda_handler(event, context):
    """Start the configured EC2 instance and register it with the ELB.

    Fixed: the surrounding comment says INSTANCE_ID / LOAD_BALANCER_NAME come
    from Lambda environment variables, but the names were never read (they
    were undefined), and the Python-2 `print` statements do not run on any
    Lambda Python 3 runtime.

    :param event: Lambda invocation event (unused).
    :param context: Lambda context object (unused).
    """
    import os
    instance_id = os.environ['INSTANCE_ID']
    load_balancer_name = os.environ['LOAD_BALANCER_NAME']

    ec2 = boto3.client('ec2')
    ec2_response = ec2.start_instances(
        InstanceIds=[
            instance_id,
        ]
    )
    print(ec2_response)
    # Block until the instance is actually running before registering it.
    waiter = ec2.get_waiter('instance_running')
    waiter.wait(InstanceIds=[instance_id])

    elb = boto3.client('elb')
    elb_response = elb.register_instances_with_load_balancer(
        LoadBalancerName=load_balancer_name,
        Instances=[
            {
                'InstanceId': instance_id
            },
        ]
    )
    print(elb_response)
from django import forms
import datetime
class CommentForm(forms.Form):
    """Comment submission form: a short name field plus the comment body."""
    # Bootstrap-styled single-line input for the commenter's name (<= 50 chars).
    name = forms.CharField(max_length=50, widget=forms.TextInput(attrs={'class': 'form-control mb-2', 'rows': 2, 'cols': 2}))
    # Multi-line comment body, capped at 1000 characters.
    content = forms.CharField(max_length=1000, widget=forms.Textarea(attrs={'class': 'form-control mb-2', 'rows': 4, 'cols': 4}))
# -*- python -*-
# This file contains rules for Bazel; see drake/doc/bazel.rst.
package(default_visibility = ["//visibility:public"])
# Driver binary for the Schunk WSG gripper: position/force control plus the
# LCM command/return message plumbing.
cc_binary(
    name = "schunk_driver",
    srcs = [
        "crc.h",
        "defaults.h",
        "position_force_control.h",
        "position_force_control.cc",
        "schunk_driver.cc",
        "wsg.h",
        "wsg_command_message.h",
        "wsg_command_message.cc",
        "wsg_command_sender.h",
        "wsg_command_sender.cc",
        "wsg_return_message.h",
        "wsg_return_message.cc",
        "wsg_return_receiver.h",
        "wsg_return_receiver.cc",
    ],
    # Statically linked so the deployed driver is self-contained.
    linkstatic = 1,
    deps = [
        "@drake//lcmtypes:schunk",
        "@gflags//:gflags",
        "@lcm//:lcm",
    ]
)
|
# Printing order: left to right, top to bottom.
# 1-D arrays print as a row, 2-D arrays as a matrix, 3-D arrays as a list
# of matrices.
import numpy as np
print(np.arange(1,6,2))
print(np.arange(12).reshape(3,4)) # reshape changes the printed shape
print(np.arange(24).reshape(2,3,4))# 2 pages, 3 rows, 4 columns
|
from django.conf.urls import url, patterns
from newswall.feeds import StoryFeed
from newswall import views
# URL routes for the newswall app: RSS feed, AJAX feed data, and the
# date-based archive/detail views.
# NOTE(review): `patterns()` was deprecated in Django 1.8 and removed in
# 1.10 — a plain list of url() entries is the modern form; this file
# targets an older Django.
urlpatterns = patterns(
    '',
    url(r'^feed/$', StoryFeed()),
    url(r'^get/$',
        views.FeedDataView.as_view(),
        name='newswall_feed_data'),
    url(r'^$',
        views.ArchiveIndexView.as_view(),
        name='newswall_entry_archive'),
    url(r'^(?P<year>\d{4})/$',
        views.YearArchiveView.as_view(),
        name='newswall_entry_archive_year'),
    url(r'^(?P<year>\d{4})/(?P<month>\d{2})/$',
        views.MonthArchiveView.as_view(),
        name='newswall_entry_archive_month'),
    url(r'^(?P<year>\d{4})/(?P<month>\d{2})/(?P<day>\d{2})/$',
        views.DayArchiveView.as_view(),
        name='newswall_entry_archive_day'),
    url(r'^(?P<year>\d{4})/(?P<month>\d{2})/(?P<day>\d{2})/(?P<slug>[-\w]+)/$',
        views.DateDetailView.as_view(),
        name='newswall_entry_detail'),
    url(r'^source/(?P<slug>[-\w]+)/$',
        views.SourceArchiveIndexView.as_view(),
        name='newswall_source_detail'),
)
|
"""
The JSON Extension adds the :class:`JsonOutputHandler` to render
output in pure JSON, as well as the :class:`JsonConfigHandler` that allows
applications to use JSON configuration files as a drop-in replacement of
the default :class:`cement.ext.ext_configparser.ConfigParserConfigHandler`.
Requirements
------------
* No external dependencies.
Configuration
-------------
This extension does not support any configuration settings.
Usage
-----
**myapp.conf**
.. code-block:: json
{
"myapp": {
"foo": "bar"
}
}
**myapp.py**
.. code-block:: python
from cement.core.foundation import CementApp
class MyApp(CementApp):
class Meta:
label = 'myapp'
extensions = ['json']
config_handler = 'json'
# you probably don't want this to be json by default.. but you can
# output_handler = 'json'
with MyApp() as app:
app.run()
# create some data
data = dict(foo=app.config.get('myapp', 'foo'))
app.render(data)
In general, you likely would not set ``output_handler`` to ``json``, but
rather another type of output handler that display readable output to the
end-user (i.e. Mustache, Genshi, or Tabulate). By default Cement
adds the ``-o`` command line option to allow the end user to override the
output handler. For example: passing ``-o json`` will override the default
output handler and set it to ``JsonOutputHandler``.
See ``CementApp.Meta.handler_override_options``.
.. code-block:: console
$ python myapp.py -o json
{"foo": "bar"}
What if I Want To Use UltraJson or Something Else?
--------------------------------------------------
It is possible to override the backend ``json`` library module to use, for
example if you wanted to use UltraJson (``ujson``) or another
**drop-in replacement** library. The recommended solution would be to
override the ``JsonOutputHandler`` with your own sub-classed version, and
modify the ``json_module`` meta-data option.
.. code-block:: python
from cement.ext.ext_json import JsonOutputHandler
class MyJsonHandler(JsonOutputHandler):
class Meta:
json_module = 'ujson'
# then, the class must be replaced via a 'post_setup' hook
def override_json(app):
app.handler.register(MyJsonHandler, force=True)
app.hook.register('post_setup', override_json)
"""
from ..core import output
from ..utils.misc import minimal_logger
from ..ext.ext_configparser import ConfigParserConfigHandler
LOG = minimal_logger(__name__)
def suppress_output_before_run(app):
    """
    ``post_argument_parsing`` hook: silence console output whenever the
    ``JsonOutputHandler`` was selected via the command line.

    :param app: The application object.
    """
    if getattr(app.pargs, 'output_handler_override', None) == 'json':
        app._suppress_output()
def unsuppress_output_before_render(app, data):
    """
    ``pre_render`` hook: re-enable console output just before rendering so
    that the JSON document is the only thing emitted.

    :param app: The application object.
    :param data: The data about to be rendered (unused here).
    """
    if getattr(app.pargs, 'output_handler_override', None) == 'json':
        app._unsuppress_output()
def suppress_output_after_render(app, out_text):
    """
    ``post_render`` hook: silence console output again after rendering,
    mirroring the pre-run suppression.

    :param app: The application object.
    :param out_text: The rendered output text (unused here).
    """
    if getattr(app.pargs, 'output_handler_override', None) == 'json':
        app._suppress_output()
class JsonOutputHandler(output.CementOutputHandler):
    """
    This class implements the :ref:`IOutput <cement.core.output>`
    interface. It provides JSON output from a data dictionary using the
    `json <http://docs.python.org/library/json.html>`_ module of the standard
    library. Please see the developer documentation on
    :ref:`Output Handling <dev_output_handling>`.
    This handler forces Cement to suppress console output until
    ``app.render`` is called (keeping the output pure JSON). If
    troubleshooting issues, you will need to pass the ``--debug`` option in
    order to unsuppress output and see what's happening.
    """
    class Meta:
        """Handler meta-data"""
        interface = output.IOutput
        """The interface this class implements."""
        label = 'json'
        """The string identifier of this handler."""
        #: Whether or not to include ``json`` as an available choice
        #: to override the ``output_handler`` via command line options.
        overridable = True
        #: Backend JSON library module to use (`json`, `ujson`)
        json_module = 'json'
    def __init__(self, *args, **kw):
        super(JsonOutputHandler, self).__init__(*args, **kw)
        self._json = None  # backend json module; resolved in _setup()
    def _setup(self, app):
        # Import the configured backend lazily so a subclass can point
        # Meta.json_module at a drop-in replacement (e.g. ujson).
        super(JsonOutputHandler, self)._setup(app)
        self._json = __import__(self._meta.json_module,
                                globals(), locals(), [], 0)
    def render(self, data_dict, template=None, **kw):
        """
        Take a data dictionary and render it as Json output. Note that the
        template option is received here per the interface, however this
        handler just ignores it. Additional keyword arguments passed to
        ``json.dumps()``.
        :param data_dict: The data dictionary to render.
        :keyword template: This option is completely ignored.
        :returns: A JSON encoded string.
        :rtype: ``str``
        """
        LOG.debug("rendering output as Json via %s" % self.__module__)
        return self._json.dumps(data_dict, **kw)
class JsonConfigHandler(ConfigParserConfigHandler):
    """
    This class implements the :ref:`IConfig <cement.core.config>`
    interface, and provides the same functionality of
    :ref:`ConfigParserConfigHandler <cement.ext.ext_configparser>`
    but with JSON configuration files.
    """
    class Meta:
        """Handler meta-data."""
        label = 'json'
        #: Backend JSON library module to use (`json`, `ujson`).
        json_module = 'json'
    def __init__(self, *args, **kw):
        super(JsonConfigHandler, self).__init__(*args, **kw)
        self._json = None  # backend json module; resolved in _setup()
    def _setup(self, app):
        # Resolve the backend lazily so Meta.json_module can be overridden
        # (e.g. swapped for ujson) before setup runs.
        super(JsonConfigHandler, self)._setup(app)
        self._json = __import__(self._meta.json_module,
                                globals(), locals(), [], 0)
    def _parse_file(self, file_path):
        """
        Parse JSON configuration file settings from file_path, overwriting
        existing config settings. If the file does not exist, returns False.
        :param file_path: The file system path to the JSON configuration file.
        :returns: boolean
        """
        self.merge(self._json.load(open(file_path)))
        # FIX ME: Should check that file was read properly, however if not it
        # will likely raise an exception anyhow.
        return True
def load(app):
    """Register this extension's hooks and handlers with the application."""
    hook_table = (
        ('post_argument_parsing', suppress_output_before_run),
        ('pre_render', unsuppress_output_before_render),
        ('post_render', suppress_output_after_render),
    )
    for hook_name, hook_fn in hook_table:
        app.hook.register(hook_name, hook_fn)
    app.handler.register(JsonOutputHandler)
    app.handler.register(JsonConfigHandler)
|
from database_services.RDBService import RDBService
class UserRDBService(RDBService):
    """Read-side service for users joined with their primary address."""

    def __init__(self):
        pass

    @classmethod
    def get_user_and_address(cls, template):
        """Return users left-joined to their primary address record.

        :param template: mapping used to build a WHERE clause.
        :returns: whatever RDBService.run_sql returns for the query.

        NOTE(review): ``wc`` (the WHERE clause) is computed but never
        appended to the SQL, so ``template`` currently does not filter the
        result — presumably it should; confirm intended behavior.
        """
        wc, args = RDBService.get_where_clause_args(template)
        # Fixed: the join column was written as `db.primary_address_id`,
        # which qualifies the column with the schema instead of the table.
        sql = "select * from db.users left join db.addresses on " + \
              "db.users.primary_address_id = db.addresses.id"
        res = RDBService.run_sql(sql, args, fetch=True)
        return res
|
# -*- coding: utf-8 -*-
"""
@Project : show_project
@File : savedb.py
@Author : 王白熊
@Data : 2021/3/22 14:20
"""
# -*- coding:UTF-8 -*-
import pandas as pd
from Log import Logger
from sqlalchemy import create_engine
logger = Logger('excel_to_db').getlog()
import psycopg2
from io import StringIO
def psycopg2_function(df, table_name, host='10.10.10.42', port='5432', user='DRCRM', passwd='123456', db='DRCRM_ZHGS'):
    """Bulk-load DataFrame ``df`` into PostgreSQL table ``table_name``,
    creating the table with a fixed obstacle-report schema if it is missing.

    The frame is serialized to a tab-separated in-memory buffer and streamed
    in with ``cursor.copy_from`` (fast path compared to row-by-row INSERTs).
    """
    flag = False
    output = StringIO()
    # No index/header: copy_from expects bare tab-separated data rows.
    df.to_csv(output, sep='\t', index=False, header=False)
    output1 = output.getvalue()
    conn = psycopg2.connect(host=host, port=port, user=user, password=passwd, dbname=db)
    cur = conn.cursor()
    # Check whether the table exists; create it if not.
    try:
        cur.execute("select to_regclass(" + "\'" + table_name + "\'" + ") is not null")
        rows = cur.fetchall()
    except Exception as e:
        # NOTE(review): broad catch — any query failure is treated as
        # "table unknown" and triggers creation below.
        rows = []
    if rows:
        data = rows
        flag = data[0][0]
    if flag != True:
        # Fixed schema mirroring the merged obstacle-report Excel layout.
        sql = f'''CREATE TABLE "public"."{table_name}" ( \
"traffic_report_obstacle_2d_id" int8 NOT NULL DEFAULT nextval('traffic_report_obstacle_2d_traffic_report_obstacle_2d_id_seq'::regclass),
"id" int4,
"timestamp" float8,
"center_x" float8,
"center_y" float8,
"center_z" float8,
"length" float8,
"width" float8,
"height" float8,
"obj_type" int4,
"velocity_x" float8,
"velocity_y" float8,
"velocity_z" float8,
"angular_velocity" float8,
"acceleration_x" float8,
"acceleration_y" float8,
"acceleration_z" float8,
"local_timestamp" timestamp(6),
"frame_number" int8 NOT NULL DEFAULT 0,
"theta" float8,
"track_id" int4,
"lane_ids" varchar(255) COLLATE "pg_catalog"."default",
"connection_ids" varchar(255) COLLATE "pg_catalog"."default",
"det_confidence" float8,
"obs_drsuids" varchar COLLATE "pg_catalog"."default",
"is_valid" bool
)
;'''
        cur.execute(sql)
        conn.commit()
        logger.info(f'create table {table_name} success. ')
    # Get the column names.
    columns = list(df)
    cur.copy_from(StringIO(output1), table_name, null='', columns=columns)
    conn.commit()
    cur.close()
    conn.close()
def excel_to_DB(data, host='10.10.10.42', port='5432', user='DRCRM', passwd='123456', db='DRCRM_ZHGS',
                table_name='traffic_obstacle_2d_00'):
    """
    Store tabular data into the database.

    :param data: a pandas DataFrame, or a path to an ``.xlsx`` file
    :param host: database host
    :param port: database port
    :param user: database account
    :param passwd: account password
    :param db: database name
    :param table_name: destination table name
    :raises TypeError: if ``data`` is neither a DataFrame nor an .xlsx path
    """
    if isinstance(data, pd.core.frame.DataFrame):
        dataframe = data
    elif isinstance(data, str) and data.endswith('.xlsx'):
        dataframe = pd.read_excel(data, engine='openpyxl')
    else:
        logger.error('不支持的类型')
        # Fixed: the original only logged and fell through, crashing later
        # with a NameError because `dataframe` was never assigned.
        raise TypeError(f'unsupported data type: {type(data)!r}')
    logger.info('read excel success. ')
    # Bulk load via copy_from (faster than DataFrame.to_sql row inserts).
    psycopg2_function(dataframe, table_name, host, port, user, passwd, db)
    logger.info('write db success. ')
if __name__ == '__main__':
    import time
    # ticks_min = time.strftime("%Y-%m-%d %H:%M", time.localtime())
    # ticks_min_timestramp = int(time.mktime(time.strptime(ticks_min, "%Y-%m-%d %H:%M")))
    # Timestamp suffix (YYYYMMDDhhmm) so each run writes to a fresh table.
    rq = time.strftime('%Y%m%d%H%M', time.localtime(time.time()))
    table_name = f'traffic_report_obstacle_2d_wth_{rq}'
    excel_to_DB(r'data\merge_data\merge_add_drsu.xlsx',
                host='10.10.10.42', port='5432', user='DRCRM', passwd='123456', db='DRCRM_ZHGS_3',
                table_name=table_name)
|
import constants as const
def get_value_by_key_from_answer(answer, key):
    """Look up *key* in the user's *answer* mapping and return its value."""
    value = answer[key]
    return value
def split_chosen_coffee_with_price(answer):
    """Split the "<type> <price> ... <currency>" coffee entry.

    Returns (coffee_type, price_for_one_coffee, currency_type), with the
    price converted to int.
    """
    tokens = get_value_by_key_from_answer(answer, const.COFFEE_TYPE).split()
    return tokens[0], int(tokens[1]), tokens[-1]
def calculate_price_for_order(answer):
    """Total order price: unit price of the chosen coffee times quantity."""
    _, unit_price, _ = split_chosen_coffee_with_price(answer)
    quantity = get_value_by_key_from_answer(answer, const.QUANTITY)
    return unit_price * quantity
def preparing_data_for_creating_table(answer):
    """Creating columns and rows for creating table with bill.

    Iterates the answer mapping in order, keeping only the displayable
    fields, then appends the computed total.

    :returns: (rows_list, columns) where rows_list is a 1-tuple containing
        the single row of cell values, as the table renderer expects.
    """
    # Renamed from set_of_correct_columns — it is a tuple, not a set.
    allowed_columns = (const.COFFEE_TYPE, const.QUANTITY, const.ADDITIONAL_INGREDIENTS)
    rows, columns = [], []
    for key in answer:
        if key not in allowed_columns:
            continue
        value = answer[key]
        # isinstance instead of `type(...) is list` (idiomatic type check);
        # list values (extra ingredients) render comma-separated.
        if isinstance(value, list):
            rows.append(", ".join(value))
        else:
            rows.append(value)
        columns.append(key)
    total_price_for_current_order = calculate_price_for_order(answer)
    columns.append("Total in USD")
    rows.append(f"{total_price_for_current_order}")
    rows_list = (rows,)  # need to do it for creating correct table
    return rows_list, columns
|
from functools import reduce
from math import log, pow, inf
from statistics import mean
BASE = 2
JUMP = 0.05
def frange(x, y, jump):
    """
    Like `range()`, but for floats
    http://stackoverflow.com/a/7267280/2250435
    """
    value = x
    while value < y:
        yield value
        value += jump
def trigram_lambdas():
    """
    Enumerate all lambdaA and lambdaB combinations for a trigram model.

    Yields each (a, b) pair on the JUMP grid whose sum stays below 1, so the
    third (implicit) weight remains positive.
    """
    for a in frange(0, 1, JUMP):
        yield from ((a, b) for b in frange(0, 1, JUMP) if (a + b) < 1)
def find_negative_log(a):
    """Return -log_BASE(a); a zero probability maps to 0 (not infinity)."""
    return -log(a, BASE) if a else a
def get_perplexity_from_probabilities(probabilities):
    """
    Combine a bunch of probabilities into a perplexity value.

    Arguments:
        probabilities (iterable(float)): probabilities to combine
    Return:
        (float): BASE raised to the average negative log probability

    Raises ZeroDivisionError for an empty input (unchanged from before).
    """
    # Materialize exactly once. The original consumed the input via map()
    # and then called len(list(probabilities)) on it again, which yields a
    # zero length (and a ZeroDivisionError) for generator/iterator inputs.
    probs = list(probabilities)
    entropy = sum(find_negative_log(p) for p in probs)
    entropy /= len(probs)
    return pow(BASE, entropy)
class ProceedingWord(object):
    """
    Store information about a proceeding word.

    Tracks how many times this word followed one particular
    PreceedingWords context.
    """

    def __init__(self, word, preceeding_words=None):
        self.word = word
        self.occurences = 0
        # Owning PreceedingWords context (supplies total_occurences).
        self.preceeding = preceeding_words

    @property
    def probability(self):
        """Relative frequency of this word within its preceding context."""
        return self.occurences / self.preceeding.total_occurences

    def __eq__(self, other):
        # Comparable against both bare strings and other instances.
        other_word = other if isinstance(other, str) else other.word
        return self.word == other_word
class PreceedingWords(object):
    """
    Store information about a set of preceding words and the words that
    came after them.
    """

    def __init__(self, words):
        self.words = words              # the context (also the hash key)
        self.proceeding_words = dict()  # word -> ProceedingWord
        self.total_occurences = 0

    def add_occurence(self, word):
        """
        Record one more sighting of `word` after this context, creating its
        ProceedingWord entry on first sight and rescaling all probabilities
        implicitly via total_occurences.
        """
        entry = self.proceeding_words.setdefault(
            word, ProceedingWord(word, self))
        entry.occurences += 1
        self.total_occurences += 1

    @property
    def next_word(self):
        """
        The most probable follower of this context, or None when nothing
        has been recorded. Ties resolve in favour of the later entry,
        matching the original reduce()-based selection.

        Returns:
            (ProceedingWord): the word that is most likely to be next
        """
        best = None
        for candidate in self.proceeding_words.values():
            if best is None or candidate.probability >= best.probability:
                best = candidate
        return best

    def __hash__(self):
        return hash(self.words)

    def __eq__(self, other):
        return hash(self) == hash(other)
class NGram(object):
    """
    NGram

    Stores information about the probability of some word occurring given
    `N` preceding words. A unigram model (tracking zero words of history)
    works by linking every word to the empty tuple of previous words.

    Arguments:
        - n (int): the number of historical words to base suggestions on
    """

    def __init__(self, n):
        self.n = n
        self.dictionary = {}

    def add(self, word, preceeding=()):
        """
        Record one preceding-words/next-word combination.

        Arguments:
            word (string): The word to add
            preceeding (tuple): N number of preceding words (empty tuple
                for the unigram case)
        """
        self._ensure_preceeding(preceeding)
        self.dictionary[preceeding].add_occurence(word)

    def next_word(self, words=()):
        """
        Given some set of previous words, return the most likely next word.

        Arguments:
            words (tuple): The previous words
        Returns:
            (string): the most likely next word
        """
        self._ensure_preceeding(words)
        return self.dictionary[words].next_word.word

    def probability_of(self, next_word, previous=()):
        """Probability of `next_word` after `previous`; 0 when unseen."""
        bucket = self.dictionary.get(previous)
        if bucket is None:
            return 0
        follower = bucket.proceeding_words.get(next_word)
        return 0 if follower is None else follower.probability

    def _ensure_preceeding(self, preceeding):
        """Create an empty bucket for `preceeding` if none exists yet."""
        if preceeding not in self.dictionary:
            self.dictionary[preceeding] = PreceedingWords(preceeding)
class Model(object):
    """
    An n-gram language model, selected by `description`:

        '1'  unigram      '2'  bigram       '3'  trigram
        '2s' interpolated unigram+bigram
        '3s' interpolated unigram+bigram+trigram

    Smoothed ('s') models mix the component perplexities with weights
    lamA/lamB, which `tune()` fits by grid search on held-out sentences.
    """

    def __init__(self, description):
        self.description = description
        self.unigram = NGram(0)
        self.bigram = NGram(1)
        self.trigram = NGram(2)
        # Interpolation weights; populated by tune() for smoothed models.
        self.lamA = None
        self.lamB = None

    @property
    def is_smoothed_bigram(self):
        return self.description == '2s'

    @property
    def is_smoothed_trigram(self):
        return self.description == '3s'

    def train(self, sentences):
        """
        Train the model on the given sentences.

        Feed the given sentences to the NGrams so that we can train the
        model on their structure.

        Arguments:
            sentences (list(Sentence)): the sentences to train against
        """
        if self.description == '1':
            self._unigram(sentences)
        if self.description == '2':
            self._bigram(sentences)
        if self.description == '2s':
            self._unigram(sentences)
            self._bigram(sentences)
        if self.description == '3':
            self._trigram(sentences)
        if self.description == '3s':
            self._unigram(sentences)
            self._bigram(sentences)
            self._trigram(sentences)

    def evaluate(self, sentences):
        """
        Evaluate sentences against the model, calculating their perplexity.

        Arguments:
            sentences (list(Sentence)): the sentences to evaluate
        Return:
            (list((Sentence, float))): (sentence, perplexity) couples
        """
        return [(sentence, self.perplexity_of(sentence))
                for sentence in sentences]

    def tune(self, sentences):
        """Fit the interpolation weights on held-out sentences."""
        if self.is_smoothed_bigram:
            self._tune_bigram(sentences)
        elif self.is_smoothed_trigram:
            self._tune_trigram(sentences)
        else:
            raise RuntimeError('Unexpected model type: {}'
                               .format(self.description))

    def _tune_bigram(self, sentences):
        # Grid-search lamA for the lowest average perplexity.
        best_lambda = None
        best_perplexity = inf
        for candidate in frange(0, 1, JUMP):
            self.lamA = candidate
            perplexity = self._average_perplexity_for(sentences)
            if perplexity < best_perplexity:
                best_perplexity = perplexity
                best_lambda = candidate
        self.lamA = best_lambda
        print('Best Lambda A: {}'.format(self.lamA))

    def _tune_trigram(self, sentences):
        # Grid-search (lamA, lamB) jointly for the lowest average perplexity.
        best_lambdas = (None, None)
        best_perplexity = inf
        for (a, b) in trigram_lambdas():
            self.lamA = a
            self.lamB = b
            perplexity = self._average_perplexity_for(sentences)
            if perplexity < best_perplexity:
                best_perplexity = perplexity
                best_lambdas = (a, b)
        self.lamA, self.lamB = best_lambdas

    def _average_perplexity_for(self, sentences):
        return mean(perplexity for _, perplexity in self.evaluate(sentences))

    def perplexity_of(self, sentence):
        """Perplexity of `sentence` under the configured model flavour."""
        if self.description == '1':
            return self._unigram_perplexity_of(sentence)
        if self.description == '2':
            return self._bigram_perplexity_of(sentence)
        if self.description == '2s':
            uni = self._unigram_perplexity_of(sentence)
            bi = self._bigram_perplexity_of(sentence)
            return self._smooth(unigram=uni, bigram=bi)
        if self.description == '3':
            return self._trigram_perplexity_of(sentence)
        if self.description == '3s':
            uni = self._unigram_perplexity_of(sentence)
            bi = self._bigram_perplexity_of(sentence)
            tri = self._trigram_perplexity_of(sentence)
            return self._smooth(unigram=uni, bigram=bi, trigram=tri)
        # BUG FIX: previously fell through and silently returned None for an
        # unrecognised description.
        raise ValueError('Unknown model description: {}'
                         .format(self.description))

    def _unigram(self, sentences):
        """Count every word for the unigram model."""
        for sentence in sentences:
            for word in sentence:
                self.unigram.add(word)

    def _bigram(self, sentences):
        """Count (previous,) -> word transitions for the bigram model."""
        for sentence in sentences:
            words = ['<pre>'] + list(sentence)
            for i in range(len(words) - 1):
                # BUG FIX: `(pre)` is just `pre`, not a tuple — use a
                # 1-tuple so the context key type matches lookups.
                self.bigram.add(words[i + 1], (words[i],))

    def _trigram(self, sentences):
        """Count (prev2, prev1) -> word transitions for the trigram model."""
        for sentence in sentences:
            words = ['<pre>', '<pre>'] + list(sentence)
            for i in range(len(words) - 2):
                # BUG FIX: these counts previously went into self.bigram,
                # leaving self.trigram permanently empty.
                self.trigram.add(words[i + 2], (words[i], words[i + 1]))

    def _unigram_perplexity_of(self, sentence):
        """
        Find the unigram perplexity of a sentence.

        Arguments:
            sentence (Sentence): the sentence to process
        Returns:
            (float): the perplexity
        """
        probabilities = [self.unigram.probability_of(word)
                         for word in sentence]
        return get_perplexity_from_probabilities(probabilities)

    def _bigram_perplexity_of(self, sentence):
        """
        Find the bigram perplexity of a sentence.

        Arguments:
            sentence (Sentence): the sentence to process
        Returns:
            (float): the perplexity
        """
        words = ['<pre>'] + list(sentence)
        probabilities = [
            # BUG FIX: 1-tuple context, matching _bigram() above.
            self.bigram.probability_of(words[i + 1], (words[i],))
            for i in range(len(sentence))
        ]
        return get_perplexity_from_probabilities(probabilities)

    def _trigram_perplexity_of(self, sentence):
        """
        Find the trigram perplexity of a sentence.

        Arguments:
            sentence (Sentence): the sentence to process
        Returns:
            (float): the perplexity
        """
        words = ['<pre>', '<pre>'] + list(sentence)
        probabilities = [
            # BUG FIX: query self.trigram (was self.bigram).
            self.trigram.probability_of(words[i + 2],
                                        (words[i], words[i + 1]))
            for i in range(len(sentence))
        ]
        return get_perplexity_from_probabilities(probabilities)

    def _smooth(self, unigram=None, bigram=None, trigram=None):
        """
        Smooth the perplexity of the sentence based on the provided parts.

        Returns:
            (float): the smoothed perplexity value
        """
        if self.description == '2s':
            return self._smooth2(unigram, bigram)
        if self.description == '3s':
            return self._smooth3(unigram, bigram, trigram)
        # BUG FIX: was `self.desription`, which raised AttributeError here.
        raise Exception('Expected model to be smoothed; was {}'
                        .format(self.description))

    def _smooth2(self, unigram, bigram):
        """
        Smooth a bigram model: lamA * unigram + (1 - lamA) * bigram.
        """
        return (self.lamA * unigram +
                (1 - self.lamA) * bigram)

    def _smooth3(self, unigram, bigram, trigram):
        """
        Smooth a trigram model with weights lamA, lamB, and the remainder.
        """
        return (self.lamA * unigram +
                self.lamB * bigram +
                (1 - self.lamA - self.lamB) * trigram)
|
#
# @lc app=leetcode id=12 lang=python3
#
# [12] Integer to Roman
#
# @lc code=start
class Solution:
    """LeetCode 12: convert an integer (1..3999) to a Roman numeral."""

    def intToRoman(self, num: int) -> str:
        """Greedily consume the largest symbol value that still fits."""
        symbol_values = [
            (1000, 'M'), (900, 'CM'), (500, 'D'), (400, 'CD'),
            (100, 'C'), (90, 'XC'), (50, 'L'), (40, 'XL'),
            (10, 'X'), (9, 'IX'), (5, 'V'), (4, 'IV'), (1, 'I'),
        ]
        pieces = []
        for value, symbol in symbol_values:
            count, num = divmod(num, value)
            pieces.append(symbol * count)
        return "".join(pieces)
# @lc code=end
|
# AirBnB RPA Functions
import os
import rpa as r
import shutil
URL = "https://www.airbnb.com.sg"
USERNAME = "rpatestuser001@gmail.com"
PASSWORD = "P@$$w0rd123"
def initialize():
    """Open the Airbnb landing page in the RPA browser, reloading until the
    search form's submit button is present (some loads serve a page without
    it)."""
    print('Initializing...')
    r.init()
    r.timeout(15) #set timeout to wait longer
    r.url(URL)
    while r.exist('//*/button[@type="submit"]') == False:
        r.url(URL)
        print("Wrong page detected, retrying..")
    r.click('//*/button[@type="submit"]') # Anti RPA by AirBnB
    print('Done.')
def recreate_temp():  # for image download
    """Reset the data/ scratch tree: fresh data/1 .. data/5 directories for
    the downloaded listing images."""
    print('Clearing Temp Files..')
    shutil.rmtree('data/', ignore_errors=True)
    os.mkdir('data')
    for slot in ('1', '2', '3', '4', '5'):
        os.mkdir('data/' + slot)
    print('Done.')
def login(): # GG CAPTCHA (abandoned ship)
    """Attempt an email/password login; in practice blocked by reCAPTCHA,
    hence the 'abandoned' note."""
    r.click('//header[@role="banner"]/div/div/div[3]/div/div/nav/ul/li[6]')
    r.wait(10)
    if r.present('//div[@aria-label="Log in"]/div[2]/div[4]/button') == True:
        # Anti RPA by AirBnB
        r.click('//div[@aria-label="Log in"]/div[2]/div[4]/button')
    if r.present('//button[@data-testid="social-auth-button-email"]') == True:
        # Anti RPA by AirBnB
        r.click('//button[@data-testid="social-auth-button-email"]')
    r.type('//*[@id="email"]', USERNAME)
    r.type('//*[@id="password"]', PASSWORD)
    r.click('//button[@data-veloute="submit-btn-cypress"]')
    r.click('//*[@id="recaptcha-anchor"]/div[1]')
def logout(): # GG CAPTCHA (abandoned ship)
    """Open the account menu and click the logout entry."""
    r.click('//header[@role="banner"]/div/div/div[3]/div/div/nav/ul/li[6]')
    r.click('//*[@id="headerNavUserMenu"]/li[8]/form/button')
def select_stay():
    """Pick the "Stays" option from the search-header dropdown."""
    print('Selecting "Stays"...')
    r.click('//*[@id="Koan-via-SearchHeader__input"]')
    r.click('//*[@id="Koan-via-SearchHeader__option-1"]')
    print('Done.')
def enter_country_city(city):
    """Type the destination `city` into the location search box."""
    print(f'Entering City Information...{city}')
    #r.click('//*[@aria-label="Search"]')
    r.type('//*[@placeholder="Add city, landmark, or address"]', city)
    #r.type('//*[@id="Koan-via-SearchHeader__input"]', city)
    #r.click('//*[@id="Koan-via-SearchHeader__option-0"]')
    print('Done.')
def close_cookie_popup():
    """Accept the cookie-consent banner when it is shown."""
    accept_button = "//button[@class='optanon-allow-all accept-cookies-button']"
    if r.present(accept_button):
        r.click(accept_button)
def monthyearconversion(datestamp):
    """Strip the leading day from a "DD Month YYYY" datestamp and return
    the remaining "Month YYYY" part."""
    parts = datestamp.split(" ", 1)
    return parts[1]
def monthyearnavigate(monthyear):
    """Click the datepicker's "Next" arrow until its header shows the target
    "Month Year" label."""
    compare = r.read('//*[@aria-roledescription="datepicker"]/div/div[1]/div[2]/div/div/div')
    while(monthyear != compare):
        r.click('//*[contains(@aria-label,"Next")]')
        compare = r.read(
            '//*[@aria-roledescription="datepicker"]/div/div[1]/div[2]/div/div/div')
def enter_dates(checkin, checkout):
    """Fill the check-in/check-out dates (format "DD Month YYYY") by paging
    the datepicker to each month and clicking the day cell."""
    r.click('//*[@role="search"]/div/div/div[3]/div/button')
    print(f'Entering Check in and Check out Dates..{checkin} to {checkout}')
    checkinmonthyear = monthyearconversion(checkin)
    # r.click('//*[@id="menuItemButton-date_picker"]/button')
    # r.click('//*[@id="filter-menu-chip-group"]/div[2]/*')
    monthyearnavigate(checkinmonthyear)
    r.click(f'//*[contains(@aria-label,"{checkin}")]')
    monthyearnavigate(monthyearconversion(checkout))
    r.click(f'//*[contains(@aria-label,"{checkout}")]')
    print('Done.')
def enter_personnel(adult,child,infant):
    """Set the guest counts by clicking the "+" steppers the requested
    number of times; handles two alternative widget layouts."""
    r.click('//*[@role="search"]/div/div/div[5]/div/button')
    print('Entering Personnel Information..')
    #r.click('//*[@id="filter-menu-chip-group"]/div[3]/*')
    # Newer layout exposes generic "increase value" buttons.
    if r.exist('(//*[@aria-label="increase value"])[1]') == True:
        for _i in range (adult):
            r.click('(//*[@aria-label="increase value"])[1]')
        for _i in range (child):
            r.click('(//*[@aria-label="increase value"])[2]')
        for _i in range (infant):
            r.click('(//*[@aria-label="increase value"])[3]')
    else:
        # Older layout uses per-stepper buttons keyed by aria-describedby.
        for _i in range (adult):
            r.click('//*[@aria-describedby="subtitle-label-stepper-adults"][2]')
        for _i in range (child):
            r.click('//*[@aria-describedby="subtitle-label-stepper-children"][2]')
        for _i in range (infant):
            r.click('//*[@aria-describedby="subtitle-label-stepper-infants"][2]')
    #r.click('//*[@id="filter-panel-save-button"]')
    print('Done.')
def click_search():
    """Submit the search form."""
    r.click('//button[@type="submit"]')
def snap_map():
    """Screenshot the search-results map pane into map.jpg."""
    print('Downloading Map..')
    r.snap('//*[@id="ExploreLayoutController"]/div/div[3]/aside/div',"map.jpg")
    # Fixed: the closing message previously printed a stray quote ('Done."').
    print('Done.')
def get_stay_url():
    """Collect the listing URLs of the top 10 search results.

    Two selector sets cover the normal result layout and the "FMP"
    (featured/Airbnb Plus) layout. Returns a list of 10 absolute URLs.
    """
    url= [None] * 10 #catching top 10 in case of airbnb plus
    if (r.exist('//*[@id="FMP-target"]/div/div/div/div/div[1]/div/div/div/div[1]/a') == True):
        url[0]=URL+r.read('//*[@id="FMP-target"]/div/div/div/div/div[1]/div/div/div/div[1]/a/@href')
        # Results 2..10 share the same selector with a varying index.
        for i in range(2,11) : url[i-1]=URL+r.read(f'//*[@id="FMP-target"]/div/div/div/div/div[{i}]/div/div/div/div[1]/a/@href')
    else:
        url[0]=URL+r.read('//*[@itemprop="itemList"]/div/div/div/div[2]/div/div/div/div/div[1]/div/div/div/div/a/@href')
        for i in range(2,11) : url[i-1]=URL+r.read(f'//*[@itemprop="itemList"]/div/div/div/div[2]/div/div/div/div/div[{i}]/div/div/div/div/a/@href')
    #print(url)
    return(url)
def extract_stay_info_as_data(): #Generates URL/text in dict instead, shorten time for upload/download, more unified
    """Extract name/description/inventory/price/rating/photo data for the
    top 5 stays.

    Returns a dict keyed "0".."4"; each value holds the listing's text
    fields plus up to 10 picture URLs and captions. Airbnb Plus ("whale")
    listings use a different page layout and are skipped, compensated for
    by the offset ``k``.
    """
    def _empty_listing():
        # One record per stay; picurl/pictext hold at most 10 photos each.
        return {
            "name": "",
            "description": "",
            "inventory": "",
            "price": "",
            "rating": "",
            "picurl": [None] * 10,
            "pictext": [None] * 10,
            "url": "",
            "coordinates": "",
        }

    # Replaces five hand-copied identical dict literals with a factory.
    data = {str(i): _empty_listing() for i in range(5)}
    print('Extracting Top 5 Stay Picture Information (10 Image Max)..')
    url = get_stay_url()
    i = 0
    k = 0  # compensating offset for skipped Airbnb Plus listings
    while (i < 5):
        data[str(i)]["url"] = url[i + k]
        r.url(url[i + k])
        print(f'Extracting Text Data - Homestay {i+1}')
        if (r.exist('//*[@itemprop="name"]/span/h1/span') == True):
            # lat,lng pair embedded in the Google Maps link query string.
            data[str(i)]["coordinates"] = r.read('//*[@data-veloute="map/GoogleMap"]/div/div/div/div[2]/a/@href').split("=", 1)[1].split("&",1)[0]
            data[str(i)]["name"] = r.read('//*[@itemprop="name"]/span/h1/span')
            data[str(i)]["description"] = r.read('//*[@href="#neighborhood"]/div')
            data[str(i)]["inventory"] = r.read('//*[@id="room"]/div[2]/div/div[2]/div/div/div[3]/div/div/div[1]/div/div/div[1]/div') + " " + r.read('//*[@id="room"]/div[2]/div/div[2]/div/div/div[3]/div/div/div[1]/div/div/div[2]/div') + " " + r.read('//*[@id="room"]/div[2]/div/div[2]/div/div/div[3]/div/div/div[1]/div/div/div[3]/div') + " " + r.read('//*[@id="room"]/div[2]/div/div[2]/div/div/div[3]/div/div/div[1]/div/div/div[4]/div')
            # Total price; the booking form has two possible layouts.
            if (r.present('//*[@id="book_it_form"]/div[4]/div[2]') == True):
                data[str(i)]["price"] = r.read('//*[@id="book_it_form"]/div[4]/div[2]').split("Total",1)[1]
            else:
                data[str(i)]["price"] = r.read('//*[@id="book_it_form"]/div[2]').split("Total",1)[1] #Total Price
            if r.present('//*[@data-heading-focus="review header"]/div'):
                data[str(i)]["rating"] = r.read('//*[@data-heading-focus="review header"]/div/div/@aria-label')+" ("+r.read('//*[@data-heading-focus="review header"]/div/span')+")"
            else:
                data[str(i)]["rating"] = "No Reviews Yet"
            r.click('//*[@data-veloute="hero-view-photos-button"]')
            j = 0
            # Walk the photo slideshow (at most 7 photos per listing).
            while (1):
                j = j + 1
                print(f'Extracting Picture Data - Homestay {i+1} Photo {j}')
                r.wait(0.4)
                if (r.exist('//img[@data-veloute="slideshow-image"]/@src') == True):
                    data[str(i)]["picurl"][j-1] = r.read('//img[@data-veloute="slideshow-image"]/@src')
                    if (r.present('//*[@data-veloute="slideshow-modal"]/div/div/div[2]/div[2]/div[2]/div[2]/div') == True):
                        data[str(i)]["pictext"][j-1] = r.read('//*[@data-veloute="slideshow-modal"]/div/div/div[2]/div[2]/div[2]/div[2]/div')
                print(f'Homestay {i+1} Photo {j} extracted!')
                if (r.exist('//button[@aria-label="Next"]') == False or j >= 7): break
                r.click('//button[@aria-label="Next"]')
        else:
            i = i - 1  # Detects Whales (Airbnb Plus spoils the format alot)
            k = k + 1  # Compensating Constant k
            print("WHALE detected, adding one more loop..")
        i = i + 1
    print('Done.')
    return data
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2020-02-26 06:57:59
# @Author : mutudeh (josephmathone@gmail.com)
# @Link : ${link}
# @Version : $Id$
class TreeNode(object):
    """A binary-tree node: a value plus left/right child links."""

    def __init__(self, x):
        """Create a leaf carrying *x*; both children start empty."""
        self.val, self.left, self.right = x, None, None
class Solution(object):
    """Level-order (breadth-first) traversal of a binary tree."""

    def levelOrder(self, root):
        """Return a list of lists of node values, one list per level,
        from the root downwards. An empty tree yields []."""
        if not root:
            return []
        levels = []
        frontier = [root]
        while frontier:
            # Record this level, then advance to the children.
            levels.append([node.val for node in frontier])
            nxt = []
            for node in frontier:
                if node.left:
                    nxt.append(node.left)
                if node.right:
                    nxt.append(node.right)
            frontier = nxt
        return levels
# Demo: build a small tree and print its level-order traversal.
s = Solution()
head = TreeNode(1)
head.left = TreeNode(2)
head.right = TreeNode(3)
head.right.left = TreeNode(4)
head.right.right = TreeNode(5)
print(s.levelOrder(head))  # expected: [[1], [2, 3], [4, 5]]
import os
import zlib
import math
import struct
import copy
import chromosome.gene as gene
import chromosome.serializer as serializer
import chromosome.deserializer as deserializer
# Fixed 8-byte PNG file signature (Python 2 byte-string literal).
PNG_SIGNATURE = '\x89\x50\x4e\x47\x0d\x0a\x1a\x0a'
class PNGGene(gene.AbstractGene):
    '''
    The PNGGene represents a single PNG chunk held in memory.

    Using the PNGDeserializer, we read the contents of a PNG file
    and hold them in memory. Each PNG chunk corresponds to a PNGGene
    object. The contents of the PNG chunk are fuzzed in memory. We have
    the capability to fuzz specific parts of the chunk's contents. For
    example, it is useless to fuzz the CRC field of a PNG chunk.

    NOTE(review): this module relies on Python 2 string semantics --
    ``str`` doubles as a byte string (see serialize/fix_crc).
    '''
    def __init__(self, chunk):
        '''
        Initialise the gene from a parsed chunk dict with the keys
        'length', 'name' (big-endian uint32), 'data' and 'crc'.
        '''
        super(PNGGene, self).__init__()
        self.length = chunk['length']
        self.name = chunk['name']
        self.data = chunk['data']
        self.crc = chunk['crc']
    def anomaly(self):
        '''
        If anomaly returns True, then the current
        gene should not be fuzzed.
        '''
        # Zero-length chunks carry no payload worth mutating.
        if self.length == 0:
            return True
        else:
            return False
    def is_equal(self, other):
        '''
        To identify PNG chunks of same type.

        Chunks of the same type compare equal, except IEND -- the
        terminator chunk is treated as unique.
        '''
        if not isinstance(other, self.__class__):
            return False
        if self.name == other.name and PNGGene.asciiname(self.name) != 'IEND':
            return True
        else:
            return False
    # This function must be implemented in order for the gene to be
    # written back out by the serializer.
    def serialize(self):
        '''
        This function is called to serialize in-memory data of a PNG chunk.

        Layout written: 4-byte big-endian length, 4-byte chunk name,
        payload, then the (recomputed) 4-byte CRC.
        '''
        self.fix_crc()
        bytestring = ''
        chunk_data = super(PNGGene, self).serialize()
        # Length field reflects the possibly-mutated payload size.
        bytestring += struct.pack('>I', len(chunk_data))
        bytestring += struct.pack('>I', self.name)
        bytestring += chunk_data
        # NOTE(review): CRC is computed over self.data (in fix_crc) while
        # the payload written here comes from super().serialize(); these
        # are assumed to be identical -- confirm.
        bytestring += struct.pack('>I', self.crc)
        return bytestring
    def fix_crc(self):
        '''
        re-calculates the Gene's CRC checksum.

        Per the PNG format the CRC covers the chunk name plus the chunk
        data, but not the length field.
        '''
        checksum = zlib.crc32(
            struct.pack('>I', self.name)
        )
        # Mask to an unsigned 32-bit value so struct.pack('>I', ...) accepts it.
        self.crc = zlib.crc32(
            self.data, checksum
        ) & 0xffffffff
    @staticmethod
    def asciiname(chunkname):
        '''
        Converts a packed uint32 chunk name to its 4-char ascii form
        (e.g. 0x49454e44 -> 'IEND') and returns it.
        '''
        return '%c%c%c%c' % (
            (chunkname >> 24) & 0xFF,
            (chunkname >> 16) & 0xFF,
            (chunkname >> 8) & 0xFF,
            (chunkname & 0xFF)
        )
class PNGSerializer(serializer.BaseSerializer):
    '''
    The PNG Serializer.
    This class is used to serialize a list of PNGGenes into a file. Since
    PNG is just a chunk-based format, there is no tree of genes, only
    a flat list. During serialization, the CRC of each chunk is
    fixed and the IDAT chunks, which are required to be compressed, are
    deflated using zlib.
    '''
    def __init__(self):
        super(PNGSerializer, self).__init__()
    @staticmethod
    def deflate_idat_chunks(genes):
        '''
        deflate_idat_chunks takes as input a number of genes. Data stored
        only in IDAT genes is collected in a bytestring and it is compressed
        using the zlib module. Then the compressed bytestring is divided
        again and copied in genes. This functions returns a list with the
        deflated genes. Keep in mind that this function is working with a
        deep copy of the genes given as input. Hence, do not worry for your
        data in the genes passed as argument.
        '''
        indices = list()
        deflated_genes = copy.deepcopy(genes)
        datastream = str()
        # Concatenate all IDAT payloads into one stream, remembering
        # which positions they came from.
        for idx, curr_gene in enumerate(genes):
            if PNGGene.asciiname(curr_gene.name) == 'IDAT':
                indices.append(idx)
                datastream += curr_gene.get_data()
        comp = zlib.compress(datastream)
        idatno = len(indices)
        if idatno > 0:
            # Split the compressed stream evenly over the original IDAT
            # slots; the last one absorbs any remainder.
            chunk_len = int(math.ceil(float(len(comp)) / float(idatno)))
            for cnt, index in enumerate(indices):
                start = cnt * chunk_len
                if index != indices[-1]:
                    deflated_genes[index].set_data(
                        comp[start : start+chunk_len])
                else:
                    deflated_genes[index].set_data(
                        comp[start : ]
                    )
                # Keep the declared length in sync with the new payload.
                deflated_genes[index].length = len(
                    deflated_genes[index].get_data()
                )
        return deflated_genes
    def serialize(self, genes):
        '''
        This method serializes each one of the genes given as argument. The
        serialized bytestring of each of the genes is appended in a buffer
        that contains the PNG header. The bytestring of the whole PNG
        is returned.
        '''
        bytestring = PNG_SIGNATURE
        deflated_genes = PNGSerializer.deflate_idat_chunks(genes)
        bytestring += super(PNGSerializer, self).serialize(deflated_genes)
        return bytestring
class PNGDeserializer(deserializer.BaseDeserializer):
    '''
    A parser for PNG files.
    This class is used to parse the chunks of a PNG file and construct
    PNGGene objects with the contents of the chunks. Moreover, the
    deserializer will perform decompression of the zipped IDAT data in
    order to fuzz it directly in memory.
    '''
    # Per-instance parsing state (reset by _prepare/deserialize):
    fsize = None    # total size of the input file in bytes
    fstream = None  # open binary file handle
    chunks = None   # list of parsed chunk dicts
    def __init__(self):
        super(PNGDeserializer, self).__init__()
        self.fsize = 0
        self.fstream = None
        self.chunks = list()
    def deserialize(self, filename):
        '''
        Parses the chosen PNG file and returns a list of PNGGene
        objects, one per chunk, with IDAT data already decompressed.
        '''
        # initialize input file
        genes = list()
        # open and read PNG header
        self._prepare(filename)
        self._parse_signature()
        # parse data chunks
        for chunk in self._parse_chunks():
            self.chunks.append(chunk)
        # decompress IDAT chunks (zlib streams)
        self._inflate_idat_chunks()
        # initialize gene list with inflated chunks
        for chunk in self.chunks:
            genes.append(PNGGene(chunk))
        # Reset state so the instance can be reused for another file.
        self.fstream.close()
        self.fsize = 0
        self.chunks = list()
        return genes
    def _inflate_idat_chunks(self):
        '''
        This method takes all IDAT PNG chunks that were read and decompresses
        their data using the zlib module, redistributing the inflated bytes
        over the original IDAT slots.
        '''
        datastream = str()
        indices = list()
        for idx, chunk in enumerate(self.chunks):
            if PNGGene.asciiname(chunk['name']) == 'IDAT':
                datastream += chunk['data']
                indices.append(idx)
        decomp = zlib.decompress(datastream)
        idatno = len(indices)
        # NOTE(review): unlike the serializer, there is no idatno > 0
        # guard here -- a PNG with no IDAT chunk would raise
        # ZeroDivisionError (and zlib.decompress('') would fail first).
        chunk_len = int(math.ceil(float(len(decomp)) / float(idatno)))
        for cnt, index in enumerate(indices):
            start = cnt * chunk_len
            if index != indices[-1]:
                self.chunks[index]['data'] = decomp[start : start + chunk_len]
            else:
                self.chunks[index]['data'] = decomp[start:]
            self.chunks[index]['length'] = len(self.chunks[index]['data'])
    def _parse_signature(self):
        '''
        The first 8 bytes of every PNG image must be the signature.

        NOTE(review): only the length is checked here; the bytes are not
        compared against PNG_SIGNATURE -- confirm whether that is intended.
        '''
        signature = self.fstream.read(8)
        assert len(signature) == 8
    def _parse_chunks(self):
        '''
        A generator that parses all chunks of the chosen PNG image,
        yielding dicts with 'index', 'length', 'name', 'data' and 'crc'.
        '''
        index = 0
        while self.fsize > self.fstream.tell():
            index += 1
            chunk = dict()
            chunk['index'] = index
            # Chunk wire layout: length, name, payload, CRC (all big-endian).
            chunk['length'], = struct.unpack('>I', self.fstream.read(4))
            chunk['name'], = struct.unpack('>I', self.fstream.read(4))
            chunk['data'] = self.fstream.read(chunk['length'])
            chunk['crc'], = struct.unpack('>I', self.fstream.read(4))
            yield chunk
    def _get_filesize(self):
        '''
        Returns the file size, restoring the current stream position.
        '''
        where = self.fstream.tell()
        self.fstream.seek(0, 2)
        size = self.fstream.tell()
        self.fstream.seek(where, 0)
        return size
    def _prepare(self, filename):
        '''
        Preparation before parsing: validate the path, open the stream
        and record the file size.
        '''
        if not os.path.isfile(filename):
            raise IOError('%s is not a regural file.' % filename)
        self.chunks = list()
        self.fstream = open(filename, 'rb')
        self.fsize = self._get_filesize()
|
# -*- coding: utf-8 -*-
"""
activity stream
Customer Portal Home
:copyright: (c) 2012-2013 by Openlabs Technologies & Consulting (P) Limited
:license: GPLv3, see LICENSE for more details.
"""
from decimal import Decimal
import pytz
import hashlib
import base64
import urllib
from datetime import timedelta
from wtforms import (Form, TextField, SelectField, TextAreaField,
BooleanField, validators)
from nereid import login_required, render_template
from trytond.model import ModelSQL, ModelView, fields
from trytond.pool import Pool
class NereidUser(ModelSQL, ModelView):
    # Extends the nereid.user model with a reverse link to its activity
    # stream: all activity.stream1 records whose `actor` is this user.
    # (No class docstring on purpose -- old Tryton uses __doc__ as the
    # registered model description.)
    _name = "nereid.user"
    activity_stream = fields.One2Many('activity.stream1', 'actor',
        'Activity Stream'
    )
class ActivityStream(ModelSQL, ModelView):
    "Activity Stream"
    _name = 'activity.stream1'
    """
    Model created for Activity Stream.
    """
    # NOTE(review): the triple-quoted string above is a bare string
    # statement, not this class's docstring ("Activity Stream" is);
    # it has no effect at runtime.
    # The user who performed the activity.
    actor = fields.Many2One('nereid.user', 'Nereid Users',
        required= True, select=True
    )
    # What the actor did (free-form verb, e.g. "posted").
    verb = fields.Char("Verb")
    # The record the activity refers to; any installed model may be chosen.
    object = fields.Reference("Object", selection='models_get',
        select=True
    )
    # The user at whom the activity is directed.
    target = fields.Many2One('nereid.user',
        'Target', required= True
    )
    create_date = fields.DateTime('Published On', readonly=True, select=True)
    def models_get(self):
        # Build the Reference selection list: every installed model as a
        # (internal name, display name) pair.
        pool = Pool()
        model_obj = pool.get('ir.model')
        model_ids = model_obj.search([])
        res = []
        for model in model_obj.browse(model_ids):
            res.append([model.model, model.name])
        return res
ActivityStream()  # register the model in the Tryton pool (old-style API)
|
import requests
from lxml import etree
import sys
def login():
    """Stub for the authenticated login flow; not implemented yet."""
    pass
def getcsrfmiddlewaretoken():
    """Fetch the login page and print the CSRF token value(s) found in it."""
    login_url = 'https://www.appannie.com/account/login/?_ref=header'
    request_headers = {
        "user-agent": 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:55.0) Gecko/20100101 Firefox/55.0',
    }
    response = requests.get(login_url, headers=request_headers)
    # The token sits in a hidden <input name="csrfmiddlewaretoken"> field.
    tokens = etree.HTML(response.text).xpath('//input[@name="csrfmiddlewaretoken"]/@value')
    print(tokens)
if __name__ == '__main__':
    # Ad-hoc manual check: dump the CSRF token from the login page.
    getcsrfmiddlewaretoken()
# Mozilla/5.0 (Windows NT 6.1; WOW64; rv:55.0) Gecko/20100101 Firefox/55.0
import os
import glob
from subprocess import call
def list_raster(indir):
    # Collect every *.tif directly under `indir` and write the paths,
    # one per line, to <indir>\tif_list.txt (input for gdalbuildvrt).
    # Python 2 code; Windows backslash path separators are assumed.
    tif_list = glob.glob(indir+'\*.tif')
    with open(indir+'\\tif_list.txt', 'wb') as f:
        for fn in tif_list:
            path, name = os.path.split(fn)
            print fn
            f.writelines(fn+'\n')
    return
def build_vrt(indir, outdir):
    # Build a multi-band VRT (one band per listed file) from the first
    # *.txt file list found in `indir`, writing <outdir>\sub.vrt via
    # the gdalbuildvrt command-line tool.
    list_dir = glob.glob(indir+'\*.txt')
    print '\nfound %s in %s' % (list_dir[0], indir)
    print '\nbuilding vrt...'
    # be careful when building vrt for landsat8 because
    # bands 10 and 11 becoming 2 and 3 in the vrt
    # do not forget to change the name of the output vrt
    ndvi_anom = outdir+"\\sub.vrt"
    vrt_make = ["gdalbuildvrt", "-separate", "-input_file_list", list_dir[0], ndvi_anom]
    call(vrt_make)
    return
def main():
    # Driver: list rasters and build a VRT for both the reference and
    # the subject image folder (each VRT lands in its own folder).
    refimg_dir = r"reference image"
    subimg_dir = r"subject image"
    out_dir = os.getcwd()  # NOTE(review): computed but never used
    list_raster(refimg_dir)
    build_vrt(refimg_dir, refimg_dir)
    list_raster(subimg_dir)
    build_vrt(subimg_dir, subimg_dir)
if __name__ == "__main__":
    main()
|
from asyncio import Transport
class MockTransportBase(Transport):
    """In-memory asyncio Transport double for protocol tests.

    Counts accepted writes, optionally defers write/close delivery
    through an event loop's ``call_later`` (see ``setWriteDelay``), and
    notifies the attached protocol with ``connection_lost(None)`` on
    close. Subclasses override ``_write`` to route the payload somewhere.
    """

    def __init__(self, myProtocol=None, extra=None):
        super().__init__(extra)
        self.writeCount = 0   # number of write() calls accepted while open
        self.loop = None      # event loop used to delay delivery, if any
        self.delay = None     # seconds to delay write/close delivery
        self.sink = None      # destination; meaning is subclass-defined
        self.protocol = myProtocol
        self.closed = False

    def setMyProtocol(self, protocol):
        """Attach (or replace) the protocol this transport feeds."""
        self.protocol = protocol

    def setWriteDelay(self, loop, delay=1):
        """Deliver writes/closes via ``loop.call_later(delay, ...)``.

        Passing ``loop=None`` with a positive delay disables delaying,
        guarding against scheduling on a missing loop.
        """
        # Fix: use `is None` identity check instead of `== None`.
        if loop is None and delay > 0:
            self.loop = None
            self.delay = None
        else:
            self.loop = loop
            self.delay = delay

    def _close(self, *args):
        # Notify the protocol exactly once, then drop the reference.
        if self.protocol:
            self.protocol.connection_lost(None)
            self.protocol = None

    def _write(self, data):
        # Base class drops the payload; subclasses route it.
        pass

    def write(self, data):
        if self.closed:
            return
        self.writeCount += 1
        if self.delay:
            self.loop.call_later(self.delay, self._write, data)
        else:
            self._write(data)

    def close(self, *args):
        if self.closed:
            return
        self.closed = True
        if self.delay:
            self.loop.call_later(self.delay, self._close, *args)
        else:
            self._close(*args)
class MockTransportToProtocol(MockTransportBase):
    """Transport double that delivers writes straight into a peer protocol."""

    @classmethod
    def CreateTransportPair(cls, protocol1=None, protocol2=None):
        """Build two cross-wired transports, like an in-memory socket pair."""
        first, second = cls(protocol1), cls(protocol2)
        first.setRemoteTransport(second)
        second.setRemoteTransport(first)
        return (first, second)

    def setRemoteTransport(self, remoteTransport):
        """Point this transport at its peer."""
        self.sink = remoteTransport

    def _write(self, data):
        # Hand the bytes to the peer's protocol, or fail loudly.
        if not self.sink:
            raise Exception("Write failed! No remote destination configured yet")
        if self.sink.protocol:
            self.sink.protocol.data_received(data)
        else:
            raise Exception("Write failed! Remote protocol already appears closed")

    def _close(self, *args):
        # Closing one side also closes the peer (idempotent via `closed`).
        if self.sink:
            self.sink.close()
        super()._close()
class MockTransportToStorageStream(MockTransportBase):
    """Transport double that appends every write to a backing stream."""

    def __init__(self, sinkStream, myProtocol=None, extra=None):
        super().__init__(myProtocol, extra)
        self.sink = sinkStream  # any object with a write(data) method

    def _write(self, data):
        # Forward the payload verbatim to the backing stream.
        self.sink.write(data)
|
from googleapiclient.discovery import build
from pymongo import MongoClient
from datetime import datetime
import json
#requests da lib no lambda - para rodar localmente -> import requests
# import requests
from botocore.vendored import requests
def lambda_handler(event, context):
    """AWS Lambda entry point: for every saved customer search, query
    Google Custom Search (first 50 results) and persist each result whose
    link matches the customer's site, with its rank, price, image and
    review data, into the ResultadoBuscaGoogle collection.

    Returns a 200 response with a fixed body once every search is processed.
    """
    my_api_key = "##############################"
    my_cse_id = "#############################"
    client = MongoClient('########################')
    db = client.boavenda
    collection_busca_site_cliente = db['BuscaSiteCliente']
    collection_resultado_busca_google = db['ResultadoBuscaGoogle']
    def google_search(search_term, api_key, cse_id, start):
        # One CSE page (10 results) starting at `start`; returns [] when
        # the response has no 'items' (results exhausted or API error).
        r = requests.get('https://customsearch.googleapis.com/customsearch/v1?q=' + search_term + '&cx=' + cse_id + '&key=' + api_key + '&start=' + str(start) + '&gl=br&lr=lang_pt')
        if 'items' in r.json():
            return r.json()['items']
        else:
            return []
    # Fetch all saved searches from Mongo and loop over them.
    cursor = collection_busca_site_cliente.find()
    buscas = list(cursor)
    for b in buscas:
        link_busca = b['link']
        busca = b['busca']
        results = []
        # CSE pagination is 1-based: pages start at 1, 11, 21, 31, 41.
        for i in range(0, 5):
            results.extend(google_search(busca, my_api_key, my_cse_id, i*10+1))
        for i, result in enumerate(results):
            if link_busca in result['link']:
                dados = {}
                dados['posicaoGoogle'] = i+1
                # Fix: narrowed the original bare `except:` clauses to the
                # specific lookup/conversion failures expected here, so
                # real bugs (and KeyboardInterrupt/SystemExit) surface.
                try:
                    dados['buscaId'] = b['buscaId']
                except KeyError:
                    dados['buscaId'] = b['_id']
                try:
                    dados['preco'] = float(result['pagemap']['offer'][0]['price'])
                except (KeyError, IndexError, TypeError, ValueError):
                    dados['preco'] = None
                try:
                    dados['imagem'] = result['pagemap']['cse_image'][0]['src']
                except (KeyError, IndexError, TypeError):
                    dados['imagem'] = None
                try:
                    dados['review'] = result['pagemap']['review']
                except KeyError:
                    dados['review'] = None
                dados['dtIns'] = datetime.utcnow()
                collection_resultado_busca_google.insert_one(dados)
    return {
        'statusCode': 200,
        'body': json.dumps('Rodou')
    }
# lambda_handler(None,None) |
"""
Programma voor het berekenen van de frequenties voor staande
golven in een half gesloten buis.
"""
import numpy as np
import matplotlib.pyplot as plt
class Freq(object):
    """
    Frequency object.

    Computes a table of standing-wave frequencies for a half-closed
    pipe of length *l* given the speed of sound *v*. (Python 2 code.)
    """
    def __init__(self, l, v):
        # l: pipe length [m]; v: speed of sound [m/s]
        self.v = v
        self.l = l
    def plot(self,f,n):
        """
        Plot the standing-wave envelope for frequency *f*; the harmonic
        index *n* is only used in the plot title.
        """
        w = self.v/f  # wavelength [m]
        print w
        x = np.arange(0,1.32,0.01)  # positions along the pipe [m]
        # Envelope of the standing wave and its mirror image.
        y = np.cos(1.0/w*x*2*np.pi)
        y2 = -y
        plt.plot(x,y,x,y2)
        plt.title("n={0}".format(n))
        plt.xlabel('lengte [m]')
        plt.ylabel('snelheid [m/s]')
    def table(self):
        """
        Build the frequency table.

        This function first defines a series for *n*:
        .. math::
            n = 1, 2, 3, 4, 5
        and then computes, for each *n*, the wavelength and frequency.
        The frequency can be dialled in on the function generator, so the
        standing wave belonging to that *n* can be excited.
        .. math::
            l=\\left(2 \\cdot n - 1 \\right) \\cdot \\frac{1}{4} \\lambda \\\\
            \\lambda = \\frac{4}{2n-1} \\cdot l \\\\
            f = \\frac{v}{\\lambda} \\\\
        """
        v = self.v#speed of sound in air in m/s
        l = self.l#pipe length in meters
        n = np.arange(1,6)
        w = 4.0/(2*n-1)*l#wavelength in meters
        f = v/w#frequency in Hz
        print f
        print "frequentie \t n \t lambda"
        print ""
        for i, nu in enumerate(n):
            print "{0} \t {1} \t {2}".format(f[i], nu, (2*nu-1)*0.25)
        return f
if __name__ == '__main__':
    # Pipe of 1.32 m, speed of sound 344 m/s.
    freq = Freq(1.32, 344)
    f=freq.table()
    print f[0]
    # Plot the first four standing waves in a 2x2 grid.
    plt.subplot(221)
    freq.plot(f[0],0)
    plt.subplot(222)
    freq.plot(f[1],1)
    plt.subplot(223)
    freq.plot(f[2],2)
    plt.subplot(224)
    freq.plot(f[3],3)
    plt.show()
"""this is a example package that im ..."""
from datetime import date
from math import pi
import webbrowser as w
from random import choice
import turtle as t
import wikipedia # You must install wikipedia before you're useing wikipedia_info
__version__ = '0.0.5'
# Exponent bounds used by poower(); inputs outside (-40, 40) are refused.
MAXPOOWER = 39.99999999999999
MINPOOWER = -39.99999999999999
# Number of elements in the periodic table.
periodic_table_many = 118
def CircleArea(radius):
    """Print the area of a circle with the given radius."""
    area = pi * radius ** 2
    print('Area:', str(area))
def GlobeVolume(radius):
    """Print the volume of a sphere with the given radius."""
    volume = 4 / 3 * pi * radius ** 3
    print('Volume:', str(volume))
def CircleCircumference(radius):
    """Print the circumference of a circle with the given radius."""
    circumference = pi * 2 * radius
    print('Circumference:', str(circumference))
def GlobeArea(radius):
    """Print the surface area of a sphere with the given radius."""
    surface = 4 * pi * radius ** 2
    print('Area:', str(surface))
def Not_use_this(url):
    "this is done with: while True: w(webbrowser).open(url)"
    # WARNING: infinite loop -- opens `url` in a new browser tab forever.
    while True: w.open(url)
def wikipedia_info(search_or_find_for, search_or_find):
    # 'search' prints matching page titles; 'find' prints the page
    # summary. Any other mode silently does nothing.
    if search_or_find == 'search':
        s = wikipedia.search(search_or_find_for)
        print(s)
    elif search_or_find == 'find':
        result = wikipedia.page(search_or_find_for)
        print(result.summary)
def turtle_circle(radius, color, x, y, shape=None, hideturtle=True):
    "WARNING: It can be a bug without i\'m know it, so be careful"
    # Draw a filled circle of the given radius and color with its
    # bottom anchored at (x, y) on the shared turtle canvas.
    t.penup()
    t.goto(x, y)
    t.shape(shape)
    t.pendown()
    t.pensize(1)
    t.color(color)
    t.begin_fill()
    t.circle(radius)
    t.end_fill()
    t.penup()
    if hideturtle is True:
        t.hideturtle()
def rektangel(width, height, color, x, y, shape=None, hideturtle=True):
    "WARNING: It can be a bug without i\'m know it, so be careful"
    # Draw a filled width x height rectangle starting at (x, y); each
    # loop pass traces two sides (forward + 90-degree right turn twice).
    t.penup()
    t.goto(x, y)
    t.shape(shape)
    t.pendown()
    t.pensize(1)
    t.color(color)
    t.begin_fill()
    for raknare in range(1, 3):
        t.forward(width)
        t.right(90)
        t.forward(height)
        t.right(90)
    t.end_fill()
    t.penup()
    if hideturtle is True:
        t.hideturtle()
class prime:
    # NOTE(review): plain name/age record -- seemingly unrelated to the
    # prime-number helpers below; confirm intended use.
    def __init__(self, name, age):
        self.name = name
        self.age = age
def primes_number(start, end):
    """Print, for each n in [start, end), whether n is prime, or show one
    factorization ('n equals x * n//x'). 0 and 1 are reported as neither
    prime nor composite.

    Bug fix: the original referenced an undefined name ``number`` in the
    for-else branch, raising NameError whenever a candidate had no factor.
    """
    for n in range(start, end):
        for x in range(2, n):
            if n % x == 0:
                print(n, 'equals', x, '*', n // x)
                break
        else:
            # loop fell through without finding a factor
            if (int(n) != 0) and (int(n) != 1):
                print(n, 'is a prime number')
            else:
                print(n, 'is not a prime number or a composite number')
def primes_number2(number):
    """Print whether *number* is prime, or show one factorization;
    0 and 1 are reported as neither prime nor composite."""
    for divisor in range(2, number):
        if int(number) in (0, 1):
            break
        if number % divisor == 0:
            print(number, 'equals', divisor, '*', number//divisor)
            break
    else:
        # loop fell through without finding a factor
        if int(number) not in (0, 1):
            print(number, 'is a prime number')
        else:
            print(number, 'is not a prime number or a composite number')
def no_fill_circle(radius, color, x, y, shape=None, hideturtle=True):
    "WARNING: It can be a bug without i\'m know it, so be careful"
    # Outline-only circle (no begin_fill/end_fill), bottom at (x, y).
    t.penup()
    t.goto(x, y)
    t.shape(shape)
    t.pendown()
    t.pensize(1)
    t.color(color)
    t.circle(radius)
    t.penup()
    if hideturtle is True:
        t.hideturtle()
def no_fill_rektangel(width, height, color, x, y, shape=None, hideturtle=True):
    "WARNING: It can be a bug without i\'m know it, so be careful"
    # Outline-only width x height rectangle starting at (x, y).
    # Bug fix: this was a copy of rektangel() and still called
    # begin_fill()/end_fill(), so the "no fill" variant actually drew a
    # filled rectangle; the fill calls are removed.
    t.penup()
    t.goto(x, y)
    t.shape(shape)
    t.pendown()
    t.pensize(1)
    t.color(color)
    for raknare in range(1, 3):
        t.forward(width)
        t.right(90)
        t.forward(height)
        t.right(90)
    t.penup()
    if hideturtle is True:
        t.hideturtle()
def use_math(n, q):
    """Print product, sum, difference, quotient and remainder of n and q,
    always operating larger-argument-first for the ordered operations."""
    print('The product of these is: ' + str(n * q))
    print('The sum of these is ' + str(n + q))
    larger, smaller = (n, q) if n > q else (q, n)
    print('The differens of these is ' + str(larger - smaller))
    print(larger / smaller)
    print('The rest of these is ' + str(larger % smaller))
def fib(end_n):  # write Fibonacci series up to end_n
    """Print Fibonacci numbers below end_n on one line, then a newline."""
    current, following = 0, 1
    while current < end_n:
        print(current, end=' ')
        current, following = following, current + following
    print()
def fib2(end_n):  # return Fibonacci series up to end_n
    """Return the list of Fibonacci numbers strictly below end_n."""
    series = []
    current, following = 0, 1
    while current < end_n:
        series.append(current)
        current, following = following, current + following
    return series
def fab(end_n):
    """Print Fibonacci numbers below end_n, one per line, then a blank line."""
    current, following = 0, 1
    while current < end_n:
        print(current)
        current, following = following, current + following
    print()
def feb(numbers):
    "it starts with 0, 1"
    # Print the first `numbers` Fibonacci values, one per line.
    current, following = 0, 1
    for _ in range(0, numbers):
        print(current)
        current, following = following, current + following
def cheeseshop(kind, *arguments, **keywords):
    # Monty Python sketch demo: randomly answers whether `kind` is in
    # stock, then echoes the positional args and keyword pairs.
    print("-- Do you have any", kind, "?")
    kind1 = "-- I'm sorry, we're all out of " + kind
    kind2 = "-- Here you are your " + kind
    both = [kind1, kind2]
    ct = choice(both)  # nondeterministic: one of the two replies
    print(ct)
    for arg in arguments:
        print(arg)
    print("-" * 40)
    for kw in keywords:
        print(kw, ":", keywords[kw])
def poower(d, a):
    """Print d**a and a**d when both arguments lie strictly inside
    (-40, 40); otherwise report that an input is out of range."""
    both_in_range = -40 < d < 40 and -40 < a < 40
    if both_in_range:
        print(d ** a)
        print(a ** d)
    elif d <= MINPOOWER or a <= MINPOOWER:
        print('It\'s to small.')
    else:
        print('It\'s to big.')
def big(x, y, z, n, m, e):
    """Print the largest of the six arguments."""
    largest = max(x, y, z, n, m, e)
    print('The biggest number is ' + str(largest))
def small(x, y, z, n, m, e):
    """Print the smallest of the six arguments."""
    smallest = min(x, y, z, n, m, e)
    print('The smallest number is ' + str(smallest))
def secretry(password):
    # "New password" is just the input reversed; then asks the user (via
    # blocking input()) whether they like it -- the answer is ignored.
    print('your new password is: ' + ''.join(reversed(password)))
    answer = input('(yes or no)Even better? Write it here: ')
    if answer == 'yes':
        pass
def sec_on_days(days):
    """Print how many seconds fit in `days` days (special wording for 1)."""
    seconds = days * 24 * 60 * 60
    if days != 1:
        print('They are ' + str(seconds) + ' seconds on ' + str(days) + ' days.')
    else:
        print('They are 86400 seconds on one day.')
def min_on_days(days):
    """Print how many minutes fit in `days` days (special wording for 1)."""
    minuts = days * 24 * 60
    if days != 1:
        print('They are ' + str(minuts) + ' minuts on ' + str(days) + ' days.')
    else:
        print('They are 1440 minuts on one day.')
def sec_on_weeks(weeks):
    """Print how many seconds fit in `weeks` weeks."""
    seconds = weeks * 7 * 24 * 60 * 60
    print('They are ' + str(seconds) + ' seconds on ' + str(weeks) + ' weeks.')
def min_on_weeks(weeks):
    """Print how many minutes fit in `weeks` weeks."""
    minuts = weeks * 7 * 24 * 60
    print('They are ' + str(minuts) + ' minuts on ' + str(weeks) + ' weeks.')
def wekday(year, month, day):
    """Print the English weekday name for the given calendar date.

    Bug fix: the original printed the misspelling 'Thurday' for
    Thursday; the if/elif ladder is replaced by an index lookup into
    date.weekday()'s Monday=0..Sunday=6 numbering.
    """
    names = ('Monday', 'Tuesday', 'Wednesday', 'Thursday',
             'Friday', 'Saturday', 'Sunday')
    print(names[date(year, month, day).weekday()])
def whileee(while_word):
    # Repeatedly print `while_word` until the user answers 'no'
    # at the blocking input() prompt.
    while True:
        print(while_word)
        svar = input('More? (yes, no): ')
        if svar == 'no':
            break
def open_webb(webb):
    # Open `webb` (a URL) in the default web browser.
    w.open(webb)
def random_number3(x, y, z):
    # Print one of the three numbers, chosen at random.
    ord3 = [x, y, z]
    ord4 = choice(ord3)
    print(str(ord4))
def random_number4(x, y, z, n):
    # Print one of the four numbers, chosen at random.
    ord5 = [x, y, z, n]
    ord6 = choice(ord5)
    print(str(ord6))
def random_not_number3(x, y, z):
    # Print one of the three values (any type), chosen at random.
    wert3 = [x, y, z]
    wert4 = choice(wert3)
    print(wert4)
def random_not_number4(s, x, y, z, n):
    "if you see s, that you give s a number but not in the random"
    # `s` is deliberately excluded from the random pool.
    wert5 = [x, y, z, n]
    wert6 = choice(wert5)
    print(wert6)
def random_all():
    "WARNING: Don\'t use this in range, and use random_not_number"
    "if you see s, that you give s a number but not in the random"
    # NOTE(review): the second bare string above is a no-op statement
    # (not part of the docstring); this function only prints the warning.
    print('WARNING: Don\'t use this in range, and use random_not_number')
|
import numpy as np
import os
import matplotlib.pyplot as plt
from scipy import signal
from helperFunctions import *
from math import log
# Configuration and accumulators for EEG feature extraction (Python 2).
root_dir = os.path.abspath('./')
data_dir = os.path.join(root_dir, '../dataset', 'dataset3.5', 'subject1_ascii')
data_to_be_referred = [7,8,11,12,18,21,22,31]  # NOTE(review): not used below -- confirm
files = ['train_subject1_raw01.asc']#, 'train_subject1_raw02.asc', 'train_subject1_raw03.asc']
fs_Hz = 512.0 # assumed sample rate for the EEG data
NFFT = 512 # pick the length of the fft
overlap = NFFT - 50 # fixed step of 50 points
f_res_2 = []
pxx_res_2 = []
data = []  # one feature vector (PSD bins per channel) per 1-second window
pxx_temp = []  # accumulates PSD bins for the current window
counter = 0
avg = np.zeros(96)
eeg_data_uV = []  # raw samples, one row per time step
eeg_temp = []
target = []  # class label per sample
# fs = 5000.0
# lowcut = 500.0
# highcut = 1250.0
# T = 0.05
# nsamples = T * fs
# t = np.linspace(0, T, nsamples, endpoint=False)
# a = 0.02
# f0 = 600.0
# x = 0.1 * np.sin(2 * np.pi * 1.2 * np.sqrt(t))
# x += 0.01 * np.cos(2 * np.pi * 312 * t + 0.1)
# x += a * np.cos(2 * np.pi * f0 * t + .11)
# x += 0.03 * np.cos(2 * np.pi * 2000 * t)
# plt.figure(2)
# plt.clf()
# plt.plot(t, x, label='Noisy signal')
# y = butter_bandpass_filter(x, lowcut, highcut, fs, order=6)
# plt.plot(t, y, label='Filtered signal (%g Hz)' % f0)
# plt.xlabel('time (seconds)')
# plt.hlines([-a, a], 0, T, linestyles='--')
# plt.grid(True)
# plt.axis('tight')
# plt.legend(loc='upper left')
# plt.show()
#for file in files:
# Load one ASCII recording; helper returns per-sample uV rows and labels.
temp_eeg_data_uV, temp_target = loadData(os.path.join(data_dir, files[0]), fs_Hz)
eeg_data_uV.extend(temp_eeg_data_uV)
target.extend(temp_target)
#t_sec = (np.arange(f_eeg_data_uV.size))/fs_Hz
# eeg_temp = eeg_data_uV[1024:1536]
# eeg_temp = np.transpose(eeg_temp)
# eeg_temp_sel = eeg_temp[2]
# for row in eeg_temp:
# print "\n\n", row
# fft_plot(eeg_temp_sel)
# for i in range(32):
# # plt.subplot(2, 1, 1)
# # plt.xlim([512, 1024])
# # plt.plot(eeg_temp_sel)
# # print eeg_temp[2]
# #plt.show()
# f_eeg_temp = butter_bandpass_filter(eeg_temp_sel, 8, fs_Hz,'high')
# f_eeg_temp = butter_bandpass_filter(f_eeg_temp, 30, fs_Hz,'low')
# f_eeg_temp = butter_bandpass_filter(f_eeg_temp, 30, fs_Hz,'low')
# fft_plot(f_eeg_temp)
# plt.subplot(2, 1, 2)
# plt.xlim([0, 512])
#plt.ylim([5000, 7000])
# plt.plot(f_eeg_temp)
#plt.show()
# plt.figure(figsize=(9, 6))
# f, pxx = signal.welch(f_eeg_temp, fs_Hz, nperseg=512, noverlap=overlap)
# plt.plot(f, pxx)
# plt.ylim([0, 100])
# plt.xlabel("Frequency (Hz)")
# plt.ylabel("PSD (dB/Hz) %g"%i)
# plt.show()
#*********************************************************
print len(target)
counter = 0
# Slice the recording into non-overlapping 1-second (512-sample) windows;
# for each channel, band-limit to 8-30 Hz and keep every other Welch PSD
# bin in that range as a feature.
for i in range(len(eeg_data_uV)/512):
    one_sec_eeg_data_uV = eeg_data_uV[i*512:(i+1)*512]
    # print "****",len(one_sec_eeg_data_uV[1]),'\n\n\n'
    one_sec_eeg_data_uV = np.transpose(one_sec_eeg_data_uV)
    for row in range(len(one_sec_eeg_data_uV)):
        counter += 1
        f_eeg_data_uV = butter_bandpass_filter(one_sec_eeg_data_uV[row], 8, fs_Hz,'high')
        f_eeg_data_uV = butter_bandpass_filter(f_eeg_data_uV, 30, fs_Hz,'low')
        # NOTE(review): the 30 Hz low-pass is applied twice -- confirm intended.
        f_eeg_data_uV = butter_bandpass_filter(f_eeg_data_uV, 30, fs_Hz,'low')
        f, pxx = signal.welch(f_eeg_data_uV, fs_Hz, nperseg=512, noverlap=overlap)
        pxx_res_2 = pxx[8:31:2]
        # f_res_2.append(f[8:31:2])
        pxx_temp.extend(pxx[8:31:2])
        # print '*********',len(pxx[8:31:2])
        #print pxx_res_2
        # print counter,i
        # plt.xlabel(counter)
        # plt.plot(f[8:31:2], pxx[8:31:2],'r')
        # plt.plot(f, pxx,'g')
        # plt.ylim([0,1000])
        # plt.show()
    data.append(pxx_temp)
    # counter = 0
    pxx_temp = []
print data[0]
print counter
#**************************************
# for i in range(len(data)):
# print data[i],'\n'
# print 'lendata',len(data),'lentarget',len(target)
# print f_res_2
# print '------------------',len(pxx_res_2),'\n\n'
# for i in range(len(data)):
# # print pxx_res_2[i],'\n\n'
# for j in range(len(data[i])) :
# data[i][j] = log(data[i][j])
# print pxx_res_2[i],'\n\n'
# print pxx_res_2
# full_spec_PSDperBin, full_t_spec, freqs = convertToFreqDomain(f_eeg_data_uV, fs_Hz, NFFT, overlap)
# spec_PSDperBin = full_spec_PSDperBin[:, 1:-1:2] # get every other time slice
# t_spec = full_t_spec[1:-1:2] # get every other time slice
# make the spectrogram plot
#plt.pcolor(t_spec, freqs, 10*np.log10(spec_PSDperBin)) # dB re: 1 uV
# plt.clim(25-5+np.array([-40, 0]))
# plt.xlim(t_sec[0], t_sec[-1])
# if (t_lim_sec[2-1] != 0):
# plt.xlim(t_lim_sec)
# plt.ylim(f_lim_Hz)
# plt.xlabel('Time (sec)')
# plt.ylabel('Frequency (Hz)')
#plt.title(fname[12:])
|
from keras.models import load_model
import numpy as np
def load_models():
    # Load the three ensemble members from their HDF5 checkpoints
    # (the .h5 files must sit in the working directory).
    model1 = load_model('Halloween_model_edition_80.h5')
    print("\nmodel1 loaded")
    model2 = load_model('Halloween_model_edition_73.h5')
    print("\nmodel2 loaded")
    model3 = load_model("Halloween_model_edition_78.h5")
    print("\nmodel3 loaded")
    return model1, model2,model3
model1,model2,model3 = load_models()
# NOTE(review): np.savez_compressed will coerce the Keras model objects
# into pickled object arrays; this is not a supported way to persist
# Keras models (loading needs allow_pickle and may break across
# versions). Consider model.save(...) per model instead -- confirm intent.
np.savez_compressed('halloween_models.npz',
                    model_80=model1,
                    model_73=model2,
                    model_78=model3)
|
import setuptools
# Package metadata for the link_check console tool.
with open("docs/README.md", "r") as fh:
    long_description = fh.read()
setuptools.setup(
    name="linkcheck-pkg-MLJBrackett",
    version="1.1",
    author="Michael Brackett",
    author_email="mljbrackett@gmail.com",
    description="A simple link checking program",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/MLJBrackett/link-check",
    license="MIT",
    # Fix: runtime dependencies only. `argparse` is in the standard
    # library (since Python 2.7/3.2) and must not be pinned here;
    # black/flake8/coverage are development tools, moved to an extra so
    # `pip install .[dev]` still brings them in.
    install_requires=[
        "requests",
        "colorama",
    ],
    extras_require={
        "dev": ["black", "flake8", "coverage"],
    },
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    entry_points={
        "console_scripts": [
            "link_check = src.link_check:main_wrapper",
        ]
    },
    python_requires=">=3.8",
)
|
import tensorflow as tf
import horovod.tensorflow as hvd
# Ground Truth Shape: [npoint, 7 (w, l, h, x, y, z, r)]
# Prediction Shape: [npoint, 7 (w, l, h, x, y, z, r)]
'''
x (w)
^
0 | 1
|---------------|
| * | ---> y (l)
|---------------|
3 2
'''
# Small constant to guard against division by zero in downstream math.
eps = tf.constant(1e-6)
# def roi_logits_to_attrs_tf(base_coors, input_logits, anchor_size):
# anchor_diag = tf.sqrt(tf.pow(anchor_size[0], 2.) + tf.pow(anchor_size[1], 2.))
# w = tf.clip_by_value(tf.exp(input_logits[:, 0]) * anchor_size[0], 0., 1e5)
# l = tf.clip_by_value(tf.exp(input_logits[:, 1]) * anchor_size[1], 0., 1e5)
# h = tf.clip_by_value(tf.exp(input_logits[:, 2]) * anchor_size[2], 0., 1e5)
# x = tf.clip_by_value(input_logits[:, 3] * anchor_diag + base_coors[:, 0], -1e5, 1e5)
# y = tf.clip_by_value(input_logits[:, 4] * anchor_diag + base_coors[:, 1], -1e5, 1e5)
# z = tf.clip_by_value(input_logits[:, 5] * anchor_size[2] + base_coors[:, 2], -1e5, 1e5)
# r = input_logits[:, 6] * 3.1415927
# return tf.stack([w, l, h, x, y, z, r], axis=-1)
def roi_logits_to_attrs_tf(base_coors, input_logits, anchor_size):
    """Decode RoI regression logits into absolute box attributes.

    base_coors: per-point anchor center coordinates (columns 0..2 used).
    input_logits: [npoint, 7] regression outputs (w, l, h, x, y, z, r).
    anchor_size: anchor (w, l, h); indices 0..2 used.
    Returns [npoint, 7] boxes stacked as (w, l, h, x, y, z, r).
    """
    # Diagonal of the anchor footprint, used to scale the x/y offsets.
    anchor_diag = tf.sqrt(tf.pow(anchor_size[0], 2.) + tf.pow(anchor_size[1], 2.))
    # Sizes are regressed in log space; clip keeps them finite.
    w = tf.clip_by_value(tf.exp(input_logits[:, 0]) * anchor_size[0], 0., 1e7)
    l = tf.clip_by_value(tf.exp(input_logits[:, 1]) * anchor_size[1], 0., 1e7)
    h = tf.clip_by_value(tf.exp(input_logits[:, 2]) * anchor_size[2], 0., 1e7)
    x = tf.clip_by_value(input_logits[:, 3] * anchor_diag + base_coors[:, 0], -1e7, 1e7)
    y = tf.clip_by_value(input_logits[:, 4] * anchor_diag + base_coors[:, 1], -1e7, 1e7)
    z = tf.clip_by_value(input_logits[:, 5] * anchor_size[2] + base_coors[:, 2], -1e7, 1e7)
    # Rotation logit is scaled by pi (radians).
    r = tf.clip_by_value(input_logits[:, 6] * 3.1415927, -1e7, 1e7)
    # r = input_logits[:, 6]
    return tf.stack([w, l, h, x, y, z, r], axis=-1)
def roi_attrs_to_logits(base_coors, input_attrs, anchor_size):
    """Inverse of roi_logits_to_attrs_tf: encode absolute boxes as
    regression targets relative to the anchor.

    NOTE: uses tf.log, i.e. the TensorFlow 1.x API (tf.math.log in 2.x).
    """
    anchor_diag = tf.sqrt(tf.pow(anchor_size[0], 2.) + tf.pow(anchor_size[1], 2.))
    # Sizes encoded in log space relative to the anchor size.
    logits_w = tf.log(input_attrs[:, 0] / anchor_size[0])
    logits_l = tf.log(input_attrs[:, 1] / anchor_size[1])
    logits_h = tf.log(input_attrs[:, 2] / anchor_size[2])
    # Offsets normalized by the anchor diagonal (x, y) or height (z).
    logits_x = (input_attrs[:, 3] - base_coors[:, 0]) / anchor_diag
    logits_y = (input_attrs[:, 4] - base_coors[:, 1]) / anchor_diag
    logits_z = (input_attrs[:, 5] - base_coors[:, 2]) / anchor_size[2]
    logits_r = input_attrs[:, 6] / 3.1415927
    return tf.stack([logits_w, logits_l, logits_h, logits_x, logits_y, logits_z, logits_r], axis=-1)
def bbox_logits_to_attrs_tf(input_roi_attrs, input_logits):
    """Decode second-stage box refinement logits relative to RoI boxes.

    Same scheme as roi_logits_to_attrs_tf, but the reference is the RoI
    box itself (its w/l diagonal and height) rather than a fixed anchor.
    Returns [npoint, 7] boxes stacked as (w, l, h, x, y, z, r).
    """
    roi_diag = tf.sqrt(tf.pow(input_roi_attrs[:, 0], 2.) + tf.pow(input_roi_attrs[:, 1], 2.))
    w = tf.clip_by_value(tf.exp(input_logits[:, 0]) * input_roi_attrs[:, 0], 0., 1e7)
    l = tf.clip_by_value(tf.exp(input_logits[:, 1]) * input_roi_attrs[:, 1], 0., 1e7)
    h = tf.clip_by_value(tf.exp(input_logits[:, 2]) * input_roi_attrs[:, 2], 0., 1e7)
    x = tf.clip_by_value(input_logits[:, 3] * roi_diag + input_roi_attrs[:, 3], -1e7, 1e7)
    y = tf.clip_by_value(input_logits[:, 4] * roi_diag + input_roi_attrs[:, 4], -1e7, 1e7)
    z = tf.clip_by_value(input_logits[:, 5] * input_roi_attrs[:, 2] + input_roi_attrs[:, 5], -1e7, 1e7)
    # Rotation refinement is additive on top of the RoI rotation.
    r = tf.clip_by_value(input_logits[:, 6] * 3.1415927 + input_roi_attrs[:, 6], -1e7, 1e7)
    # r = input_logits[:, 6] + input_roi_attrs[:, 6]
    return tf.stack([w, l, h, x, y, z, r], axis=-1)
def get_rotate_matrix(r):
    """Build a batch of 2x2 rotation matrices from angles r: [n] -> [n, 2, 2]."""
    rotate_matrix = tf.stack([tf.cos(r), -tf.sin(r), tf.sin(r), tf.cos(r)], axis=-1)  # [n, 4]
    rotate_matrix = tf.reshape(rotate_matrix, shape=[-1, 2, 2])  # [n, 2, 2]
    return rotate_matrix
def get_2d_vertex_points(gt_attrs, pred_attrs):
    """Compute BEV (x-y) corner points of gt and pred boxes in each other's frames.

    Both inputs are [n, 7] box attributes [w, l, h, x, y, z, r].

    Returns:
        gt_v:           [n, 4, 2] gt corners, axis-aligned in the gt frame.
        rel_rot_pred_v: [n, 4, 2] pred corners expressed in the gt box frame.
        rel_rot_gt_v:   [n, 4, 2] gt corners expressed in the pred box frame.
        rel_xy:         [n, 1, 2] center offset (pred - gt) in world coordinates.
        rel_r:          [n] rotation difference (pred - gt).
    """
    gt_w = gt_attrs[:, 0]  # [n]
    gt_l = gt_attrs[:, 1]  # [n]
    gt_x = gt_attrs[:, 3]  # [n]
    gt_y = gt_attrs[:, 4]  # [n]
    gt_r = gt_attrs[:, 6]  # [n]
    # gt corners in the gt-centered, axis-aligned frame.
    gt_v0 = tf.stack([gt_w / 2, -gt_l / 2], axis=-1)  # [n, 2]
    gt_v1 = tf.stack([gt_w / 2, gt_l / 2], axis=-1)  # [n, 2]
    gt_v2 = tf.stack([-gt_w / 2, gt_l / 2], axis=-1)  # [n, 2]
    gt_v3 = tf.stack([-gt_w / 2, -gt_l / 2], axis=-1)  # [n, 2]
    gt_v = tf.stack([gt_v0, gt_v1, gt_v2, gt_v3], axis=1)  # [n, 4, 2]
    pred_w = pred_attrs[:, 0]  # [n]
    pred_l = pred_attrs[:, 1]  # [n]
    pred_x = pred_attrs[:, 3]  # [n]
    pred_y = pred_attrs[:, 4]  # [n]
    pred_r = pred_attrs[:, 6]  # [n]
    # Relative pose of pred w.r.t. gt.
    rel_x = pred_x - gt_x  # [n]
    rel_y = pred_y - gt_y  # [n]
    rel_r = pred_r - gt_r  # [n]
    rel_xy = tf.expand_dims(tf.stack([rel_x, rel_y], axis=-1), axis=1)  # [n, 1, 2]
    # pred corners in the pred-centered, axis-aligned frame.
    pred_v0 = tf.stack([pred_w / 2, -pred_l / 2], axis=-1)  # [n, 2]
    pred_v1 = tf.stack([pred_w / 2, pred_l / 2], axis=-1)  # [n, 2]
    pred_v2 = tf.stack([-pred_w / 2, pred_l / 2], axis=-1)  # [n, 2]
    pred_v3 = tf.stack([-pred_w / 2, -pred_l / 2], axis=-1)  # [n, 2]
    pred_v = tf.stack([pred_v0, pred_v1, pred_v2, pred_v3], axis=1)  # [n, 4, 2]
    # Rotate pred corners by the relative angle, then shift by the relative
    # offset rotated into the gt frame -> pred corners in the gt frame.
    rot_pred_v = tf.transpose(tf.matmul(a=get_rotate_matrix(rel_r), b=pred_v, transpose_b=True),
                              perm=[0, 2, 1])  # [n, 4, 2]
    rot_rel_xy = tf.transpose(tf.matmul(a=get_rotate_matrix(-gt_r), b=rel_xy, transpose_b=True),
                              perm=[0, 2, 1])  # [n, 1, 2]
    rel_rot_pred_v = rot_pred_v + rot_rel_xy  # [n, 4, 2]
    # Symmetric transform: gt corners in the pred frame.
    rot_gt_v = tf.transpose(tf.matmul(a=get_rotate_matrix(-rel_r), b=gt_v, transpose_b=True),
                            perm=[0, 2, 1])  # [n, 4, 2]
    rot_rel_xy = tf.transpose(tf.matmul(a=get_rotate_matrix(-pred_r), b=-rel_xy, transpose_b=True),
                              perm=[0, 2, 1])  # [n, 1, 2]
    rel_rot_gt_v = rot_gt_v + rot_rel_xy  # [n, 4, 2]
    # [n, 2, 2] @ [n, 2, 4] = [n, 2, 4] -> [n, 4, 2]
    return gt_v, rel_rot_pred_v, rel_rot_gt_v, rel_xy, rel_r
def get_2d_intersection_points(gt_attrs, rel_rot_pred_v):
    """Intersections of each pred-box edge line with the gt box boundary lines.

    :param gt_attrs: [n, 7] gt box attributes; only w and l are used.
    :param rel_rot_pred_v: [n, 4, 2] pred corners expressed in the gt frame.
    :return: [n, 16, 2] candidate points (4 edges x 4 gt boundary lines);
        points are later filtered by masks, so out-of-box solutions are fine.
    """
    gt_w = gt_attrs[:, 0]  # [n]
    gt_l = gt_attrs[:, 1]  # [n]
    output_points = []
    # i in [-1, 0, 1, 2] walks edges (v3,v0), (v0,v1), (v1,v2), (v2,v3):
    # the -1 index wraps around so all four edges are covered.
    for i in [-1, 0, 1, 2]:
        v0_x = rel_rot_pred_v[:, i, 0]  # [n]
        v0_y = rel_rot_pred_v[:, i, 1]  # [n]
        v1_x = rel_rot_pred_v[:, i + 1, 0]  # [n]
        v1_y = rel_rot_pred_v[:, i + 1, 1]  # [n]
        # Edge line as y = kx*x + bx and x = ky*y + by; divide_no_nan guards
        # vertical/horizontal edges (degenerate slopes yield 0, masked later).
        kx = tf.math.divide_no_nan(v1_y - v0_y, v1_x - v0_x)
        bx = tf.math.divide_no_nan(v0_y * v1_x - v1_y * v0_x, v1_x - v0_x)
        ky = tf.math.divide_no_nan(v1_x - v0_x, v1_y - v0_y)
        by = tf.math.divide_no_nan(v1_y * v0_x - v0_y * v1_x, v1_y - v0_y)
        # kx = (v1_y - v0_y) / (v1_x - v0_x + eps) # [n]
        # bx = (v0_y * v1_x - v1_y * v0_x) / (v1_x - v0_x + eps) # [n]
        # ky = (v1_x - v0_x) / (v1_y - v0_y + eps) # [n]
        # by = (v1_y * v0_x - v0_y * v1_x) / (v1_y - v0_y + eps) # [n]
        # Intersections with the gt boundary lines x = +/- w/2 and y = +/- l/2.
        p0 = tf.stack([gt_w / 2, kx * gt_w / 2 + bx], axis=-1)  # [n, 2]
        p1 = tf.stack([-gt_w / 2, -kx * gt_w / 2 + bx], axis=-1)  # [n, 2]
        p2 = tf.stack([ky * gt_l / 2 + by, gt_l / 2], axis=-1)  # [n, 2]
        p3 = tf.stack([-ky * gt_l / 2 + by, -gt_l / 2], axis=-1)  # [n, 2]
        p = tf.stack([p0, p1, p2, p3], axis=1)  # [n, 4, 2]
        output_points.append(p)
    output_points = tf.concat(output_points, axis=1)  # [n, 16, 2]
    return output_points
def get_interior_vertex_points_mask(target_attrs, input_points):
    """Mask of corner points that lie inside the target box footprint.

    :param target_attrs: [n, 7] box attributes; only w and l are used.
    :param input_points: [n, 4, 2] points expressed in the target box frame.
    :return: [n, 4] float mask, 1. where the point is inside the box.
    """
    target_w = tf.expand_dims(target_attrs[:, 0], axis=1)  # [n, 1]
    target_l = tf.expand_dims(target_attrs[:, 1], axis=1)  # [n, 1]
    target_x = target_w / 2  # [n, 1] half-width bound
    target_y = target_l / 2  # [n, 1] half-length bound
    x_mask = tf.cast(tf.less_equal(tf.abs(input_points[:, :, 0]), target_x), dtype=tf.float32)  # [n, 4]
    y_mask = tf.cast(tf.less_equal(tf.abs(input_points[:, :, 1]), target_y), dtype=tf.float32)  # [n, 4]
    return x_mask * y_mask  # [n, 4]
#
#
def get_intersection_points_mask(target_attrs, input_points, rel_xy=None, rel_r=None):
    """Mask of candidate intersection points that fall inside the target box.

    :param target_attrs: [n, 7] box attributes; w, l (and r when transforming).
    :param input_points: [n, 16, 2] candidate points, expressed in the gt frame.
    :param rel_xy: optional [n, 1, 2] center offset (pred - gt); when given
        together with rel_r, points are first transformed into the target
        (pred) frame before the inside test.
    :param rel_r: optional [n] rotation difference (pred - gt).
    :return: [n, 16] float mask, 1. where the point is inside (with tolerance).
    """
    if rel_xy is not None and rel_r is not None:
        pred_r = target_attrs[:, 6]  # [n]
        # Rotate points by -rel_r and shift by the offset rotated into the
        # pred frame, mirroring the transform in get_2d_vertex_points.
        rot_input_points = tf.transpose(tf.matmul(a=get_rotate_matrix(-rel_r), b=input_points, transpose_b=True),
                                        perm=[0, 2, 1])  # [n, 16, 2]
        rot_rel_xy = tf.transpose(tf.matmul(a=get_rotate_matrix(-pred_r), b=-rel_xy, transpose_b=True),
                                  perm=[0, 2, 1])  # [n, 1, 2]
        rel_rot_input_points = rot_input_points + rot_rel_xy
    else:
        rel_rot_input_points = input_points
    target_w = tf.expand_dims(target_attrs[:, 0], axis=1)  # [n, 1]
    target_l = tf.expand_dims(target_attrs[:, 1], axis=1)  # [n, 1]
    # Small tolerance so points numerically on the boundary are kept.
    target_x = target_w / 2 + 1e-3  # [n, 1]
    target_y = target_l / 2 + 1e-3  # [n, 1]
    # target_x = 1000 # [n, 4]
    # target_y = 1000 # [n, 4]
    max_x_mask = tf.cast(tf.less_equal(tf.abs(rel_rot_input_points[:, :, 0]), target_x), dtype=tf.float32)  # [n, 16]
    max_y_mask = tf.cast(tf.less_equal(tf.abs(rel_rot_input_points[:, :, 1]), target_y), dtype=tf.float32)  # [n, 16]
    return max_x_mask * max_y_mask  # [n, 16]
def clockwise_sorting(input_points, masks):
    """Order candidate polygon points by angle around their masked centroid.

    :param input_points: [n, 24, 2] candidate intersection-polygon points.
    :param masks: [n, 24] float mask, 1. for valid points.
    :return: (sorted_points [n, 24, 2], sorted_masks [n, 24]) with valid points
        first, ordered by angle so the shoelace formula can be applied.
    """
    coors_masks = tf.stack([masks, masks], axis=-1)  # [n, 24, 2]
    masked_points = input_points * coors_masks
    # Centroid of the valid points only; divide_no_nan guards all-invalid rows.
    centers = tf.math.divide_no_nan(tf.reduce_sum(masked_points, axis=1, keepdims=True),
                                    (tf.reduce_sum(coors_masks, axis=1, keepdims=True)))  # [n, 1, 2]
    rel_vectors = input_points - centers  # [n, 24, 2]
    base_vector = rel_vectors[:, :1, :]  # [n, 1, 2]
    # Signed angle of each point relative to the first point, via atan2(det, dot):
    # https://stackoverflow.com/questions/14066933/direct-way-of-computing-clockwise-angle-between-2-vectors/16544330#16544330
    dot = base_vector[:, :, 0] * rel_vectors[:, :, 0] + base_vector[:, :, 1] * rel_vectors[:, :, 1]  # [n, 24]
    det = base_vector[:, :, 0] * rel_vectors[:, :, 1] - base_vector[:, :, 1] * rel_vectors[:, :, 0]  # [n, 24]
    # NOTE(review): relies on a module-level `eps` defined elsewhere in this file.
    angles = tf.math.atan2(det + eps, dot + eps)  # [n, 24] -pi~pi
    # Push invalid points' angles up by +1000 so they sort to the end.
    angles_masks = (0.5 - (masks - 0.5)) * 1000.  # [n, 24]
    masked_angles = angles + angles_masks  # [n, 24]
    # _, sort_idx = tf.nn.top_k(-masked_angles, k=input_points.get_shape().as_list()[1], sorted=True) # [n, 24]
    _, sort_idx = tf.nn.top_k(-masked_angles, k=tf.shape(input_points)[1], sorted=True)  # [n, 24]
    # Pair each sort index with its batch index for gather_nd.
    batch_id = tf.expand_dims(tf.range(start=0, limit=tf.shape(input_points)[0], dtype=tf.int32), axis=1)
    # batch_ids = tf.stack([batch_id] * input_points.get_shape().as_list()[1], axis=1)
    batch_ids = tf.tile(batch_id, [1, tf.shape(input_points)[1]])
    sort_idx = tf.stack([batch_ids, sort_idx], axis=-1)  # [n, 24, 2]
    sorted_points = tf.gather_nd(input_points, sort_idx)
    sorted_masks = tf.gather_nd(masks, sort_idx)
    return sorted_points, sorted_masks
def shoelace_intersection_area(sorted_points, sorted_masks):
    """Area of the intersection polygon via the shoelace formula.

    :param sorted_points: [n, 24, 2] angularly sorted candidate points.
    :param sorted_masks: [n, 24] float mask; invalid points are zeroed out
        (contributing nothing to the signed-area sum).
    :return: [n] polygon areas.
    """
    # https://en.wikipedia.org/wiki/Shoelace_formula
    sorted_points = sorted_points * tf.stack([sorted_masks, sorted_masks], axis=-1)  # [n, 24, 2]
    # Duplicate the last valid vertex in front so the polygon is closed.
    last_vertex_id = tf.cast(tf.reduce_sum(sorted_masks, axis=1) - 1,
                             dtype=tf.int32)  # [n] coors where idx=-1 will be convert to [0., 0.], so it's safe.
    last_vertex_id = tf.stack([tf.range(start=0, limit=tf.shape(sorted_points)[0], dtype=tf.int32), last_vertex_id],
                              axis=-1)  # [n, 2]
    last_vertex_to_duplicate = tf.expand_dims(tf.gather_nd(sorted_points, last_vertex_id), axis=1)  # [n, 1, 2]
    padded_sorted_points = tf.concat([last_vertex_to_duplicate, sorted_points], axis=1)  # [n, 24+1, 2]
    x_i = padded_sorted_points[:, :-1, 0]  # [n, 24]
    x_i_plus_1 = padded_sorted_points[:, 1:, 0]  # [n, 24]
    y_i = padded_sorted_points[:, :-1, 1]  # [n, 24]
    y_i_plus_1 = padded_sorted_points[:, 1:, 1]  # [n, 24]
    area = 0.5 * tf.reduce_sum(x_i * y_i_plus_1 - x_i_plus_1 * y_i, axis=-1)  # [n]
    return area
def get_intersection_height(gt_attrs, pred_attrs):
    """Vertical (z-axis) overlap between gt and predicted boxes ([n, 7] -> [n])."""
    gt_top = gt_attrs[:, 5] + 0.5 * gt_attrs[:, 2]
    gt_bottom = gt_attrs[:, 5] - 0.5 * gt_attrs[:, 2]
    pred_top = pred_attrs[:, 5] + 0.5 * pred_attrs[:, 2]
    pred_bottom = pred_attrs[:, 5] - 0.5 * pred_attrs[:, 2]
    overlap = tf.minimum(gt_top, pred_top) - tf.maximum(gt_bottom, pred_bottom)
    # relu clamps to zero when the boxes do not overlap vertically.
    return tf.nn.relu(overlap)
def get_3d_iou_from_area(gt_attrs, pred_attrs, intersection_2d_area, intersection_height, clip):
    """3D IoU from a BEV intersection area and a vertical overlap.

    :param gt_attrs: [n, 7] gt boxes; w*l*h gives the volume.
    :param pred_attrs: [n, 7] predicted boxes.
    :param intersection_2d_area: [n] BEV intersection areas.
    :param intersection_height: [n] vertical overlaps.
    :param clip: when True, replace NaN IoUs with 0.
    :return: [n] IoU values.
    """
    intersection_volume = intersection_2d_area * intersection_height
    gt_volume = gt_attrs[:, 0] * gt_attrs[:, 1] * gt_attrs[:, 2]
    pred_volume = pred_attrs[:, 0] * pred_attrs[:, 1] * pred_attrs[:, 2]
    iou = tf.math.divide_no_nan(intersection_volume, gt_volume + pred_volume - intersection_volume)
    # tf.summary.scalar('iou_nan_sum',
    #                   hvd.allreduce(tf.reduce_sum(tf.cast(tf.is_nan(iou), dtype=tf.float32)), average=False))
    if clip:
        iou = tf.where(tf.is_nan(iou), tf.zeros_like(iou), iou)
    return iou
def get_bev_iou_from_area(gt_attrs, pred_attrs, intersection_2d_area, clip):
    """Bird's-eye-view IoU from a BEV intersection area.

    :param gt_attrs: [n, 7] gt boxes; w*l gives the footprint area.
    :param pred_attrs: [n, 7] predicted boxes.
    :param intersection_2d_area: [n] BEV intersection areas.
    :param clip: when True, replace NaN IoUs with 0.
    :return: [n] IoU values.
    """
    gt_area = gt_attrs[:, 0] * gt_attrs[:, 1]
    pred_area = pred_attrs[:, 0] * pred_attrs[:, 1]
    iou = tf.math.divide_no_nan(intersection_2d_area, gt_area + pred_area - intersection_2d_area)
    # tf.summary.scalar('iou_nan_sum',
    #                   hvd.allreduce(tf.reduce_sum(tf.cast(tf.is_nan(iou), dtype=tf.float32)), average=False))
    if clip:
        iou = tf.where(tf.is_nan(iou), tf.zeros_like(iou), iou)
    return iou
def cal_3d_iou(gt_attrs, pred_attrs, clip=False):
    """Differentiable 3D IoU between paired boxes ([n, 7] attrs -> [n])."""
    gt_corners, pred_in_gt, gt_in_pred, center_offset, angle_offset = \
        get_2d_vertex_points(gt_attrs, pred_attrs)
    # Candidate polygon points: box corners plus edge/boundary intersections.
    cross_points = get_2d_intersection_points(gt_attrs=gt_attrs, rel_rot_pred_v=pred_in_gt)
    gt_inside_pred = get_interior_vertex_points_mask(target_attrs=pred_attrs, input_points=gt_in_pred)
    pred_inside_gt = get_interior_vertex_points_mask(target_attrs=gt_attrs, input_points=pred_in_gt)
    cross_inside_gt = get_intersection_points_mask(target_attrs=gt_attrs, input_points=cross_points)
    cross_inside_pred = get_intersection_points_mask(target_attrs=pred_attrs,
                                                     input_points=cross_points, rel_xy=center_offset,
                                                     rel_r=angle_offset)
    candidates = tf.concat([gt_corners, pred_in_gt, cross_points], axis=1)
    valid = tf.concat([gt_inside_pred, pred_inside_gt,
                       cross_inside_gt * cross_inside_pred], axis=1)
    ordered_points, ordered_masks = clockwise_sorting(input_points=candidates, masks=valid)
    bev_area = shoelace_intersection_area(ordered_points, ordered_masks)
    z_overlap = get_intersection_height(gt_attrs, pred_attrs)
    return get_3d_iou_from_area(gt_attrs, pred_attrs, bev_area, z_overlap, clip)
def cal_bev_iou(gt_attrs, pred_attrs, clip=False):
    """Differentiable bird's-eye-view IoU between paired boxes ([n, 7] attrs -> [n])."""
    gt_corners, pred_in_gt, gt_in_pred, center_offset, angle_offset = \
        get_2d_vertex_points(gt_attrs, pred_attrs)
    # Candidate polygon points: box corners plus edge/boundary intersections.
    cross_points = get_2d_intersection_points(gt_attrs=gt_attrs, rel_rot_pred_v=pred_in_gt)
    gt_inside_pred = get_interior_vertex_points_mask(target_attrs=pred_attrs, input_points=gt_in_pred)
    pred_inside_gt = get_interior_vertex_points_mask(target_attrs=gt_attrs, input_points=pred_in_gt)
    cross_inside_gt = get_intersection_points_mask(target_attrs=gt_attrs, input_points=cross_points)
    cross_inside_pred = get_intersection_points_mask(target_attrs=pred_attrs,
                                                     input_points=cross_points, rel_xy=center_offset,
                                                     rel_r=angle_offset)
    candidates = tf.concat([gt_corners, pred_in_gt, cross_points], axis=1)
    valid = tf.concat([gt_inside_pred, pred_inside_gt,
                       cross_inside_gt * cross_inside_pred], axis=1)
    ordered_points, ordered_masks = clockwise_sorting(input_points=candidates, masks=valid)
    bev_area = shoelace_intersection_area(ordered_points, ordered_masks)
    return get_bev_iou_from_area(gt_attrs, pred_attrs, bev_area, clip)
def cal_3d_iou_debug(gt_attrs, pred_attrs, clip=False):
    """Same as cal_3d_iou but also returns the BEV intersection area for debugging."""
    gt_corners, pred_in_gt, gt_in_pred, center_offset, angle_offset = \
        get_2d_vertex_points(gt_attrs, pred_attrs)
    cross_points = get_2d_intersection_points(gt_attrs=gt_attrs, rel_rot_pred_v=pred_in_gt)
    gt_inside_pred = get_interior_vertex_points_mask(target_attrs=pred_attrs, input_points=gt_in_pred)
    pred_inside_gt = get_interior_vertex_points_mask(target_attrs=gt_attrs, input_points=pred_in_gt)
    cross_inside_gt = get_intersection_points_mask(target_attrs=gt_attrs, input_points=cross_points)
    cross_inside_pred = get_intersection_points_mask(target_attrs=pred_attrs,
                                                     input_points=cross_points, rel_xy=center_offset,
                                                     rel_r=angle_offset)
    candidates = tf.concat([gt_corners, pred_in_gt, cross_points], axis=1)
    valid = tf.concat([gt_inside_pred, pred_inside_gt,
                       cross_inside_gt * cross_inside_pred], axis=1)
    ordered_points, ordered_masks = clockwise_sorting(input_points=candidates, masks=valid)
    bev_area = shoelace_intersection_area(ordered_points, ordered_masks)
    z_overlap = get_intersection_height(gt_attrs, pred_attrs)
    ious = get_3d_iou_from_area(gt_attrs, pred_attrs, bev_area, z_overlap, clip)
    return ious, bev_area
|
import logging
import time
import requests
from eodag import EOProduct
from eodag.api.product.metadata_mapping import (
format_query_params,
mtd_cfg_as_conversion_and_querypath,
properties_from_json,
)
from eodag.plugins.search.base import Search
from eodag.utils import GENERIC_PRODUCT_TYPE, string_to_jsonpath
# Module-level logger for this search plugin.
logger = logging.getLogger("eodag.search.data_request_search")
class DataRequestSearch(Search):
    """
    Plugin to execute search requests composed of several steps:
    - do a data request which defines which data shall be searched
    - check the status of the request job
    - if finished - fetch the result of the job
    """

    def __init__(self, provider, config):
        super(DataRequestSearch, self).__init__(provider, config)
        # Defaults the rest of the plugin relies on.
        self.config.__dict__.setdefault("result_type", "json")
        self.config.__dict__.setdefault("results_entry", "content")
        self.config.__dict__.setdefault("pagination", {})
        self.config.__dict__.setdefault("free_text_search_operations", {})
        self.next_page_url = None
        # Pre-convert per-product-type metadata mappings to (conversion, querypath) form.
        for product_type in self.config.products.keys():
            if "metadata_mapping" in self.config.products[product_type].keys():
                self.config.products[product_type][
                    "metadata_mapping"
                ] = mtd_cfg_as_conversion_and_querypath(
                    self.config.products[product_type]["metadata_mapping"]
                )
        if (
            self.config.result_type == "json"
            and "next_page_url_key_path" in self.config.pagination
        ):
            # Pre-parse the jsonpath once instead of on every result page.
            self.config.pagination["next_page_url_key_path"] = string_to_jsonpath(
                self.config.pagination.get("next_page_url_key_path", None)
            )

    def discover_product_types(self):
        """Fetch product types is disabled for `DataRequestSearch`

        :returns: empty dict
        :rtype: dict
        """
        return {}

    def clear(self):
        """Clear search context"""
        super().clear()
        self.next_page_url = None

    def query(self, *args, count=True, **kwargs):
        """
        performs the search for a provider where several steps are required to fetch the data

        :returns: tuple of (list of EOProduct, total number of items)
        """
        product_type = kwargs.get("productType", None)
        self._add_product_type_metadata(product_type)
        provider_product_type = self._map_product_type(product_type)
        kwargs["productType"] = provider_product_type
        data_request_id = self._create_data_request(
            provider_product_type, product_type, **kwargs
        )
        # Poll until the job completes; _check_request_status raises on failure.
        # NOTE(review): no timeout here - a job that never completes would poll
        # forever; confirm whether an upper bound is needed.
        request_finished = False
        while not request_finished:
            request_finished = self._check_request_status(data_request_id)
            time.sleep(1)
        logger.info("search job for product_type %s finished", provider_product_type)
        result = self._get_result_data(data_request_id)
        logger.info("result retrieved from search job")
        if self._check_uses_custom_filters(product_type):
            result = self._apply_additional_filters(
                result, self.config.products[product_type]["custom_filters"]
            )
        # Restore the eodag product type so products are built with it.
        kwargs["productType"] = product_type
        return self._convert_result_data(result, data_request_id, **kwargs)

    def _create_data_request(self, product_type, eodag_product_type, **kwargs):
        """Create a search job on the provider and return its job id.

        :returns: the provider job id, or ``None`` when the job could not be
            created (the error is logged).
        """
        headers = getattr(self.auth, "headers", "")
        try:
            metadata_url = self.config.metadata_url + product_type
            logger.debug(f"Sending metadata request: {metadata_url}")
            metadata = requests.get(metadata_url, headers=headers)
            metadata.raise_for_status()
        except requests.RequestException:
            logger.error(
                "metadata for product_type %s could not be retrieved", product_type
            )
            raise
        else:
            # Initialise so the except clause below cannot hit a NameError when
            # requests.post itself raises before request_job is ever bound
            # (bug in the original: request_job.text was referenced unconditionally).
            request_job = None
            try:
                url = self.config.data_request_url
                request_body = format_query_params(
                    eodag_product_type, self.config, **kwargs
                )
                logger.debug(
                    f"Sending search job request to {url} with {str(request_body)}"
                )
                request_job = requests.post(url, json=request_body, headers=headers)
                request_job.raise_for_status()
            except requests.RequestException as e:
                logger.error(
                    "search job for product_type %s could not be created: %s, %s",
                    product_type,
                    str(e),
                    getattr(request_job, "text", ""),
                )
            else:
                logger.info("search job for product_type %s created", product_type)
                return request_job.json()["jobId"]

    def _check_request_status(self, data_request_id):
        """Check the provider job status; return True when it is completed.

        :raises requests.RequestException: on token expiry or job failure.
        """
        logger.info("checking status of request job %s", data_request_id)
        status_url = self.config.status_url + data_request_id
        status_data = requests.get(status_url, headers=self.auth.headers).json()
        if "status_code" in status_data and status_data["status_code"] == 403:
            logger.error("authentication token expired during request")
            raise requests.RequestException
        if status_data["status"] == "failed":
            logger.error(
                "data request job has failed, message: %s", status_data["message"]
            )
            raise requests.RequestException
        return status_data["status"] == "completed"

    def _get_result_data(self, data_request_id):
        """Fetch the result document of a finished job.

        Also captures the next-page URL (when configured) for pagination.
        Returns ``None`` when the result could not be retrieved (error logged).
        """
        url = self.config.result_url.format(jobId=data_request_id)
        try:
            result = requests.get(url, headers=self.auth.headers).json()
            next_page_url_key_path = self.config.pagination.get(
                "next_page_url_key_path", None
            )
            if next_page_url_key_path:
                try:
                    self.next_page_url = next_page_url_key_path.find(result)[0].value
                    logger.debug(
                        "Next page URL collected and set for the next search",
                    )
                except IndexError:
                    logger.debug("Next page URL could not be collected")
            return result
        except requests.RequestException:
            logger.error("data from job %s could not be retrieved", data_request_id)

    def _convert_result_data(self, result_data, data_request_id, **kwargs):
        """Build EOProducts from provider results"""
        results_entry = self.config.results_entry
        results = result_data[results_entry]
        normalize_remaining_count = len(results)
        logger.debug(
            "Adapting %s plugin results to eodag product representation"
            % normalize_remaining_count
        )
        products = []
        for result in results:
            product = EOProduct(
                self.provider,
                properties_from_json(
                    result,
                    self.config.metadata_mapping,
                    discovery_config=getattr(self.config, "discover_metadata", {}),
                ),
                **kwargs,
            )
            # use product_type_config as default properties
            product.properties = dict(
                getattr(self.config, "product_type_config", {}), **product.properties
            )
            products.append(product)
        total_items_nb_key_path = string_to_jsonpath(
            self.config.pagination["total_items_nb_key_path"]
        )
        if len(total_items_nb_key_path.find(result_data)) > 0:
            total_items_nb = total_items_nb_key_path.find(result_data)[0].value
        else:
            total_items_nb = 0
        for p in products:
            # add the request id to the order link property (required to create data order)
            p.properties["orderLink"] = p.properties["orderLink"].replace(
                "requestJobId", str(data_request_id)
            )
        return products, total_items_nb

    def _check_uses_custom_filters(self, product_type):
        """Return True when the product type configures custom result filters."""
        if (
            product_type in self.config.products
            and "custom_filters" in self.config.products[product_type]
        ):
            return True
        return False

    def _apply_additional_filters(self, result, custom_filters):
        """Filter provider records with the configured custom filter expression."""
        filtered_result = []
        results_entry = self.config.results_entry
        results = result[results_entry]
        path = string_to_jsonpath(custom_filters["filter_attribute"])
        indexes = custom_filters["indexes"].split("-")
        for record in results:
            filter_param = path.find(record)[0].value
            filter_value = filter_param[int(indexes[0]) : int(indexes[1])]
            filter_clause = "'" + filter_value + "' " + custom_filters["filter_clause"]
            # SECURITY: eval() executes the configured filter clause as Python.
            # This is only safe as long as custom_filters comes from trusted
            # provider configuration, never from user input.
            if eval(filter_clause):
                filtered_result.append(record)
        result[results_entry] = filtered_result
        return result

    def _map_product_type(self, product_type):
        """Map the eodag product type to the provider product type"""
        if product_type is None:
            return
        logger.debug("Mapping eodag product type to provider product type")
        return self.config.products.get(product_type, {}).get(
            "productType", GENERIC_PRODUCT_TYPE
        )

    def _add_product_type_metadata(self, product_type):
        """Merge the product type's own metadata mapping into the global one."""
        if (
            product_type in self.config.products
            and "metadata_mapping" in self.config.products[product_type]
        ):
            for key, mapping in self.config.products[product_type][
                "metadata_mapping"
            ].items():
                self.config.metadata_mapping[key] = mapping
|
import re, json, requests
import cv2
# api-endpoint
URL = "http://127.0.0.1:8000/"
# defining a params dict for the parameters to be sent to the API
# sending get request and saving the response as response object
img = cv2.imread('uploads/00000103_001.png')
# print(img)
# NOTE(review): posting a raw numpy array as a form field will be stringified
# by requests, not sent as image bytes - confirm the server expects this;
# typically the file would be sent via the `files=` argument instead.
data= {"photo":img}
resp = requests.post(url=URL, data=data)
# print(resp.status_code)
# #print("cookie :",di)
# print(json.loads(resp.content.decode('utf-8')))
# data= {"userId":69,
# "context":'add_booking'
# }
# resp = s.post(url=URL, data=data)
# # print(resp.status_code)
# # print(s.cookies.get_dict())
# # print(json.loads(resp.content.decode('utf-8')))
# str= json.loads(resp.content.decode('utf-8'))
# print("response from request: ",str)
# data= {"userId":69,
# "context":str
# }
# resp = s.post(url=URL, data=data)
# # print(resp.status_code)
# # print(s.cookies.get_dict())
# # print(json.loads(resp.content.decode('utf-8')))
# str= json.loads(resp.content.decode('utf-8'))
# print("response from request: ",str)
# data= {"userId":69,
# "context":str
# }
# resp = s.post(url=URL, data=data)
# # print(resp.status_code)
# # print(s.cookies.get_dict())
# # print(json.loads(resp.content.decode('utf-8')))
# str= json.loads(resp.content.decode('utf-8'))
# print("response from request: ",str)
|
from ipywidgets import Dropdown
from IPython.display import display, clear_output, Markdown, HTML
def _display(arg):
    """Display *arg* preceded by a CSS override that right-aligns notebook output."""
    css = HTML("""
    <style>
    .output {
        display: flex;
        align-items: center;
        text-align: right;
    }
    </style>
    """)
    display(css, arg)
# Sentinel dropdown option meaning "no tool selected".
EMPTY = '--'
def dropdown_eventhandler(change):
    """Re-render the dropdown on every change; show the selected tool's notes too."""
    clear_output(wait=True)
    _display(dropdown)
    if change.new != EMPTY:
        # NOTE: this renders the literal string "<name>.md" as Markdown,
        # it does not read the file's contents.
        _display(Markdown(f'{change.new}.md'))
# Tool selector; starts on the EMPTY sentinel so nothing is rendered initially.
dropdown = Dropdown(
    options=['--', 'ros', 'rosdep', 'gazebo', 'rtabmap'],
    value='--',
    description='tools:',
    disabled=False)
# Re-render whenever the selected value changes.
dropdown.observe(dropdown_eventhandler, names='value')
_display(dropdown)
|
import pytest
from selenium import webdriver
@pytest.fixture(name='sub11')
def sub1():
    """Trivial fixture providing a constant integer (exposed to tests as 'sub11')."""
    return 9
@pytest.fixture(name='driver1')
def driver():
    """Yield a Chrome WebDriver pointed at baidu.com; quit it on teardown.

    Everything before the ``yield`` is the fixture's setup phase; everything
    after it runs as teardown once the test finishes.
    """
    driver = webdriver.Chrome()
    driver.get("http://www.baidu.com")
    yield driver
    # quit() (not close()) ends the whole WebDriver session and terminates the
    # chromedriver process; close() only closes the current window and would
    # leak the driver process after each test.
    driver.quit()
|
import numpy as np
import scipy.optimize as opt
from scipy import *
import matplotlib.pyplot as plt
import cosmolopy.constants as cc
import cosmolopy.distance as cd
import cosmolopy.perturbation as cp
#=============Functions ====================
def func(p,x):
    # dN/dz model: 10**p0 * x**p1 * exp(-p2 * x); p are the fitted coefficients.
    w=10.**p[0]* x**p[1] * np.exp(-p[2]*x)
    print w.size
    return w
def func1(p,x):
    # Variant of func with a steeper exponential cutoff: exp(-p2 * x**1.5).
    w=10.**p[0]* x**p[1] * np.exp(-p[2]*x**1.5)
    print w.size
    return w
def residuals(p,x,y):
    # Sum of squared errors of the func model against data y (scalar objective
    # for opt.fmin, despite the name suggesting per-point residuals).
    w=10.**p[0]* x**(p[1] ) * np.exp(-p[2]*x)
    err=w-y
    err=err**2
    B=sum(err)
    return B
def D(z):
    # Normalized linear growth factor at redshift z (reads module-level omega_M_0).
    return cp.fgrowth(z,omega_M_0, unnormed=False)
def ErrorZ(z):
    # Redshift uncertainty model: sigma_z = 0.001 * (1 + z).
    return 0.001*(1+z)
def Bias(z):
    # Galaxy bias model b(z) = sqrt(1 + z); sqrt comes from `from scipy import *`.
    return sqrt(1+z)
#def func(p,x):
# w=p[0]*np.exp(-p[1]*x)
# print w.size
#return w
#def residuals(p,x,y):
# w=p[0]* np.exp(-p[1]*x)
# err=w-y
# err=err**2
# B=sum(err)
#return B
#================================================
# --- Redshift grids and reference dN/dz data ---
xrange4 = np.linspace(0.6, 2.0, 100)
omega_M_0 = 0.27
# NOTE(review): xrange0 (13 points) pairs with dndzrange_ref (13 points);
# xrange/dndzrange have 14 points.
xrange0 = np.array([ 0.8, 0.9 , 1.0, 1.1 , 1.2 ,1.3 , 1.4 ,1.5 , 1.6 ,1.7 , 1.8 ,1.9, 2.0])
xrange = np.array([ 0.7, 0.8, 0.9 , 1.0, 1.1 , 1.2 ,1.3 , 1.4 ,1.5 , 1.6 ,1.7 , 1.8 ,1.9, 2.0])
dndzrange = np.array([1.75, 2.68, 2.56 , 2.35, 2.12 , 1.88 ,1.68 , 1.40 ,1.12 , 0.81 ,0.53 , 0.49 ,0.29, 0.16])
dndzrange_ref = np.array([ 1.92, 1.83, 1.68, 1.51, 1.35, 1.20, 1.00, 0.80, 0.58, 0.38, 0.35, 0.21, 0.11])
kmax = np.linspace(0.16004, 0.2, 14)#np.array([0.16004, 0.165, 0.1700,0.1750, 0.1800, 0.1850, 0.1900, 0.1950, 0.20, 0.20, 0.20, 0.20, 0.2, 0.2])
kmin = np.linspace(0.00435,0.00334, 14)
# Redshift bin edges: +/- 0.05 around each bin center.
xmin = xrange -0.05
xmax =xrange+0.05
print xmin
print xmax
#========== Fit==================================
# Initial guesses for the three model coefficients.
p0=[5.52, 0.6, 4.6]
p04=[5.74, 1.14, 3.95]
# Minimize the sum of squared errors (residuals) with Nelder-Mead (fmin).
plsqtot= opt.fmin(residuals, p0, args=(xrange0, dndzrange_ref), maxiter=10000, maxfun=10000)
print ' | c1 | ', '| c2 |', '| c3 |'
print '0 muJy ',plsqtot[0], plsqtot[1], plsqtot[2]
# Evaluate the fitted model on the 14-point grid.
y0=func(plsqtot,xrange)
#========================== Save================
#kmax = np.empty(len(xrange)); kmax.fill(0.2)
volume =np.empty(len(xrange)); volume.fill(15000)
# Columns: volume, zmin, zmax, dN/dz (x1e-3), bias, kmax, kmin, sigma_z.
# NOTE(review): concatenate/reshape/savetxt come from `from scipy import *`.
data= concatenate((reshape(volume,(len(xrange),1)),reshape(xmin,(len(xrange),1)),reshape(xmax,(len(xrange),1)),reshape( y0*10**(-3),(len(xrange),1)),reshape(Bias(xrange),(len(xrange),1)),reshape(kmax,(len(xrange),1)),reshape(kmin,(len(xrange),1)),reshape(ErrorZ(xrange),(len(xrange),1))),axis=1)
#datafit= concatenate((reshape(xrange4,(len(xrange4),1)),reshape(y0*10**(-3),(len(xrange4),1))),axis=1)
print 'here is clear'
#savetxt('dndz_Euclid_ref_2.txt' , datafit)
#data4= concatenate((reshape(volume,(len(xrange4),1)),reshape(xmin,(len(xrange4),1)),reshape(xmax,(len(xrange4),1)),reshape(y0*10**(-3),(len(xrange4),1)),reshape(Bias(xrange4),(len(xrange4),1)),reshape(kmax,(len(xrange4),1)),reshape(ErrorZ(xrange4),(len(xrange4),1))),axis=1)
#savetxt('number_EuclidmJy_ref.txt' , data4)
savetxt('number_EuclidmJy_ref.txt' , data)
# PLOTTING: fitted model (red dots) against the reference dN/dz (blue line).
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
#ax.set_yscale('log')
p0, = ax.plot(xrange, y0, 'ro')
#plt.xlim(0.,2.5,0.5)
#plt.ylim(1, 2)
p1, = ax.plot(xrange0, dndzrange_ref , 'b', linewidth=2.5,linestyle="-")
#p2, = plt.plot(x, bias_rms7_3 , 'ro')
#p3, = plt.plot(x, bias_rms23 , 'go')
#p01, = plt.plot(xrange, y0, color='#BA5F5F')
#p11, = plt.plot(xrange, Bias(xrange), color='#0404B4')
#p21, = plt.plot(xrange, y2, color='#B40431')
#p31, = plt.plot(xrange, y3, color='#04B404')
#plt.legend([p11] ,['BIAS'], loc='best')
plt.xlabel(r"redshift ($z$)")
plt.ylabel(r"$b(z)$")
plt.savefig('Euclid_dndz.eps')
plt.show()
#OUTPUT FOR dNdz fitting using Obreschkow function, for 0 muJy
#6.33839678537 2.17980796636 1.3923452325
#OUTPUT FOR dNdz fitting using Obreschkow function, for 1muJy
#7.08990441653 2.84040926362 5.21751576048
#OUTPUT FOR dNdz fitting using Obreschkow function, for 7.3 muJy
#5.3074425085 0.72760414229 4.11718714615
##OUTPUT FOR dNdz fitting using Obreschkow function, for 23 muJy
#4.9727473436 0.53260579938 6.66294780323
##OUTPUT FOR dNdz fitting using Obreschkow function, for 100 muJy
#8.28343815119 4.44979424928 23.9380642576
|
from django.contrib.auth.views import login as auth_login
from allauth.socialaccount.models import SocialApp
from allauth.socialaccount.templatetags.socialaccount import get_providers
from django.contrib.auth.decorators import login_required
from django.conf import settings
from django.shortcuts import render
from .forms import LoginForm
def login(request):
    """Render the login page with the list of configured social auth providers."""
    providers = []
    for provider in get_providers():  # providers enabled via settings/INSTALLED_APPS
        # social_app is not a built-in attribute of provider; it is attached here.
        try:
            # Is a client id/secret actually registered for this provider?
            provider.social_app = SocialApp.objects.get(provider=provider.id, sites=settings.SITE_ID)
        except SocialApp.DoesNotExist:
            provider.social_app = None
        providers.append(provider)
    return auth_login(request,
                      #authentication_form=LoginForm,
                      template_name='accounts/login_form.html',
                      extra_context={'providers': providers})
@login_required
def profile(request):
    """Render the logged-in user's profile page."""
    return render(request, 'accounts/profile.html')
|
import numpy as np
from torchvision import transforms
from torch.utils.data import Dataset, DataLoader
from torchvision.datasets import ImageFolder
from options import Options
def load_data(opt):
    """Build a DataLoader over an ImageFolder dataset described by *opt*.

    Args:
        opt: parsed options; uses dataroot, isize, batchsize, shuffle,
            workers, droplast and manualseed.

    Returns:
        torch.utils.data.DataLoader over the images under opt.dataroot.
    """
    pipeline = transforms.Compose([
        transforms.ToTensor(),
        transforms.Resize(opt.isize),
        # transforms.Normalize
    ])
    # Seed each worker deterministically when a manual seed is requested.
    seed_workers = (None if opt.manualseed == -1
                    else lambda x: np.random.seed(opt.manualseed))
    return DataLoader(
        dataset=ImageFolder(opt.dataroot, pipeline),
        batch_size=opt.batchsize,
        shuffle=opt.shuffle,
        num_workers=int(opt.workers),
        drop_last=opt.droplast,
        worker_init_fn=seed_workers,
    )
# Smoke test: parse CLI options and build the loader.
if __name__ == '__main__':
    opt = Options().parse()
    dataloader = load_data(opt)
print(dataloader) |
from django.shortcuts import render, get_object_or_404
from django.views.generic import ListView, DetailView, CreateView, UpdateView, DeleteView
from .models import Post, Comment
from django.urls import reverse_lazy
from .forms import CommentForm
from django.contrib.auth.mixins import LoginRequiredMixin
# Create your views here.
class BlogListView(ListView):
    """List all Post objects using the 'index.html' template."""
    model = Post
    template_name = 'index.html'
# class BlogDetailView(DetailView):
def postDetailView(request, pk):
    """Show a post with its active comments and handle new comment submission."""
    # model = Post
    # template_name = 'detail.html'
    # form_class = CommentForm
    post = get_object_or_404(Post, pk=pk)
    comments = post.comments.filter(active=True)
    new_comment = None
    # def add_comment(self, request):
    if request.method == 'POST':
        comment_form = CommentForm(data=request.POST)
        if comment_form.is_valid():
            # Attach the comment to this post before saving.
            new_comment = comment_form.save(commit=False)
            new_comment.post = post
            new_comment.save()
            # NOTE(review): no redirect after a successful POST (no PRG
            # pattern) - refreshing the page would resubmit the comment.
    else:
        comment_form = CommentForm()
    return render(request, 'detail.html', {'post': post, 'comments': comments,
                                           'new_comment': new_comment, 'comment_form': comment_form})
class BlogCreateView(LoginRequiredMixin, CreateView):
    """Create a new Post; only available to logged-in users."""
    model = Post
    template_name = 'new.html'
    fields = ['title', 'body']

    def form_valid(self, form):
        # Attach the logged-in user as the post's author before saving.
        form.instance.author = self.request.user
        return super().form_valid(form)
class BlogUpdateView(LoginRequiredMixin, UpdateView):
    """Edit an existing Post's title and body; requires login."""
    model = Post
    template_name = 'edit.html'
    fields = ['title', 'body']
class BlogDeleteView(LoginRequiredMixin, DeleteView):
    """Delete a Post and return to the home page; requires login."""
    model = Post
    template_name = 'delete.html'
    success_url = reverse_lazy('home')
|
import tempfile
from pathlib import Path
import cv2
from fpdf import FPDF
from PIL import Image
import utils
def create_markers(marker_type):
    """Generate a letter-size PDF sheet of ArUco marker stickers.

    :param marker_type: one of 'robots', 'cubes' or 'corners'; controls which
        marker ids are used and how many copies of each are laid out.
    """
    marker_ids = utils.get_marker_ids(marker_type)
    # Repeat ids so the sheet holds the number of stickers each use case needs.
    if marker_type == 'robots':
        marker_ids = 5 * marker_ids + marker_ids[:4]
    elif marker_type == 'cubes':
        # 6 copies of each of the first 8 ids (presumably one per cube face
        # plus spares - TODO confirm).
        marker_ids = [marker_id for marker_id in marker_ids[:8] for _ in range(6)]
    elif marker_type == 'corners':
        marker_ids = 7 * marker_ids
    output_dir = 'printouts'
    pdf_name = 'markers-{}.pdf'.format(marker_type)
    orientation = 'P'
    sticker_padding_mm = 3
    marker_params = utils.get_marker_parameters()
    paper_params = utils.get_paper_parameters(orientation)
    marker_length_mm = 1000 * marker_params['marker_length']
    # Pixels per marker so the rendered bitmap prints at the right physical size.
    scale_factor = (marker_length_mm / paper_params['mm_per_in']) * paper_params['ppi'] / marker_params['marker_length_pixels']
    sticker_length_mm = marker_params['sticker_length_mm'][marker_type]
    stickers_per_row = int((paper_params['width_mm'] - 2 * paper_params['padding_mm']) / (sticker_length_mm + sticker_padding_mm))
    aruco_dict = cv2.aruco.Dictionary_get(marker_params['dict_id'])
    # Create PDF
    pdf = FPDF(orientation, 'mm', 'letter')
    pdf.add_page()
    with tempfile.TemporaryDirectory() as tmp_dir_name:
        for i, marker_id in enumerate(marker_ids):
            # Render each marker to a temp PNG, then place it inside a cut-line box.
            image_path = str(Path(tmp_dir_name) / '{}.png'.format(marker_id))
            Image.fromarray(cv2.aruco.drawMarker(aruco_dict, marker_id, int(scale_factor * marker_params['marker_length_pixels']))).save(image_path)
            center_x = (sticker_length_mm + sticker_padding_mm) * (i % stickers_per_row + 1)
            center_y = (sticker_length_mm + sticker_padding_mm) * (i // stickers_per_row + 1)
            pdf.rect(
                x=(center_x - sticker_length_mm / 2 - pdf.line_width / 2),
                y=(center_y - sticker_length_mm / 2 - pdf.line_width / 2),
                w=(sticker_length_mm + pdf.line_width),
                h=(sticker_length_mm + pdf.line_width))
            pdf.image(image_path, x=(center_x - marker_length_mm / 2), y=(center_y - marker_length_mm / 2), w=marker_length_mm, h=marker_length_mm)
    # Save PDF
    output_dir = Path(output_dir)
    if not output_dir.exists():
        output_dir.mkdir(parents=True)
    pdf.output(output_dir / pdf_name)
# Generate one printout sheet per marker category.
if __name__ == '__main__':
    create_markers('robots')
    create_markers('cubes')
    create_markers('corners')
|
"""Вычислить квадратное уравнение ax2 + bx + c = 0 (*)
D = b2 – 4ac;
x1,2 = (-b +/- sqrt (D)) / 2a
Предусмотреть 3 варианта:
Два действительных корня
Один действительный корень
Нет действительных корней
"""
import math
a = int(input())
b = int(input())
c = int(input())
try:
d = b ** 2 - 4 * a * c
x1 = (-b - math.sqrt(d)) / (2 * a)
x2 = (-b + math.sqrt(d)) / (2 * a)
if x1 != x2:
print(f"x1: {x1} x2: {x2}")
elif x1 == x2:
print(f"x1, x2 = {x1}")
except:
print('No')
|
import tensorflow as tf
def get_tensor_shape(x):
    """Return the shape of tensor `x` as a list, preferring static sizes.

    Each entry is a Python int where the static shape is known, and a
    scalar `tf.shape(x)[i]` tensor where it is dynamic (None).
    """
    static_shape = x.get_shape().as_list()
    dynamic_shape = [tf.shape(x)[i] for i in range(len(static_shape))]
    # isinstance (not `type(...) is int`) is the idiomatic type test.
    return [s if isinstance(s, int) else d
            for s, d in zip(static_shape, dynamic_shape)]
# [b, n, c]
def sample_1d(
    img, # [b, h, c]
    y_idx, # [b, n], 0 <= pos < h, dtype=int32
):
    """Per-batch gather: pick rows of `img` at the indices in `y_idx`.

    Returns a [b, n, c] tensor; indices are clipped into [0, h - 1].
    """
    b, h, c = get_tensor_shape(img)
    b, n = get_tensor_shape(y_idx)
    # Pair every gather index with its batch index for gather_nd.
    b_idx = tf.range(b, dtype=tf.int32) # [b]
    b_idx = tf.expand_dims(b_idx, -1) # [b, 1]
    b_idx = tf.tile(b_idx, [1, n]) # [b, n]
    y_idx = tf.clip_by_value(y_idx, 0, h - 1) # [b, n]
    a_idx = tf.stack([b_idx, y_idx], axis=-1) # [b, n, 2]
    return tf.gather_nd(img, a_idx)
# [b, n, c]
def interp_1d(
    img, # [b, h, c]
    y, # [b, n], 0 <= pos < h, dtype=float32
):
    """Linearly interpolate `img` along axis 1 at fractional positions `y`.

    Samples at floor(y) and floor(y) + 1 (clipped by sample_1d) and blends
    them with weights (1 - frac) and frac.  Returns a [b, n, c] tensor.
    """
    b, h, c = get_tensor_shape(img)
    b, n = get_tensor_shape(y)
    y_0 = tf.floor(y) # [b, n]
    y_1 = y_0 + 1
    _sample_func = lambda y_x: sample_1d(
        img,
        tf.cast(y_x, tf.int32)
    )
    y_0_val = _sample_func(y_0) # [b, n, c]
    y_1_val = _sample_func(y_1)
    w_0 = y_1 - y # [b, n]
    w_1 = y - y_0
    w_0 = tf.expand_dims(w_0, -1) # [b, n, 1]
    w_1 = tf.expand_dims(w_1, -1)
    return w_0 * y_0_val + w_1 * y_1_val
# [b, h, w, 3]
def apply_bg(
    bg, # [b, ?, ?, d*3*4]
    guide, # [b, h, w], 0 <= guide <= 1
    in_img, # [b, h, w, 3]
):
    """Apply a guide-selected per-pixel affine colour transform to `in_img`.

    `bg` is a low-resolution grid holding d affine 3x4 colour matrices per
    position.  NOTE(review): the first unpack below implies `bg` actually
    arrives as rank-6 [b, hbg, wbg, d, 3, 4], not the flattened shape in
    the parameter comment — confirm against the caller.  The grid is
    resized to (h, w), the matrix at depth `guide` is linearly interpolated
    via interp_1d, and applied to the homogeneous pixel [r, g, b, 1].
    Output is clipped to [0, 1].
    """
    b,hbg,wbg,d,i,o = get_tensor_shape(bg)
    bg = tf.reshape(bg,(b,hbg,wbg,d*i*o))
    b, _, _, d34, = get_tensor_shape(bg)
    b, h, w, = get_tensor_shape(guide)
    b, h, w, _, = get_tensor_shape(in_img)
    d = d34 // 3 // 4
    bg = tf.image.resize(bg, [h, w]) # [b, h, w, d*3*4]
    coef = interp_1d(
        tf.reshape(bg, [b * h * w, d, 3 * 4]), # [b*h*w, d, 3*4]
        (d - 1) * tf.reshape(guide, [b * h * w, 1]), # [b*h*w, 1]
    ) # [b*h*w, 1, 3*4]
    coef = tf.reshape(coef, [b, h, w, 3, 4]) # [b, h, w, 3, 4]
    in_img = tf.reshape(in_img, [b, h, w, 3, 1]) # [b, h, w, 3, 1]
    # Append a constant 1 channel so the 3x4 matrix acts affinely.
    in_img = tf.pad(
        in_img,
        [[0, 0], [0, 0], [0, 0], [0, 1], [0, 0]],
        mode='CONSTANT',
        constant_values=1,
    ) # [b, h, w, 4, 1]
    out_img = tf.matmul(coef, in_img) # [b, h, w, 3, 1]
    out_img = tf.reshape(out_img, [b, h, w, 3]) # [b, h, w, 3]
    out_img = tf.clip_by_value(out_img, 0, 1)
    return out_img
|
class Chapter:
    """A single scraped chapter identified by its title and source URL."""

    def __init__(self, title, url):
        self.title = title
        self.url = url
        self.text = None

    def set_body_text(self, text):
        """Attach the chapter's body text once it has been fetched."""
        self.text = text

    def get_post_id(self):
        """Derive the HTML anchor id ("post-<id>") from the chapter URL.

        "threads" URLs encode the id after the last '-', "posts" URLs in
        the second-to-last path segment.  Returns None (and logs) for any
        other URL shape.
        """
        if "threads" in self.url:
            suffix = self.url.split('-')[-1]
            return f"post-{suffix}"
        if "posts" in self.url:
            segment = self.url.split('/')[-2]
            return f"post-{segment}"
        print(f"Unparsable url! {self.url}")
        return None
|
from django.core.management import BaseCommand
from custom.onse.tasks import update_facility_cases_from_dhis2_data_elements
class Command(BaseCommand):
    """Management command wrapping the DHIS2 facility-case update task."""

    help = ('Update facility_supervision cases with indicators collected '
            'in DHIS2 over the last quarter.')

    def handle(self, *args, **options):
        # Run the task synchronously (.apply) so output reaches the console.
        task_kwargs = {'print_notifications': True}
        update_facility_cases_from_dhis2_data_elements.apply(kwargs=task_kwargs)
|
from rest_framework.exceptions import APIException
class NotFoundException(APIException):
    """DRF exception rendered as an HTTP 404 response."""
    status_code = 404
|
from zope.interface import Interface
from zope.interface import implements
from zope.component import adapts, getMultiAdapter
from plone.memoize.instance import memoize
from plone.app.portlets.portlets import navigation
from plone.app.layout.navigation.interfaces import INavtreeStrategy
from plone.app.layout.navigation.interfaces import INavigationQueryBuilder
from plone.app.layout.navigation.root import getNavigationRoot
from plone.app.layout.navigation.navtree import buildFolderTree
from zope import schema
from zope.formlib import form
from Acquisition import aq_inner
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from Products.CMFPlone import utils
from collective.portlet.sitemap import NavigationExtendedPortletMessageFactory as _
try:
from plone.app.form.widgets.uberselectionwidget import UberSelectionWidget
except ImportError:
UberSelectionWidget = None
pass
class INavigationExtendedPortlet(navigation.INavigationPortlet) :
    """Schema for the extended navigation portlet.

    Extends the stock Plone INavigationPortlet with two settings that
    control site-map style rendering.
    """
    displayAsSiteMap = schema.Bool(
        title=_(u"label_display_as_site_map", default=u"Display as Site Map"),
        description=_(u"help_display_as_site_map",
                      default=u"If checked display all folders as a site map"),
        default=True,
        required=False)
    siteMapDepth = schema.Int(
        title=_(u"label_site_map_depth",
                default=u"Site map depth"),
        description=_(u"help_site_map_depth",
                      default=u"If previous field is checked set the site map depth"),
        default=2,
        required=False)
class Assignment(navigation.Assignment):
    """Portlet assignment.
    This is what is actually managed through the portlets UI and associated
    with columns.
    """
    implements(INavigationExtendedPortlet)
    # Class-level defaults; overridden per assignment in __init__.
    title = _(u'Navigation Extended')
    name = u""
    root = None
    currentFolderOnly = False
    includeTop = False
    topLevel = 0
    bottomLevel = 0
    displayAsSiteMap = True
    siteMapDepth = 2
    def __init__(self, name=u"", root=None, currentFolderOnly=False, includeTop=False, topLevel=0, bottomLevel=0, displayAsSiteMap=True, siteMapDepth = 2):
        # Persist the user-configured settings on this assignment instance.
        self.name = name
        self.root = root
        self.currentFolderOnly = currentFolderOnly
        self.includeTop = includeTop
        self.topLevel = topLevel
        self.bottomLevel = bottomLevel
        self.displayAsSiteMap = displayAsSiteMap
        self.siteMapDepth = siteMapDepth
class Renderer(navigation.Renderer):
    """Portlet renderer.
    This is registered in configure.zcml. The referenced page template is
    rendered, and the implicit variable 'view' will refer to an instance
    of this class. Other methods can be added and referenced in the template.
    """
    @memoize
    def getNavTree(self, _marker=[]):
        # NOTE(review): `_marker=[]` is a mutable default argument; it is
        # unused here but kept for signature compatibility with the base class.
        context = aq_inner(self.context)
        # Special case - if the root is supposed to be pruned, we need to
        # abort here
        queryBuilder = getMultiAdapter((context, self.data), INavigationExtendedQueryBuilder)
        strategy = getMultiAdapter((context, self.data), INavtreeStrategy)
        return buildFolderTree(context, obj=context, query=queryBuilder(), strategy=strategy)
    # Page template used to render each nested level of the tree.
    recurse = ViewPageTemplateFile('navigation_extended_recurse.pt')
class AddForm(navigation.AddForm):
    """Portlet add form.
    This is registered in configure.zcml. The form_fields variable tells
    zope.formlib which fields to display. The create() method actually
    constructs the assignment that is being added.
    """
    form_fields = form.Fields(INavigationExtendedPortlet)
    # Use the richer UberSelectionWidget for the root picker when available.
    if UberSelectionWidget is not None:
        form_fields['root'].custom_widget = UberSelectionWidget
    def create(self, data):
        # Fall back to sensible defaults for any field the form omitted.
        return Assignment(name=data.get('name', u""),
                          root=data.get('root', u""),
                          currentFolderOnly=data.get('currentFolderOnly', False),
                          includeTop=data.get('includeTop', False),
                          topLevel=data.get('topLevel', 0),
                          bottomLevel=data.get('bottomLevel', 0),
                          displayAsSiteMap=data.get('displayAsSiteMap', True),
                          siteMapDepth=data.get('siteMapDepth', 2))
class EditForm(navigation.EditForm):
    """Portlet edit form.
    This is registered with configure.zcml. The form_fields variable tells
    zope.formlib which fields to display.
    """
    form_fields = form.Fields(INavigationExtendedPortlet)
    # Use the richer UberSelectionWidget for the root picker when available.
    if UberSelectionWidget is not None:
        form_fields['root'].custom_widget = UberSelectionWidget
class INavigationExtendedQueryBuilder(INavigationQueryBuilder):
    """An object which returns a catalog query when called, used to
    construct a navigation tree.
    """
    def __call__():
        """Returns a mapping describing a catalog query used to build a
        navigation structure.
        """
class NavigationExtendedQueryBuilder(object):
    """Build a navtree query based on the settings in navtree_properties
    and those set on the portlet.

    The query is computed once in __init__ and returned by __call__.
    """
    implements(INavigationExtendedQueryBuilder)
    adapts(Interface, INavigationExtendedPortlet)
    def __init__(self, context, portlet):
        self.context = context
        self.portlet = portlet
        portal_properties = utils.getToolByName(context, 'portal_properties')
        navtree_properties = getattr(portal_properties, 'navtree_properties')
        portal_url = utils.getToolByName(context, 'portal_url')
        # Acquire a custom nav query if available
        customQuery = getattr(context, 'getCustomNavQuery', None)
        if customQuery is not None and utils.safe_callable(customQuery):
            query = customQuery()
        else:
            query = {}
        # Construct the path query
        rootPath = getNavigationRoot(context, relativeRoot=portlet.root)
        currentPath = '/'.join(context.getPhysicalPath())
        # If we are above the navigation root, a navtree query would return
        # nothing (since we explicitly start from the root always). Hence,
        # use a regular depth-1 query in this case.
        if portlet.displayAsSiteMap :
            # Site-map mode: fixed-depth subtree rooted at the nav root.
            query['path'] = {'query' : rootPath, 'depth' : portlet.siteMapDepth}
        else :
            if not currentPath.startswith(rootPath):
                query['path'] = {'query' : rootPath, 'depth' : 1}
            else:
                query['path'] = {'query' : currentPath, 'navtree' : 1}
        topLevel = portlet.topLevel or navtree_properties.getProperty('topLevel', 0)
        if topLevel and topLevel > 0:
            query['path']['navtree_start'] = topLevel + 1
        # XXX: It'd make sense to use 'depth' for bottomLevel, but it doesn't
        # seem to work with EPI.
        # Only list the applicable types
        query['portal_type'] = utils.typesToList(context)
        # Apply the desired sort
        sortAttribute = navtree_properties.getProperty('sortAttribute', None)
        if sortAttribute is not None:
            query['sort_on'] = sortAttribute
        sortOrder = navtree_properties.getProperty('sortOrder', None)
        if sortOrder is not None:
            query['sort_order'] = sortOrder
        # Filter on workflow states, if enabled
        if navtree_properties.getProperty('enable_wf_state_filtering', False):
            query['review_state'] = navtree_properties.getProperty('wf_states_to_show', ())
        self.query = query
    def __call__(self):
        # The query was precomputed in __init__.
        return self.query
|
import RPi.GPIO as GPIO
from time import sleep
# Poll a PIR motion sensor on board pin 16 and report detections.
GPIO.setmode(GPIO.BOARD)
GPIO.setup(16, GPIO.IN)

rearm = 6     # delay (s) for rearming the PIR after a detection
startup = 2   # startup delay (s) so the sensor can settle
delay = 0.1   # polling loop delay (s)

try:
    sleep(startup)
    while True:
        if GPIO.input(16):
            print('DETECTED. Rearming...')
            sleep(rearm)
            print('Ready.')
        sleep(delay)
except KeyboardInterrupt:
    # Ctrl-C is the normal way to stop the loop.
    pass
finally:
    # Always release the pins.  The original bare `except:` silently
    # swallowed every error; now unexpected exceptions still propagate
    # after cleanup.
    GPIO.cleanup()
|
import math
NUMBER_OF_EPSILON = 1000
def dichotomy(function, l, r, epsilon):
    """Minimize a unimodal `function` on [l, r] by the dichotomy method.

    Returns (x_min, iterations, final_interval_length,
    final/previous interval-length ratio).
    """
    interval = r - l
    iteration = 0
    # Probe offset around the midpoint; half of epsilon keeps the
    # bracketing valid down to the requested tolerance.
    delta = epsilon / 2
    left_border = l
    right_border = r
    # FIX: prev_leng was unset when the loop never executed (r - l < epsilon).
    prev_leng = interval
    while abs(right_border - left_border) >= epsilon:
        iteration += 1
        middle = (left_border + right_border) / 2
        # Keep the half that contains the smaller probe value.
        if function(middle - delta) > function(middle + delta):
            left_border = middle
        else:
            right_border = middle
        prev_leng = interval
        interval = right_border - left_border
    # BUG FIX: the original returned `left + right / 2` (missing
    # parentheses), which is not the midpoint of the final bracket.
    return ((left_border + right_border) / 2, iteration,
            right_border - left_border, interval / prev_leng)
def golden_ratio(function, l, r, epsilon):
    """Minimize a unimodal `function` on [l, r] by golden-section search.

    Returns (x_min, epsilon, iterations, final_interval_length,
    final/previous interval-length ratio).
    """
    inv_phi = (math.sqrt(5) - 1) / 2  # ~0.618, the golden ratio conjugate
    x1 = l + (1 - inv_phi) * (r - l)
    x2 = l + inv_phi * (r - l)
    f1 = function(x1)
    f2 = function(x2)
    iters = 0
    interval = r - l
    # FIX: prev_leng was unset when the loop never executed.
    prev_leng = interval
    while interval > epsilon:
        # FIX: compare the cached values instead of re-evaluating
        # function(x1)/function(x2) every iteration (halves the calls).
        if f1 > f2:
            # Minimum lies in [x1, r]; reuse x2 as the new x1.
            l = x1
            x1, f1 = x2, f2
            x2 = l + inv_phi * (r - l)
            f2 = function(x2)
        else:
            # Minimum lies in [l, x2]; reuse x1 as the new x2.
            r = x2
            x2, f2 = x1, f1
            x1 = l + (1 - inv_phi) * (r - l)
            f1 = function(x1)
        prev_leng = interval
        interval = r - l
        iters += 1
    return (l + r) / 2, epsilon, iters, r - l, (r - l) / prev_leng
def fibonacci(function, l, r, epsilon):
    """Minimize a unimodal `function` on [l, r] by Fibonacci search.

    Returns (x_min, iterations, final_interval_length,
    final/previous interval-length ratio).  NOTE: may instead return just
    x1 (a bare float) when the two probe values become exactly equal —
    this early-termination quirk is kept from the original implementation.
    """
    iteration = 0
    n = get_n(l, r, epsilon)
    left_border = l
    right_border = r
    interval = right_border - left_border
    # FIX: prev_leng was unset when the loop never executed.
    prev_leng = interval
    x1 = left_border + get_fibonacci(n) / get_fibonacci(n + 2) * interval
    x2 = left_border + get_fibonacci(n + 1) / get_fibonacci(n + 2) * interval
    fx1 = function(x1)
    fx2 = function(x2)
    for k in range(1, n + 1):
        interval = right_border - left_border
        # FIX: use the cached probe values instead of re-evaluating
        # `function` several times per iteration.
        if fx1 <= fx2:
            # Minimum lies in [left_border, x2].
            right_border = x2
            x2, fx2 = x1, fx1
            x1 = left_border + get_fibonacci(n - k - 1) * (right_border - left_border) / get_fibonacci(n - k + 1)
            fx1 = function(x1)
        else:
            # Minimum lies in [x1, right_border].
            left_border = x1
            x1, fx1 = x2, fx2
            x2 = left_border + get_fibonacci(n - k) * (right_border - left_border) / get_fibonacci(n - k + 1)
            fx2 = function(x2)
        if fx1 == fx2:
            return x1
        prev_leng = interval
        iteration += 1
    # BUG FIX: the original returned `left + right / 2` (missing
    # parentheses) and computed the ratio from the constant full range
    # `r - l` instead of the shrinking bracket.
    return ((left_border + right_border) / 2, iteration,
            right_border - left_border,
            (right_border - left_border) / prev_leng)
def get_fibonacci(n):
    """n-th Fibonacci number via Binet's closed form (rounded to int)."""
    return round(1 / math.sqrt(5) * (((1 + math.sqrt(5)) / 2) ** n - ((1 - math.sqrt(5)) / 2) ** n))
def get_n(left_border, right_border, epsilon):
    """Number of Fibonacci-search steps needed to reach width `epsilon`."""
    argument = math.sqrt(5) * (((right_border - left_border) / epsilon) - 0.5)
    return math.ceil(math.log(argument, ((1 + math.sqrt(5)) / 2)))
def combined_brent(function, l, r, epsilon):
    """Minimize `function` on [l, r] with Brent's method: golden-section
    steps combined with successive parabolic interpolation.

    Returns (x_min, iterations, final_interval_length,
    final/previous interval-length ratio).
    """
    a = l
    c = r
    iteration = 0
    # Initial probe at the golden-section point of [a, c].
    x = w = v = a + 0.381966011 * (c - a)
    fx = fw = fv = function(x)
    d = 0
    e = d
    interval = c - a
    # FIX: prev_len was unset when the loop exited on its first check.
    prev_len = interval
    t = 1e-4
    while True:
        tol = epsilon * abs(x) + t
        # Stop when x is within 2*tol of the middle of the bracket.
        if abs(x - (a + c) / 2) <= 2 * tol - (c - a) / 2:
            break
        # NOTE: from here on `r` is reused as a temporary of the parabolic
        # fit; the right bound is tracked by `c`.
        r = q = p = 0
        if abs(e) > tol:
            # Try a parabolic fit through (x, fx), (w, fw), (v, fv).
            r = (x - w) * (fx - fv)
            q = (x - v) * (fx - fw)
            p = (x - v) * q - (x - w) * r
            q = 2 * (q - r)
            if q > 0:
                p = -p
            q = abs(q)
            r, e = e, d
        if (abs(p) < abs(0.5 * q * r)) and (q * (a - x) < p) and (p < q * (c - x)):
            # Accept the parabolic step.
            d = p / q
            u = x + d
            # Keep the trial point at least tol away from the bounds.
            if (u - a < 2 * tol) and (c - u < 2 * tol):
                d = tol if x < (a + c) / 2 else -tol
        else:
            # Fall back to a golden-section step into the larger half.
            if x < (c + a) / 2:
                e = c - x
            else:
                e = a - x
            d = 0.381966011 * e
        # Never step by less than tol.
        if tol <= abs(d):
            u = x + d
        elif d > 0:
            u = x + tol
        else:
            u = x - tol
        iteration += 1
        fu = function(u)
        if fu <= fx:
            # u is the new best point; shrink the bracket around it.
            if u >= x:
                a = x
            else:
                c = x
            v, w, x = w, x, u
            fv, fw, fx = fw, fx, fu
        else:
            # x stays best; u only shrinks the bracket from one side.
            if u >= x:
                c = u
            else:
                a = u
            if fu <= fw or w == x:
                v, w = w, u
                fv, fw = fw, fu
            elif fu <= fv or v == x or v == w < epsilon:
                # NOTE(review): `v == w < epsilon` is a chained comparison
                # ((v == w) and (w < epsilon)), kept from the original.
                v = u
                fv = fu
        prev_len = interval
        interval = c - a
    # BUG FIX: the original returned `a + c / 2` (missing parentheses).
    return (a + c) / 2, iteration, interval, (c - a) / prev_len
def parabolic(function, l, m, r, epsilon):
    """Minimize `function` by successive parabolic interpolation.

    `m` must satisfy function(m) < function(l) and function(m) < function(r)
    so that (l, m, r) brackets a minimum.  Returns (x_min, iterations,
    final_bracket_length, final/previous bracket-length ratio).
    """
    iteration = 0
    x1, x2, x3 = l, m, r
    # Cache the probe values; the original re-evaluated `function`
    # up to a dozen times per iteration.
    f1_val, f2_val, f3_val = function(x1), function(x2), function(x3)
    prev_len = r - l
    interval = r - l
    xi = 0
    # BUG FIX: `u` was unbound (NameError at return) when the loop body
    # never executed.
    u = x2
    print('start_point', 'end_point', 'length', 'lenght/prev_lenght', 'x1', 'f(x1)', 'x2', 'f(x2)')
    while f2_val < f1_val and f2_val < f3_val:
        # Vertex of the parabola through (x1,f1), (x2,f2), (x3,f3).
        u = x2 - 0.5 * ((x2 - x1) ** 2 * (f2_val - f3_val) - (x2 - x3) ** 2 * (f2_val - f1_val)) / (
            (x2 - x1) * (f2_val - f3_val) - (x2 - x3) * (f2_val - f1_val))
        fu = function(u)
        if fu <= f2_val:
            # u is the new best point.
            if u < x2:
                x3, f3_val = x2, f2_val
            else:
                x1, f1_val = x2, f2_val
            x2, f2_val = u, fu
        else:
            # u only narrows the bracket.
            if x2 < u:
                x3, f3_val = u, fu
            else:
                x1, f1_val = u, fu
        if iteration > 0 and abs(xi - u) < epsilon:
            break
        xi = u
        # BUG FIX: the original printed the module-level functions f1/f2
        # (their reprs) instead of the probe values.
        print(x1, x3, abs(x3 - x1), interval / prev_len, x1, x2,
              f1_val, f2_val)
        iteration += 1
        prev_len = interval
        # BUG FIX: the original tracked the constant full range `r - l`
        # instead of the shrinking bracket [x1, x3].
        interval = x3 - x1
    return u, iteration, x3 - x1, (x3 - x1) / prev_len
def f1(x):
    """Test objective: sin(x) * x^3."""
    return math.sin(x) * math.pow(x, 3)
def f2(x):
    """Test objective: a degree-6 polynomial with several local minima."""
    return (0.007 * x ** 6 - 0.15 * x ** 5 + 1.14 * x ** 4
            - 3.5 * x ** 3 + 2.9 * x ** 2 + 2.95 * x + 2.25)
# Experiment driver: minimize f1 on [-1, 1] with shrinking tolerances.
l = -1
r = 1
e = 0.001
# f = f1
# res = golden_ratio(f1, l, r, e)
# print(golden_ratio(f1, -1, 1, 0.001))
a = 1
for i in range(5):
    # Tolerances 10^-1 .. 10^-5.
    a /= 10
    res1 = combined_brent(f1, l, r, a)
    print(res1)
# Copyright (c) 2019-2023, Jonas Eschle, Jim Pivarski, Eduardo Rodrigues, and Henry Schreiner.
#
# Distributed under the 3-clause BSD license, see accompanying file LICENSE
# or https://github.com/scikit-hep/vector for details.
from __future__ import annotations
import math
import pytest
import vector.backends.object
def test_xy():
    """A Cartesian (x=3, y=4) 2-D vector exposes y directly."""
    vec = vector.backends.object.VectorObject2D(
        azimuthal=vector.backends.object.AzimuthalObjectXY(3, 4)
    )
    assert vec.y == pytest.approx(4)
def test_rhophi():
    """A polar (rho=5, phi=atan2(4,3)) vector converts back to y == 4."""
    vec = vector.backends.object.VectorObject2D(
        azimuthal=vector.backends.object.AzimuthalObjectRhoPhi(5, math.atan2(4, 3))
    )
    assert vec.y == pytest.approx(4)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from random import randint, random, shuffle
import math
from collections import namedtuple
import numpy as np
import matplotlib.pyplot as plt
from copy import copy
Point = namedtuple("Point", ['x', 'y'])
#lengths = None
def length(point1, point2):
    """Euclidean distance between two points with .x/.y attributes."""
    dx = point1.x - point2.x
    dy = point1.y - point2.y
    return math.sqrt(dx ** 2 + dy ** 2)
def total_length(points, nodeCount, solution):
    """Length of the closed tour visiting `points` in `solution` order."""
    tour = length(points[solution[-1]], points[solution[0]])
    for step in range(nodeCount - 1):
        tour += length(points[solution[step]], points[solution[step + 1]])
    return tour
def draw_solution(points, nodeCount, solution, solution_old):
    """Plot two tours for comparison: `solution` in red, `solution_old`
    in blue (requires an interactive matplotlib backend)."""
    orderedXY = np.array([[points[i].x,points[i].y] for i in solution])
    #orderedXY = np.append(orderedXY,orderedXY[0])
    plt.plot(orderedXY[:,0],orderedXY[:,1],'ro-')
    orderedXY = np.array([[points[i].x,points[i].y] for i in solution_old])
    #orderedXY = np.append(orderedXY,orderedXY[0])
    plt.plot(orderedXY[:,0],orderedXY[:,1],'bo-')
def find_greedy(points, nodeCount):
    """Build a nearest-neighbour tour starting from node 0.

    Returns a list of node indices (a permutation of range(nodeCount)).
    """
    solution = [0]
    current_node = 0
    point_used = [False] * nodeCount
    point_used[0] = True
    for _ in range(nodeCount - 1):
        closest_dist = float('inf')
        best_node = None
        for j, point in enumerate(points):
            # BUG FIX: the original also required `i != j`, comparing the
            # *iteration counter* with the node index, which could wrongly
            # exclude a valid nearest neighbour; `point_used` already
            # excludes visited nodes (including current_node).
            if not point_used[j]:
                dist = length(point, points[current_node])
                if dist < closest_dist:
                    closest_dist = dist
                    best_node = j
        solution.append(best_node)
        point_used[best_node] = True
        current_node = best_node
    return solution
def swap(solution, heads):
    """Reverse the tour segment [start, end) of `solution` in place.

    Returns the (mutated) list for convenience.
    """
    start, end = heads
    solution[start:end] = solution[start:end][::-1]
    return solution
def random_heads(nodeCount):
    """Pick two cut indices at least 2 apart, returned as (low, high)."""
    while True:
        first = randint(0, nodeCount - 1)
        second = randint(0, nodeCount - 1)
        # Adjacent or identical cuts would be a no-op swap; redraw.
        if abs(first - second) > 1:
            return (min(first, second), max(first, second))
def boltzmann_factor(solution, points, nodeCount, heads, temp):
    """Metropolis acceptance factor exp(-dE/T) for a 2-opt segment reversal.

    Returns (factor, delta_energy) where delta_energy is the change in
    tour length produced by reversing the segment described by `heads`.
    """
    head_a, head_b = heads
    point_head_a = points[solution[head_a]]
    point_head_b = points[solution[head_b]]
    point_tail_a = points[solution[head_a-1]]
    point_tail_b = points[solution[head_b-1]]
    # Only the two edges at the cut points change under a 2-opt reversal.
    current_length = length(point_head_a, point_tail_a) + length(point_head_b, point_tail_b)
    new_length = length(point_head_a, point_head_b) + length(point_tail_a, point_tail_b)
    delta_energy = new_length - current_length
    try:
        factor = math.exp(-delta_energy / temp)
    except OverflowError:
        # BUG FIX: exp overflows when -dE/T is hugely *positive*, i.e. the
        # move is extremely favourable.  The original bare `except:` set the
        # factor to 0, which *rejected* the best possible moves.
        factor = float('inf')
    return factor, delta_energy
def find_temp_scale(solution, points):
    """Initial annealing temperature: std-dev of consecutive edge lengths."""
    edge_lengths = [length(points[a], points[b])
                    for a, b in zip(solution[1:], solution[:-1])]
    return np.std(edge_lengths)
def metropolis(solution, points, nodeCount, temp):
    """One Metropolis step: propose a random 2-opt move and maybe apply it.

    Returns (solution, delta_length); delta_length is 0 when the move
    was rejected.  The list is mutated in place when accepted.
    """
    heads = random_heads(nodeCount)
    factor, delta_length = boltzmann_factor(solution, points, nodeCount, heads, temp)
    if factor > 1 or random() < factor:
        return swap(solution, heads), delta_length
    return solution, 0
def solve_it(input_data):
    """Solve a TSP instance with simulated annealing over 2-opt moves.

    `input_data`: text whose first line is the node count and whose next
    nodeCount lines are "x y" coordinates.  Returns the grader format:
    "<tour length> 0\n<space-separated tour>".
    """
    #global lengths
    # Sanity-check the segment-reversal helper.
    assert(swap([1,2,3,4,5,6,7],(3,5)) == [1,2,3,5,4,6,7])
    # parse the input
    lines = input_data.split('\n')
    nodeCount = int(lines[0])
    points = []
    for i in range(1, nodeCount+1):
        line = lines[i]
        parts = line.split()
        points.append(Point(float(parts[0]), float(parts[1])))
    #lengths = np.array([[length(points[i], points[j]) for j in range(nodeCount)] for i in range(nodeCount)])
    # build a greedy solution
    # visit the nodes in the order they appear in the file
    #greedy_solution = find_greedy(points, nodeCount)
    greedy_solution = list(range(nodeCount))
    greedy_length = total_length(points, nodeCount, greedy_solution)
    print("Found greedy solution: ",greedy_solution)
    bestYet = greedy_length
    bestSol = copy(greedy_solution)
    all_lengths = [greedy_length]
    best_lengths = [greedy_length]
    temp_list = []
    solution = greedy_solution
    n_rounds = 1
    # Initial temperature: spread of the edge lengths in the starting tour.
    starting_temp = find_temp_scale(solution, points)
    for i in range(n_rounds):
        #shuffle(solution)
        #all_lengths.append(total_length(points, nodeCount, solution))
        #best_lengths.append(bestYet)
        print("starting round %d"%i)
        temp = starting_temp
        n_steps = 20000000
        # Geometric cooling; over n_steps the temperature drops by ~e^-8.
        temp_decrease_factor = (1. - 8./n_steps)
        for _ in range(n_steps):
            temp = temp*temp_decrease_factor
            temp_list.append(temp)
            solution, delta_length = metropolis(solution, points, nodeCount, temp)
            #print(total_length(points, nodeCount, solution))
            # Track the tour length incrementally instead of recomputing it.
            current_length = all_lengths[-1] + delta_length
            all_lengths.append(current_length)
            if current_length < bestYet:
                bestYet = current_length
                bestSol = copy(solution)
            best_lengths.append(bestYet)
    print("best value yet: ",bestYet)
    #plt.figure()
    #plt.plot(all_lengths,'b')
    #plt.plot(best_lengths,'r')
    #plt.figure()
    #plt.plot(temp_list,'b')
    # calculate the length of the tour
    #obj = total_length(points, nodeCount, solution)
    #plt.figure()
    #draw_solution(points, nodeCount, bestSol, bestSol)
    #plt.show()
    #if obj > bestYet:
    #    obj = bestYet
    #    solution = copy(bestSol)
    # prepare the solution in the specified output format
    output_data = '%.2f' % total_length(points, nodeCount, bestSol) + ' ' + str(0) + '\n'
    output_data += ' '.join(map(str, bestSol))
    return output_data
import sys

if __name__ == '__main__':
    # FIX: removed the duplicate `import sys` inside the guard.
    # Read the instance file given on the command line and solve it.
    if len(sys.argv) > 1:
        file_location = sys.argv[1].strip()
        with open(file_location, 'r') as input_data_file:
            input_data = input_data_file.read()
        print(solve_it(input_data))
    else:
        print('This test requires an input file. Please select one from the data directory. (i.e. python solver.py ./data/tsp_51_1)')
|
#!flask/bin/python
from flask import Flask, jsonify
from bs4 import BeautifulSoup
import requests
from nltk.corpus import wordnet as wn
from textblob import TextBlob
from nltk.tokenize import sent_tokenize, word_tokenize
from flask.ext.cors import CORS
# Flask app with CORS enabled so browser front-ends on other origins can call it.
app = Flask(__name__)
CORS(app)
@app.route('/<subject>')
def index(subject):
    """Generate a fill-in-the-blank quiz question for a Wikipedia topic.

    Scrapes the article for `subject`, picks a sentence, blanks out its
    first noun and returns JSON {"question": ..., "answers": [...]}.
    (Python 2 code: note `xrange` below.)
    """
    def extractTextFromWiki(topic):
        """Return the article's paragraph texts as a list of strings."""
        corpus = []
        r = requests.get("https://en.wikipedia.org/wiki/" + topic)
        soup = BeautifulSoup(r.text)
        text = soup.find(attrs={"class": "mw-content-ltr"})
        for i in text.find_all(['p']):
            try:
                corpus.append(str(i.text).strip())
            except:
                pass
        return corpus
    def pickSentences(textList):
        """Join paragraphs 1-4 longer than 200 chars, then sentence-split."""
        sentences = ""
        for i in xrange(1,5):
            candidate = textList[i]
            try:
                if(len(candidate) < 200):
                    continue
                else:
                    sentences += (" " + candidate)
            except:
                pass
        return sent_tokenize(sentences)
    def answerize(word):
        """Return up to 4 WordNet sibling nouns of `word` as distractors."""
        synsets = wn.synsets(word, pos='n')
        if len(synsets) == 0:
            return []
        else:
            synset = synsets[0]
            hypernym = synset.hypernyms()[0]
            hyponyms = hypernym.hyponyms()
            similar_words = []
            counter = 0
            words = 0
            for hyponym in hyponyms:
                if words == 4:
                    break
                counter += 1
                # Take every other hyponym for variety.
                if(counter%2 == 0):
                    similar_word = hyponym.lemmas()[0].name().replace('_', ' ')
                    if similar_word != word:
                        similar_words.append(similar_word)
                        words += 1
            return similar_words
    def questionize(sentenceList):
        """Blank the first noun of sentence 1 and build the JSON payload."""
        questionList = []
        answersList = []
        json = {}
        foundQuestion = False
        #for sentence in sentenceList:
        current = ""
        sentence = sentenceList[1]
        #print sentence
        sentence = TextBlob(sentence)
        for word,POS in sentence.tags:
            if(POS == "NN" and foundQuestion == False):
                current += " " + "__________"
                answers = answerize(word)
                answers.append(word)
                answersList.append(answers)
                foundQuestion = True
            else:
                current += " " + str(word)
        questionList.append(current)
        json["question"] = current
        json["answers"] = answersList[0]
        return jsonify(json)
    corpus = extractTextFromWiki(subject)
    pickedSentences = pickSentences(corpus)
    return questionize(pickedSentences)
@app.route('/query/<query>')
def search(query):
    """Answer a free-text query of the form '<topic>_<keyword>'.

    Scores the topic's Wikipedia paragraphs with a tf-idf-like measure
    against the keyword and returns JSON {"answer": <best paragraph>}.
    (Python 2 code: note the print statement and dict.has_key below.)
    """
    def getInformation(query):
        """Return the article's paragraph texts as a list of strings."""
        r = requests.get("https://en.wikipedia.org/wiki/" + query)
        corpus = []
        soup = BeautifulSoup(r.text)
        text = soup.find(attrs={"class": "mw-content-ltr"})
        for i in text.find_all(['p']):
            try:
                corpus.append(str(i.text).strip())
            except:
                pass
        return corpus
    def tfidf(query, words):
        """Return the paragraph of `query`'s article best matching `words`."""
        documents = []
        corpusFreq = {}
        def addDocument(doc, wordFreq):
            # Record per-document relative term frequencies and accumulate
            # corpus-wide counts in corpusFreq.
            dict = {}
            for word in wordFreq:
                dict[word] = 1+dict.get(word, 0.0)
                corpusFreq[word] = 1+corpusFreq.get(word, 0.0)
            for k in dict:
                dict[k] = dict[k]/float(len(wordFreq))
            documents.append([doc, dict])
        # Populate documents and corpusFreq
        docs = getInformation(query)
        for i in docs:
            print word_tokenize(i)
            addDocument(str(i), word_tokenize(i))
        def similarities(queries):
            """Score every document against `queries`; return the best one."""
            highScore = 0
            highDoc = "No relevant information!"
            queryFreq = {}
            for word in queries:
                queryFreq[word] = queryFreq.get(word, 0.0)+1
            for k in queryFreq:
                queryFreq[k] = queryFreq[k] / float(len(queries))
            for doc in documents:
                score = 0.0
                doc_dict = doc[1]
                for word in queryFreq:
                    if doc_dict.has_key(word):
                        score += (queryFreq[word]/corpusFreq[word])+(doc_dict[word] / corpusFreq[word])
                if(score > highScore):
                    highScore = score
                    highDoc = doc[0]
            return highDoc
        return similarities(words)
    queries = query.split('_')
    json = {"answer": tfidf(queries[0], [queries[1]])}
    return jsonify(json)
if __name__ == '__main__':
    # Development server, reachable from other hosts on the network.
    app.run(host='0.0.0.0')
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 19 13:52:15 2017
@author: william2
"""
import sqlite3
db=sqlite3.connect("GrandeFortaleza.db")
c=db.cursor()
CHANGE="DELETE FROM ways_tags WHERE key='fixme';"
c.execute(CHANGE)
CHANGE="DELETE FROM nodes_tags WHERE key='fixme';"
c.execute(CHANGE)
db.commit()
db.close() |
# 10/04/2020 --- DD/MM/YYYY
# https://www.hackerrank.com/challenges/mark-and-toys/problem?h_l=interview&h_r=next-challenge&h_v=zen&playlist_slugs%5B%5D%5B%5D=interview-preparation-kit&playlist_slugs%5B%5D%5B%5D=sorting
def maximumToys(prices, k):
    """Return the maximum number of toys purchasable with budget `k`.

    Greedy: buy the cheapest toys first.  FIX: iterates a sorted copy
    instead of sorting the caller's list in place.
    """
    count = 0
    for price in sorted(prices):
        if k < price:
            break
        k -= price
        count += 1
    return count
with open("./test.txt", "r") as inFile:
lines = inFile.readlines()
print(maximumToys(list(map(int, lines[1].rstrip().split())), int(
lines[0].split()[1])))
|
"""
每个磁盘大小d[i]
每个分区大小p[i]
"""
def is_allocable(d: list, q: list):
    """Greedily assign each partition in `q` to a disk in `d`.

    Disks are scanned left to right with an index that never moves back;
    `d` is mutated as capacity is consumed.  Returns True when every
    partition fits, False otherwise.
    """
    disk_index = 0
    disk_count = len(d)
    for partition_size in q:
        # Advance to the first remaining disk able to hold this partition.
        while disk_index < disk_count and partition_size > d[disk_index]:
            disk_index += 1
        if disk_index >= disk_count:
            return False
        # Allocate the partition on this disk.
        d[disk_index] -= partition_size
    return True
if __name__ == '__main__':
    # Example: five partitions packed onto three disks (expected: True).
    d = [120, 120, 256]
    p = [60, 60, 80, 90, 100]
    print(is_allocable(d, p))
|
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 10 19:51:40 2017
@author: LALIT ARORA
"""
# This is a Credential class to send credentials to the mail app.
class cred:
    """Credential provider for the mail app.

    Replace the placeholder return values with real credentials (better:
    read them from environment variables instead of source code).
    """

    # FIX: the original methods had no `self` parameter and could only be
    # called on the class; @staticmethod makes both cred.sendid() and
    # cred().sendid() work (backward compatible).
    @staticmethod
    def sendid():
        """Sender's e-mail address."""
        return "ENTER SENDER'S EMAIL ID"

    @staticmethod
    def sendpass():
        """Sender's e-mail password."""
        return "ENTER YOUR PASSWORD HERE"
|
#!/usr/bin/env python3
"""
This is a good foundation to build your robot code on
"""
import wpilib
import rev
from networktables import NetworkTables
from networktables.util import ntproperty
import math
from wpilib.drive import DifferentialDrive
class MyRobot(wpilib.TimedRobot):
    """Tank-drive robot: six brushless drive motors (three per side) and a
    pneumatic two-position gearbox toggled from joystick button 1."""
    def robotInit(self):
        """
        This function is called upon program startup and
        should be used for any initialization code.
        """
        # Three CAN SparkMax controllers per side, grouped per side.
        self.lt_motor = rev.CANSparkMax(2, rev.MotorType.kBrushless)
        self.lf_motor = rev.CANSparkMax(3, rev.MotorType.kBrushless)
        self.lb_motor = rev.CANSparkMax(1, rev.MotorType.kBrushless)
        self.rt_motor = rev.CANSparkMax(5, rev.MotorType.kBrushless)
        self.rf_motor = rev.CANSparkMax(4, rev.MotorType.kBrushless)
        self.rb_motor = rev.CANSparkMax(6, rev.MotorType.kBrushless)
        self.left = wpilib.SpeedControllerGroup(self.lt_motor, self.lf_motor, self.lb_motor)
        self.right = wpilib.SpeedControllerGroup(self.rt_motor, self.rf_motor, self.rb_motor)
        self.drive = DifferentialDrive(self.left, self.right)
        # The top motors face the other way, so they are inverted.
        self.lt_motor.setInverted(True)
        self.rt_motor.setInverted(True)
        self.joystick = wpilib.Joystick(0)
        # Edge-detection state for the gear-switch button.
        self.previous_button = False
        self.gear_switcher = wpilib.DoubleSolenoid(0, 1)
    def autonomousInit(self):
        """This function is run once each time the robot enters autonomous mode."""
        pass
    def autonomousPeriodic(self):
        """This function is called periodically during autonomous."""
        pass
    def teleopPeriodic(self):
        """This function is called periodically during operator control."""
        #self.rev_motor.set(-self.joystick.getRawAxis(1))
        #print(self.rev_motor.getEncoder().getVelocity())
        # Tank drive from the two stick Y axes (1 and 5), inverted.
        self.drive.tankDrive(-self.joystick.getRawAxis(1), -self.joystick.getRawAxis(5))
        #self.drive.arcadeDrive(self.joystick.getRawAxis(2), -self.joystick.getRawAxis(1))
        current_button = self.joystick.getRawButton(1)
        # NOTE(review): this fires on button *release* (was pressed, now not).
        clicked = self.previous_button and not current_button
        if clicked:
            # Toggle the gearbox solenoid between its two positions.
            if self.gear_switcher.get() == wpilib.DoubleSolenoid.Value.kForward:
                self.gear_switcher.set(wpilib.DoubleSolenoid.Value.kReverse)
            else:
                self.gear_switcher.set(wpilib.DoubleSolenoid.Value.kForward)
        self.previous_button = current_button
    def deadzone(self, value, min = .2):
        # Zero out small stick values, then rescale so the output is
        # continuous at the deadzone edge.
        # NOTE(review): `min` shadows the builtin; renaming it would change
        # the keyword interface, so it is kept.
        if -min < value < min:
            return 0
        else:
            scaled_value = (abs(value) - min) / (1 - min)
            return math.copysign(scaled_value, value)
if __name__ == "__main__":
wpilib.run(MyRobot) |
from TuringMachine import *
from FileReader import readFile
filename = " "
while len(filename) > 0:
try:
filename = input("Enter filename of Turing Machine (Leave blank to exit): ")
tm = readFile(filename)
inputStr = " "
while len(inputStr) > 0:
try:
inputStr = input("Enter space separated input (integers only; leave blank to exit): ")
inputs = [int(x) for x in inputStr.split(" ")]
if len(inputStr) > 0:
tm.run(inputs)
except ValueError:
if len(inputStr) > 0:
print("Input format error. Try again.")
except FileNotFoundError:
if len(filename) > 0:
print("File not found. Input another filename.") |
import tkinter as tk
import random
class Grid:
    """The 2048 board model: an n x n matrix of cell values (0 = empty)."""

    def __init__(self, n):
        self.size = n
        self.cells = self.generate_empty_grid()
        # Flags updated by the move operations; read by the controller.
        self.compressed = False
        self.merge = False
        self.moved = False
        self.current_score = 0

    def generate_empty_grid(self):
        """Return a fresh size x size matrix of zeros."""
        return [[0] * self.size for _ in range(self.size)]

    def retrieve_empty_cell(self):
        """Return the (row, col) coordinates of every empty cell."""
        return [(i, j)
                for i in range(self.size)
                for j in range(self.size)
                if self.cells[i][j] == 0]

    def random_cell(self):
        """Place a 2 in a randomly chosen empty cell.

        BUG FIX: the original used cell[0] for both coordinates, so new
        tiles could only ever appear on the main diagonal.
        """
        i, j = random.choice(self.retrieve_empty_cell())
        self.cells[i][j] = 2

    def left_compress(self):
        """Slide all non-zero cells of each row to the left.

        Sets self.compressed when any cell actually moved.
        """
        self.compressed = False
        compacted = self.generate_empty_grid()
        for i in range(self.size):
            count = 0
            for j in range(self.size):
                if self.cells[i][j] != 0:
                    compacted[i][count] = self.cells[i][j]
                    if j != count:
                        self.compressed = True
                    count = count + 1
        self.cells = compacted
class GamePanel:
    """Tk view for the 2048 grid: a window of Label cells kept in sync
    with a Grid model via paint()."""
    def paint(self):
        # Refresh every cell label from the grid model.
        for i in range(self.grid.size):
            for j in range(self.grid.size):
                if self.grid.cells[i][j]==0:
                    self.cell_labels[i][j].config(
                        text='',
                        bg=GamePanel.EMPTY_CELL_COLOR)
                else:
                    # Values above 16 share the catch-all 'beyond' colours.
                    if self.grid.cells[i][j]>16:
                        self.cell_labels[i][j].config(
                            text=str(self.grid.cells[i][j]),
                            bg=GamePanel.CELL_BACKGROUND_COLOR_DICT.get('beyond'),
                            fg=GamePanel.CELL_COLOR_DICT.get('beyond'))
                    else:
                        self.cell_labels[i][j].config(
                            text=str(self.grid.cells[i][j]),
                            bg=GamePanel.CELL_BACKGROUND_COLOR_DICT.get(str(self.grid.cells[i][j])),
                            fg=GamePanel.CELL_COLOR_DICT.get(str(self.grid.cells[i][j])))
    # Colour scheme, font and key bindings (colours looked up by the
    # stringified cell value).
    BACKGROUND_COLOR="#2852b2"
    EMPTY_CELL_COLOR="#235a45"
    CELL_BACKGROUND_COLOR_DICT={
        '0':"#72527a",
        '2':"#408b7a",
        '4':"#ba8b7a",
        '8':"#ba7435",
        '16':"#3ea04f",
        '32':"#be58ae",
        '64':"#49586a",
        '128':"#f9f97e",
        'beyond':"#000000"
    }
    CELL_COLOR_DICT={
        '0':"#553333",
        '2':"#553333",
        '4':"#553333",
        '8':"#553333",
        '16':"#553333",
        '32':"#553333",
        '64':"#553333",
        '128':"#553333",
        'beyond':"#ffffff"
    }
    FONT=("Verdana",24,'bold')
    UP_KEYS=('w','W','Up')
    LEFT_KEYS=('a','A','Left')
    DOWN_KEYS=('s','S','Down')
    RIGHT_KEYS=('d','D','Right')
    def __init__(self,grid):
        self.grid=grid
        self.window=tk.Tk()
        self.window.title("2048")
        self.window.geometry('800x800')
        self.background=tk.Frame(self.window,bg=GamePanel.BACKGROUND_COLOR)
        self.cell_labels=[]
        # One Label per grid cell, laid out with the Tk grid manager.
        for i in range(self.grid.size):
            self.cell_labels.append([])
            for j in range(self.grid.size):
                label=tk.Label(self.background,text='0',bg=GamePanel.EMPTY_CELL_COLOR,font=GamePanel.FONT,width=4,height=2)
                label.grid(row=i,column=j,padx=5,pady=5)
                self.cell_labels[i].append(label)
        self.background.grid()
class Game:
    """Controller: seeds the board, binds keys and runs the Tk main loop."""
    def __init__(self,grid,panel):
        self.grid=grid
        self.panel=panel
        self.start_cell_num=2
        # Seed the board with the starting tiles.
        for i in range(self.start_cell_num):
            self.grid.random_cell()
        self.panel.paint()
        self.panel.window.bind('<Key>',self.key_handler)
        self.panel.window.mainloop()
    def key_handler(self,event):
        """Dispatch WASD/arrow keys to the four move handlers."""
        key_value=event.keysym
        # NOTE(review): the format string repeats the key name twice.
        print('{}{}key is pressed'.format(key_value,key_value))
        if key_value in GamePanel.UP_KEYS:
            self.up()
        elif key_value in GamePanel.DOWN_KEYS:
            self.down()
        elif key_value in GamePanel.LEFT_KEYS:
            self.left()
        elif key_value in GamePanel.RIGHT_KEYS:
            self.right()
        else:
            pass
    # The four move handlers are not implemented yet.
    def up(self):
        pass
    def down(self):
        pass
    def right(self):
        pass
    def left(self):
        pass
# Wire up a 5x5 board and start the Tk event loop (blocks until closed).
grid=Grid(5)
panel=GamePanel(grid)
Gamepanel=Game(grid,panel)
|
# Judge-style script: for each of T test cases, read seven integers
# a b c d e f g and report whether a**d + b**e + c**f equals g.
# NOTE(review): nothing from math is referenced below (** is exact
# integer arithmetic) — the wildcard import looks unused; confirm.
from math import *
T = int(input())
for cas in range(T):
    a,b,c,d,e,f,g = map(int,input().split())
    if(a**d+b**e+c**f==g):
        print("Yes")
    else:
        print("No")
|
import tensorflow as tf
import tensorflow.contrib.slim as slim
import scipy.io as sio
import matplotlib.pyplot as plt
def attention(x, ch, scope='attention', reuse=False,bs=10):
    """Self-attention block over the spatial positions of `x`.

    Projects `x` to query/key/value with 1x1 convs, computes an
    [bs, N, N] attention map over the N = h*w positions, and blends the
    attended output back into `x` through a learned scalar `gamma`
    initialised to 0 (so the block starts as an identity).
    """
    with tf.variable_scope(scope, reuse=reuse):
        # 1x1 projections; query/key are reduced to ch // 8 channels.
        f = slim.conv2d(x, ch // 8, 1, stride=1, scope='f_conv')
        g = slim.conv2d(x, ch // 8, 1, stride=1, scope='g_conv')
        h = slim.conv2d(x, ch, 1, stride=1, scope='h_conv')
        # Flatten the spatial grid to N = h * w positions.
        flat_f = tf.reshape(f, shape=[bs, -1, ch // 8])
        flat_g = tf.reshape(g, shape=[bs, -1, ch // 8])
        flat_h = tf.reshape(h, shape=[bs, -1, ch])
        s = tf.matmul(flat_f, flat_g, transpose_b=True)  # [bs, N, N]
        beta = tf.nn.softmax(s, dim=-1)  # attention map
        o = tf.matmul(beta, flat_h)  # [bs, N, C]
        gamma = tf.get_variable("gamma", [1], initializer=tf.constant_initializer(0.0))
        o = tf.reshape(o, shape=x.shape)  # [bs, h, w, C]
        x = gamma * o + x
        return x
def inference(images, keep_probability, phase_train=True,
        bottleneck_layer_size=128, weight_decay=0.0, reuse=None):
    """Build the generator under a shared slim arg scope.

    All conv / fc / transpose-conv layers get Xavier init, L2 weight
    decay and batch norm configured below; the actual architecture is
    delegated to encoder_decoder().
    NOTE(review): bottleneck_layer_size is accepted but never used here
    or in encoder_decoder — confirm whether it can be dropped.
    """
    batch_norm_params = {
        # Decay for the moving averages.
        'decay': 0.995,
        # epsilon to prevent 0s in variance.
        'epsilon': 0.001,
        # force in-place updates of mean and variance estimates
        'updates_collections': None,
        'scale':True,
        'is_training':phase_train,
        # Moving averages ends up in the trainable variables collection
        'variables_collections': [tf.GraphKeys.TRAINABLE_VARIABLES],
    }
    with slim.arg_scope([slim.conv2d, slim.fully_connected,slim.conv2d_transpose],
                        weights_initializer=slim.initializers.xavier_initializer(),
                        weights_regularizer=slim.l2_regularizer(weight_decay),
                        normalizer_fn=slim.batch_norm,
                        normalizer_params=batch_norm_params):
        return encoder_decoder(images, is_training=phase_train,
              dropout_keep_prob=keep_probability,reuse=reuse)
def encoder_decoder(inputs, is_training=True,
                    dropout_keep_prob=0.8,
                    reuse=None,
                    scope='generator'):
    """U-Net style encoder/decoder with skip connections.

    Encoder: 5 conv stages with max-pooling, feature maps cached in
    end_points for the skip concats. Decoder: transpose convs back up,
    concatenating the matching encoder stage at each level, with one
    self-attention block at the 128-channel level. Returns three
    sigmoid heads [out6, out12, out24] with 6/12/24 channels, the first
    two fed back (residual-style) into the trunk before the next head.
    NOTE(review): dropout_keep_prob only configures the arg_scope —
    slim.dropout is never applied in this body; confirm intended.
    """
    end_points = {}
    with tf.variable_scope(scope, 'generator', [inputs], reuse=reuse):
        with slim.arg_scope([slim.batch_norm, slim.dropout],
                            is_training=is_training):
            with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
                                stride=1, padding='SAME'):
                ##################### encoder ##############################################
                net = slim.conv2d(inputs, 32, 3, stride=1, padding='SAME',scope='en_1_1')
                net=slim.conv2d(net, 32, 3, stride=1, padding='SAME',scope='en_1_2')
                net = slim.conv2d(net, 32, 3, stride=1, padding='SAME', scope='en_1_3')
                end_points['encode_1'] = net #bs*200*512*32
                net=slim.max_pool2d(net,2,stride=2,padding='SAME',scope='Pool1')
                #bs*100*256*32
                net = slim.conv2d(net, 64, 3, stride=1, padding='SAME', scope='en_2_1')
                net = slim.conv2d(net,64, 3, stride=1, padding='SAME', scope='en_2_2')
                net = slim.conv2d(net,64, 3, stride=1, padding='SAME', scope='en_2_3')
                end_points['encode_2'] = net#(bs, 50, 135, 64)
                net = slim.max_pool2d(net, 2, stride=2, padding='SAME', scope='Pool2')
                #(bs, 50, 128, 64)
                net = slim.conv2d(net, 128, 3, stride=1, padding='SAME', scope='en_3_1')
                net = slim.conv2d(net,128, 3, stride=1, padding='SAME', scope='en_3_2')
                net = slim.conv2d(net,128, 3, stride=1, padding='SAME', scope='en_3_3')
                end_points['encode_3'] = net
                net = slim.max_pool2d(net, 2, stride=2, padding='VALID', scope='Pool3')
                #(bs, 25, 64, 128)
                #
                net = slim.conv2d(net, 256, 3, stride=1, padding='SAME', scope='en_4_1')
                net = slim.conv2d(net,256, 3, stride=1, padding='SAME', scope='en_4_2')#(bs, 12, 34, 256)
                net = slim.conv2d(net,256, 3, stride=1, padding='SAME', scope='en_4_3')
                end_points['encode_4'] = net
                net = slim.max_pool2d(net, 2, stride=2, padding='SAME', scope='Pool4')
                # (bs, 13, 32, 256)
                net=slim.conv2d(net, 512, 3, stride=1, padding='SAME', scope='en_5_1')
                net=slim.conv2d(net, 512, 3, stride=1, padding='SAME', scope='en_5_2')
                end_points['encode_5'] = net
                net = slim.max_pool2d(net, 2, stride=2, padding='SAME', scope='Pool5')
                net=slim.conv2d(net, 1024, 3, stride=1, padding='SAME', scope='en_6')
                net = slim.conv2d(net, 1024, 3, stride=1, padding='SAME', scope='en_7')
                # ##################### decoder: upsample + skip concat at each level #####
                net = slim.conv2d_transpose(net, 512, 2, 2, padding='VALID')
                net=tf.concat([net,end_points['encode_5']],3)
                net = slim.conv2d(net, 512, 3, stride=1)
                net = slim.conv2d(net, 512, 3, stride=1)
                net=slim.conv2d_transpose(net,256,2,2,padding='VALID')
                net=tf.concat([net,end_points['encode_4']],3)
                net=slim.conv2d(net,256,3,stride=1)
                net=slim.conv2d(net,256,3,stride=1)
                net=slim.conv2d(net,256,3,stride=1)
                net=slim.conv2d_transpose(net,128,2,2,padding='VALID')
                net = tf.concat([net, end_points['encode_3']], 3)
                net = slim.conv2d(net, 128, 3, stride=1)
                net = slim.conv2d(net, 128, 3, stride=1)
                net=slim.conv2d(net,128,3,stride=1)
                # Self-attention at the 128-channel decoder level.
                net = attention(net, 128, scope='att')
                #(bs, 50, 128, 128)
                net=slim.conv2d_transpose(net,64,2,2,padding='SAME')
                net = tf.concat([net, end_points['encode_2']], 3)
                net = slim.conv2d(net, 64, 3, stride=1)
                net = slim.conv2d(net, 64, 3, stride=1)
                net=slim.conv2d(net,64,3,stride=1)
                #bs,100,256,64
                net = slim.conv2d_transpose(net, 32, 2, 2, padding='SAME')
                net = tf.concat([net, end_points['encode_1']], 3)
                net = slim.conv2d(net, 32, 3, stride=1)
                net = slim.conv2d(net, 32, 3, stride=1)
                net=slim.conv2d(net,32,3,stride=1)
                # bs,200,512,32
                # Multi-scale heads: each head's output is concatenated
                # back into the trunk before producing the next head.
                res1=net
                out6=slim.conv2d(net,6,3,stride=1,activation_fn=tf.nn.sigmoid)
                net = tf.concat([res1, out6], 3)
                net = slim.conv2d(net, 32, 3, stride=1)
                res2=net
                out12=slim.conv2d(net,12,3,stride=1,activation_fn=tf.nn.sigmoid)
                net = tf.concat([res2, out12], 3)
                net = slim.conv2d(net, 32, 3, stride=1)
                out24=slim.conv2d(net,24,1,stride=1,activation_fn=tf.nn.sigmoid)
                return [out6,out12,out24]
|
# -*- coding: 850 -*-
from datetime import datetime, timedelta, date
from pytz import timezone
from openerp import SUPERUSER_ID
from openerp import api, fields, models, _
import openerp.addons.decimal_precision as dp
from openerp.tools import float_is_zero, float_compare, DEFAULT_SERVER_DATETIME_FORMAT
from openerp.exceptions import except_orm, Warning, RedirectWarning, UserError
from dateutil.parser import parse as parse_date
import random
import re
import json
import pprint
#action_delito_robo_perfil_genero
#action_delito_robo_perfil_edad
#action_delito_robo_perfil_repartidor
#action_delito_robo_perfil_discapacidad
#action_delito_robo_tiempo_hora
#action_delito_robo_tiempo_clima
#action_delito_robo_donde_municipio
#action_delito_robo_donde_lugar
#action_delito_robo_objeto_objeto
#action_delito_robo_objeto_monto
#action_delito_robo_pr_numero
#action_delito_robo_pr_arma
#action_delito_robo_pr_violencia
#action_delito_robo_pr_pr
#action_delito_robo_circunstancias_testigo
#action_delito_robo_circunstancias_camara
#action_delito_robo_policia_policia
#action_delito_robo_policia_tiempo
#action_delito_robo_policia_entrevista
#action_delito_robo_policia_cadena
class imco_norma_delitos_jurisdiccion_entidades_requeridas(models.Model):
    _inherit = "imco.norma.delitos.jurisdiccion.entidades.requeridas"

    def clean_text(self,text):
        """Strip HTML tags and surrounding whitespace from `text`."""
        cleanr = re.compile('<.*?>')
        cleantext = re.sub(cleanr, '', text)
        return cleantext.strip()

    def search_text_in_entidades_alias(self, text, codigo):
        """Resolve `text` to a single alias value for entity `codigo`.

        Matches either by full-text search on the alias name or by exact
        name equality. Returns the value only when exactly one distinct
        alias matches, otherwise False.
        """
        # Fix: parentheses around the OR are required — AND binds tighter
        # than OR in SQL, so previously the trailing "OR %s = a.name"
        # escaped the join and codigo filters and could match aliases of
        # ANY entity (even unjoined rows).
        sql = "SELECT distinct a.valor"
        sql += " FROM imco_norma_nltk_entidades e, imco_norma_nltk_entidades_alias a"
        sql += " WHERE e.id = a.entidad_id AND e.codigo = %s AND"
        sql += " ( to_tsvector('spanish', %s ) @@ to_tsquery('spanish', replace(a.name, ' ' , '_') ) OR"
        sql += " %s = a.name )"
        self.env.cr.execute(sql, (codigo, text, text) )
        r = self.env.cr.dictfetchall()
        if len(r) != 1:
            return False
        else:
            return r[0]["valor"]

    def analisis_mensaje_dialogflow_manejo_unknown(self, text, session_id, message=None, context = None):
        """Run Dialogflow analysis on `text`; on input.unknown, fall back
        to a direct alias lookup for the entity code embedded in `context`.

        Returns the (possibly augmented) analysis dict.
        """
        analisis = message.analisis_dialogflow(text = text, session_id = session_id, context = context)
        if analisis["action"] == "input.unknown":
            # The last "_"-separated token of the context is the entity code.
            codigo = context.split("_")[-1]
            # Same precedence fix as search_text_in_entidades_alias;
            # debug prints removed.
            sql = "SELECT distinct a.valor"
            sql += " FROM imco_norma_nltk_entidades e, imco_norma_nltk_entidades_alias a"
            sql += " WHERE e.id = a.entidad_id AND e.codigo = %s AND"
            sql += " ( to_tsvector('spanish', %s ) @@ to_tsquery('spanish', replace(a.name, ' ' , '_') ) OR"
            sql += " %s = a.name )"
            self.env.cr.execute(sql, (codigo, text, text) )
            r = self.env.cr.dictfetchall()
            analisis["entidades"] = {}
            for x in r:
                if codigo not in analisis["entidades"]:
                    analisis["entidades"][codigo] = []
                    analisis["entidades"][codigo].append(x["valor"])
                else:
                    # NOTE(review): bails out after storing a single value,
                    # mirroring the "exactly one match" rule above; if all
                    # matches should be collected, the append belongs
                    # outside the `if` — confirm before changing.
                    return analisis
        return analisis
|
from light.effect.resolved_effect import ResolvedEffect
from light.rgbw import RGBW
from light.effect.effect_priority import EffectPriority
class PartialEffect:
    """Base class for partial effects.

    The defaults describe an inert effect: not a modifier, not temporal,
    never complete, no ambient duration, resolving to an empty
    ResolvedEffect.
    """

    def __init__(self, startTime=0):
        # When the effect begins; 0 means "from the start".
        self.startTime = startTime
        self.isModifier = False

    def getEffect(self):
        """Resolve this partial effect to a concrete (empty) effect."""
        return ResolvedEffect(None, None, None)

    def isTemporal(self):
        """Whether the effect varies over time; base effects do not."""
        return False

    def isComplete(self, t):
        """Whether the effect has finished at time `t`; base effects never do."""
        return False

    def getAmbientDuration(self):
        """Extra ambient run-on time after the effect; none by default."""
        return 0
# Module file for implementation of ID3 algorithm.
import pandas as pd
import numpy as np
from helper import *
from tree import Node
import pickle
# You can add optional keyword parameters to anything, but the original
# interface must work with the original test file.
# You will of course remove the "pass".
import os, sys
import numpy
# You can add any other imports you need.
class DecisionTree:
    """ID3 decision-tree classifier.

    Categorical attributes branch on every value observed in training;
    float64 attributes are converted to a binary <=midpoint split chosen
    by information gain (helpers binary_split_cont / info_gain_df come
    from the `helper` module). The trained tree is a graph of `Node`
    objects stored in self.model and is picklable via save()/load_from.
    """
    def __init__(self, load_from=None):
        # Fill in any initialization information you might need.
        #
        # If load_from isn't None, then it should be a file *object*,
        # not necessarily a name. (For example, it has been created with
        # open().)
        self.model = None
        print("Initializing classifier.")
        if load_from is not None:
            print("Loading from file object.")
            self.model = pickle.load(load_from)
    def _id3(self, X, attrs, target_attr, unique_values, depth = 1):
        """
        Takes in a dataframe (or subset) `X`
        that we want to split on one of `attrs`
        Target variable (class) we want to predict is in `target_attr`
        `unique_values` is a dictionary (attribute, list of unique values for attriute)
        Not sure if this is the best way to do it, but this will ensure that we add
        each value we see in the training set to every branch.
        """
        # All positive or all negative examples
        root = Node()
        root.depth = depth
        if len(X.groupby(target_attr)) == 1:
            # Return single-node tree Root with label = first values in X[target_attr]
            if len(X[target_attr]) > 0:
                root.label = X[target_attr].iloc[0]
            return root
        if len(attrs) <= 0:
            # label = most common value of the target attribute in the examples.
            root.label = X[target_attr].value_counts().idxmax()
            print("Get max category!", root)
            return root
        # Compute the maximum information gain attribute
        ig_max = -1
        max_attr = None
        # Maps "<attr>_split" column name -> numeric midpoint chosen for it.
        col_midpoint = {}
        new_attrs = list(attrs.copy())
        for a in attrs:
            # Continuous data
            if X[a].dtype == 'float64':
                # Convert attribute to binary split with best information gain
                (max_gain, max_midpoint, max_col) = binary_split_cont(X, a, target_attr)
                # Rename the now binary column with _split appended to name
                # NOTE(review): this assigns into the caller's DataFrame /
                # unique_values dict (shared mutation) — confirm acceptable.
                col_name = a + "_split"
                X[col_name] = max_col
                col_midpoint[col_name] = max_midpoint
                # Update the unique values object with new column info
                unique_values[col_name] = X[col_name].unique()
                new_attrs.remove(a)
                new_attrs.append(col_name)
                a = col_name
            ig = info_gain_df(X, a, target_attr)
            if ig > ig_max:
                ig_max = ig
                max_attr = a
        # Remove _split from column name
        if max_attr in col_midpoint:
            root.child_split = col_midpoint[max_attr]
            root.child_attr = max_attr[:-6]
            root.continuous_child = True
        else:
            root.child_attr = max_attr
        new_attrs.remove(max_attr)
        # Compute all the possible values for the attribute
        for u in unique_values[max_attr]:
            # Set each of the children to the results from
            # _id3 on
            # X[max_attr == u] as the data frame
            # Remove the current attribute from the array
            examples = X[X[max_attr] == u]
            root.children[u] = Node()
            if len(examples) <= 0:
                # Add a leaf node with label = most common target value in the examples
                root.children[u].label = X[target_attr].value_counts().idxmax()
            else:
                root.children[u] = self._id3(examples, new_attrs, target_attr, unique_values, depth+1)
            # Set properties common to both cases
            root.children[u].value = u
            root.children[u].attr = max_attr
            # We have a continuous variable
            if max_attr in col_midpoint:
                root.children[u].split_value = col_midpoint[max_attr]
                # Ugly way to remove the _split (6 characters) that we added to end of name
                root.children[u].attr = max_attr[:-6]
        return root
    def train(self, X, y, attrs, prune=False):
        # Doesn't return anything but rather trains a model via ID3
        # and stores the model result in the instance.
        # X is the training data, y are the corresponding classes the
        # same way "fit" worked on SVC classifier in scikit-learn.
        # attrs represents the attribute names in columns order in X.
        #
        # Implementing pruning is a bonus question, to be tested by
        # setting prune=True.
        #
        # Another bonus question is continuously-valued data. If you try this
        # you will need to modify predict and test.
        joined_df = pd.concat([X, y], axis=1)
        # Compute the possible values for each attribute
        # Store in dictionary
        unique_values = {a: joined_df[a].unique() for a in joined_df}
        model = self._id3(joined_df, attrs, y.name, unique_values)
        self.model = model
    def _predict_one(self, instance):
        "Returns the class of a single given instance."
        current_node = self.model
        # NOTE(review): truthiness test — a falsy class label (0, "",
        # False) at a leaf would be treated as "no label" and descend
        # further; `label is None` would be safer if labels can be falsy.
        while not current_node.label:
            if current_node.continuous_child:
                # Need to put split value into parent, not child
                val = instance[current_node.child_attr] <= current_node.child_split
            else:
                val = instance[current_node.child_attr]
            current_node = current_node.children[val]
        return current_node.label
    def predict(self, instance):
        # Returns the class of a given instance.
        # Raise a ValueError if the class is not trained.
        # (Despite the name, `instance` is a DataFrame: one prediction
        # per row, applied via _predict_one.)
        if not self.model:
            raise ValueError("Model is not trained.")
        preds = instance.apply(self._predict_one, axis=1)
        preds.name = "prediction"
        return preds
    def _confusion_matrix(self, predicted, actual):
        # NOTE(review): these two names are swapped (actual_classes is
        # built from `predicted` and vice versa); harmless here because
        # only their union is used below.
        actual_classes = set(predicted)
        predicted_classes = set(actual)
        # Combine the possible classes in both y and predicted
        classes = actual_classes.union(predicted_classes)
        n = len(classes)
        conf_mat = pd.DataFrame(np.zeros((n, n)))
        conf_mat.columns = classes
        conf_mat.index = classes
        for (pred, actual) in zip(predicted, actual):
            # Chained indexing: selects column `pred`, then row `actual`,
            # i.e. columns are predictions and rows are ground truth.
            conf_mat[pred][actual] += 1
        return conf_mat
    def _measures(self, confusion_matrix):
        true_pos = np.diag(confusion_matrix)
        count = confusion_matrix.values.sum()
        # Precision is the # we got correct, over the amount predicted (sum cols)
        precision = true_pos / confusion_matrix.sum(axis=0)
        # If we have no predicted values returns NaN (true positives + false positives = 0)
        if sum(np.isnan(precision)) > 0:
            print("Precision and F-score are ill-defined and set to nan in labels with no predicted samples.")
        # Recall is the # we got correct, over the actual values for that attribute (sum rows)
        recall = true_pos / confusion_matrix.sum(axis=1)
        if sum(np.isnan(recall)) > 0:
            print("Recall and F-score are ill-defined and being set to nan in labels with no predicted samples.")
        accuracy = true_pos.sum() / count
        f1 = 2 * precision * recall / (precision + recall)
        return {
            'accuracy': accuracy,
            'precision': precision.to_dict(),
            'recall': recall.to_dict(),
            'F1': f1.to_dict()
        }
    def test(self, X, y, display=False):
        # Returns a dictionary containing test statistics:
        # accuracy, recall, precision, F1-measure, and a confusion matrix.
        # If display=True, print the information to the console.
        # Raise a ValueError if the class is not trained.
        result = {'precision':None,
                  'recall':None,
                  'accuracy':None,
                  'F1':None,
                  'confusion-matrix':None}
        preds = self.predict(X)
        result['confusion-matrix'] = self._confusion_matrix(preds, y)
        # Add accuracy, recall and precision and update the result
        result.update(self._measures(result['confusion-matrix']))
        if display:
            print(result)
        return result
    def __str__(self):
        # Returns a readable string representation of the trained
        # decision tree or "ID3 untrained" if the model is not trained.
        return str(self.model)
    def save(self, output):
        # 'output' is a file *object* (NOT necessarily a filename)
        # to which you will save the model in a manner that it can be
        # loaded into a new DecisionTree instance.
        pickle.dump(self.model, output)
|
from __future__ import unicode_literals
import csv
from django.core.management.base import BaseCommand
from django.utils.encoding import force_text
from ...models import HistoricalStockData, Stocks
SILENT, NORMAL, VERBOSE, VERY_VERBOSE = 0, 1, 2, 3
class Command(BaseCommand):
    """Management command that imports historical stock prices from a CSV."""
    # Fix: the help text (and the banner below) described the movie
    # importer this command was copied from; it actually imports
    # historical stock data.
    help = (
        "Imports historical stock data from a local CSV file. "
        "Expects date, open, high, low, close, and volume columns."
    )

    def add_arguments(self, parser):
        """Register the positional path of the CSV file to import."""
        parser.add_argument(
            "file_path",
            nargs=1
        )

    def handle(self, *args, **options):
        """Create the MSFT Stocks row and one HistoricalStockData per CSV row."""
        verbosity = options.get("verbosity", NORMAL)
        file_path = options["file_path"][0]
        if verbosity >= NORMAL:
            self.stdout.write("=== Stock data imported ===")
        with open(file_path) as f:
            reader = csv.reader(f)
            # NOTE(review): hard-codes a single Microsoft stock and saves
            # a fresh row on every run — confirm whether get_or_create
            # was intended here as well.
            s1 = Stocks(stock_id="MST", company="MICROSOFT", ticker ="MSFT",
                        industry="IT", sector="Software")
            s1.save()
            for rownum, (date,op,high,low,close,volume) in \
                    enumerate(reader):
                # Header row is deliberately NOT skipped; re-enable if the
                # CSV has column captions:
                # if rownum == 0:
                #     continue
                data, created = HistoricalStockData.objects.get_or_create(
                    stock_id=s1,
                    date=force_text(date),
                    open=force_text(op),
                    high=force_text(high),
                    low=force_text(low),
                    close=force_text(close),
                    volume=force_text(volume)
                )
|
"""
Think of your favorite mode of transportation, such as a motorcycle or a car, and make a list that stores several examples.
Use your list to print a series of statements about these items, such as “I would like to own a Honda motorcycle.”
"""
my_list = ['Ducati', 'audi', 'bmw']
# Same three statements as before, built with str.format instead of f-strings.
print("I don't like {} motorcycle.".format(my_list[0]))
print("My dreamest car is {} R8!".format(my_list[1].title()))
print("But first, I need to buy old {}.".format(my_list[2].upper()))
|
from dataclasses import dataclass, field
from project_management.entities.task import Task
from datetime import datetime, timezone
from project_management.asana import util
# Asana application Specific Task entites
@dataclass
class AsanaTask(Task):
    """Asana-specific task entity layered on the generic Task."""
    task_completed: bool = False
    # Either an ISO "YYYY-MM-DD" string or an epoch timestamp string;
    # normalised to ISO form in __post_init__.
    due_date: str = None
    tag: str = None
    priority: str = None
    section_id: str = None
    def __post_init__(self):
        # An epoch value contains no "-"; convert it to YYYY-MM-DD.
        # (`format` shadows the builtin here — harmless in this scope.)
        if not (self.due_date is None or "-" in self.due_date):
            format = "%Y-%m-%d"
            self.due_date = util.epoch_to_format(format, self.due_date)
|
import requests
from bs4 import BeautifulSoup
import random
# Fetch the draw-result page and print the six winning numbers.
req = requests.get("https://dhlottery.co.kr/gameResult.do?method=byWin&drwNo=837").text
soup = BeautifulSoup(req, 'html.parser')
# Fix: select_one(".ball_645") always returns the FIRST ball, so the old
# range(6) loop printed the same number six times. select() returns every
# ball span in page order; the first six are the winning numbers (the
# seventh, if present, is the bonus — see the selectors noted below).
for ball in soup.select(".ball_645")[:6]:
    print(ball.text)
#article > div:nth-child(2) > div > div.win_result > div > div.num.win
#article > div:nth-child(2) > div > div.win_result > div > div.num.win > p > span.ball_645.lrg.ball1
#article > div:nth-child(2) > div > div.win_result > div > div.num.win > p > span:nth-child(2)
#article > div:nth-child(2) > div > div.win_result > div > div.num.win > p > span:nth-child(3)
#article > div:nth-child(2) > div > div.win_result > div > div.num.win > p > span:nth-child(4)
#article > div:nth-child(2) > div > div.win_result > div > div.num.win > p > span.ball_645.lrg.ball4
#article > div:nth-child(2) > div > div.win_result > div > div.num.win > p > span.ball_645.lrg.ball5
#article > div:nth-child(2) > div > div.win_result > div > div.num.bonus > p > span |
import sys
import dictionary
import logging
from grammar import Sentence, Action
'''
The main running code of the program, probably.
Might later be either renamed "__main__.py" or replaced with a file named __main__.py so that we can simply run the wat package.
'''
__author__ = "SAI"
class Translator:
    '''
    ??? This might not be necessary
    Placeholder for a future translation front-end; currently unused.
    '''
    pass
def perform_sentence(sentence_str):
    '''
    Takes the sentence string, parses it, evaluates it, and then performs it
    '''
    parsed = Sentence(sentence_str)
    parsed.print_sentence()
    Action(parsed).perform()
if __name__ == "__main__":
    #This is how we specify that this module is the one being run
    #If this module is run as a script, do the following...
    sentence_str = ""
    if len(sys.argv) > 1:
        #Command line arguments are joined with spaces into one sentence string
        sentence_str= " ".join(sys.argv[1:])
    else:
        # NOTE(review): raw_input is Python 2 only — under Python 3 this
        # raises NameError; confirm the target interpreter (or use input()).
        sentence_str = raw_input("Please enter your sentence: ");
    perform_sentence(sentence_str)
|
from django.db import models
from eSchoolGateProject import settings
class Subject(models.Model):
    """A taught subject; several teachers can share one subject (M2M)."""
    name = models.CharField(max_length=100,)
    teacher = models.ManyToManyField(settings.AUTH_USER_MODEL)
    classroom = models.CharField(max_length=7,)
    def __unicode__(self):
        # Python 2 style display name, e.g. "Maths - 7A".
        return self.name + ' - ' + self.classroom
class Lesson(models.Model):
    """One weekly lesson belonging to a Subject, with video and notes files."""
    subject = models.ForeignKey(Subject, related_name='lesson', on_delete=models.CASCADE)
    topic = models.CharField(default='', max_length=500)
    description = models.CharField(default='', max_length=500)
    # NOTE(review): unique=True makes the week number unique across ALL
    # subjects, not per subject — confirm whether unique_together
    # (subject, week) was intended.
    week = models.IntegerField(unique=True)
    video = models.FileField(upload_to='media/videos')
    note = models.FileField(upload_to='media/notes')
|
# Copyright (c) 2015, Dataent Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import dataent, unittest, os
from dataent.utils import cint
from dataent.model.naming import revert_series_if_last, make_autoname, parse_naming_series
class TestDocument(unittest.TestCase):
    """Tests for dataent Document CRUD, validation, permissions and naming."""

    def test_get_return_empty_list_for_table_field_if_none(self):
        d = dataent.get_doc({"doctype":"User"})
        self.assertEqual(d.get("roles"), [])

    def test_load(self):
        """Loading a DocType populates meta fields and child tables."""
        d = dataent.get_doc("DocType", "User")
        self.assertEqual(d.doctype, "DocType")
        self.assertEqual(d.name, "User")
        self.assertEqual(d.allow_rename, 1)
        self.assertTrue(isinstance(d.fields, list))
        self.assertTrue(isinstance(d.permissions, list))
        # Fix: assertTrue(filter(...)) is always true on Python 3 because
        # filter() returns a (truthy) lazy object — materialise the check.
        self.assertTrue(any(df.fieldname == "email" for df in d.fields))

    def test_load_single(self):
        d = dataent.get_doc("Website Settings", "Website Settings")
        self.assertEqual(d.name, "Website Settings")
        self.assertEqual(d.doctype, "Website Settings")
        self.assertTrue(d.disable_signup in (0, 1))

    def test_insert(self):
        """Insert an Event; also reused as a fixture by later tests."""
        d = dataent.get_doc({
            "doctype":"Event",
            "subject":"test-doc-test-event 1",
            "starts_on": "2014-01-01",
            "event_type": "Public"
        })
        d.insert()
        self.assertTrue(d.name.startswith("EV"))
        self.assertEqual(dataent.db.get_value("Event", d.name, "subject"),
            "test-doc-test-event 1")
        # test if default values are added
        self.assertEqual(d.send_reminder, 1)
        return d

    def test_insert_with_child(self):
        d = dataent.get_doc({
            "doctype":"Event",
            "subject":"test-doc-test-event 2",
            "starts_on": "2014-01-01",
            "event_type": "Public"
        })
        d.insert()
        self.assertTrue(d.name.startswith("EV"))
        self.assertEqual(dataent.db.get_value("Event", d.name, "subject"),
            "test-doc-test-event 2")

    def test_update(self):
        d = self.test_insert()
        d.subject = "subject changed"
        d.save()
        self.assertEqual(dataent.db.get_value(d.doctype, d.name, "subject"), "subject changed")

    def test_mandatory(self):
        """Inserting without a mandatory field raises MandatoryError."""
        dataent.delete_doc_if_exists("User", "test_mandatory@example.com")
        d = dataent.get_doc({
            "doctype": "User",
            "email": "test_mandatory@example.com",
        })
        self.assertRaises(dataent.MandatoryError, d.insert)
        d.set("first_name", "Test Mandatory")
        d.insert()
        self.assertEqual(dataent.db.get_value("User", d.name), d.name)

    def test_confict_validation(self):
        """Saving a stale copy raises TimestampMismatchError."""
        d1 = self.test_insert()
        d2 = dataent.get_doc(d1.doctype, d1.name)
        d1.save()
        self.assertRaises(dataent.TimestampMismatchError, d2.save)

    def test_confict_validation_single(self):
        d1 = dataent.get_doc("Website Settings", "Website Settings")
        d1.home_page = "test-web-page-1"
        d2 = dataent.get_doc("Website Settings", "Website Settings")
        d2.home_page = "test-web-page-1"
        d1.save()
        self.assertRaises(dataent.TimestampMismatchError, d2.save)

    def test_permission(self):
        dataent.set_user("Guest")
        self.assertRaises(dataent.PermissionError, self.test_insert)
        dataent.set_user("Administrator")

    def test_permission_single(self):
        dataent.set_user("Guest")
        # Fix: "Website Settigns" (typo) previously loaded a non-existent
        # doc, so the test exercised the wrong failure path.
        d = dataent.get_doc("Website Settings", "Website Settings")
        self.assertRaises(dataent.PermissionError, d.save)
        dataent.set_user("Administrator")

    def test_link_validation(self):
        """An invalid Link value raises LinkValidationError; a valid one inserts."""
        dataent.delete_doc_if_exists("User", "test_link_validation@example.com")
        d = dataent.get_doc({
            "doctype": "User",
            "email": "test_link_validation@example.com",
            "first_name": "Link Validation",
            "roles": [
                {
                    "role": "ABC"
                }
            ]
        })
        self.assertRaises(dataent.LinkValidationError, d.insert)
        d.roles = []
        d.append("roles", {
            "role": "System Manager"
        })
        d.insert()
        self.assertEqual(dataent.db.get_value("User", d.name), d.name)

    def test_validate(self):
        d = self.test_insert()
        d.starts_on = "2014-01-01"
        d.ends_on = "2013-01-01"
        self.assertRaises(dataent.ValidationError, d.validate)
        self.assertRaises(dataent.ValidationError, d.run_method, "validate")
        self.assertRaises(dataent.ValidationError, d.save)

    def test_update_after_submit(self):
        d = self.test_insert()
        d.starts_on = "2014-09-09"
        self.assertRaises(dataent.UpdateAfterSubmitError, d.validate_update_after_submit)
        d.meta.get_field("starts_on").allow_on_submit = 1
        d.validate_update_after_submit()
        d.meta.get_field("starts_on").allow_on_submit = 0
        # when comparing date(2014, 1, 1) and "2014-01-01"
        d.reload()
        d.starts_on = "2014-01-01"
        d.validate_update_after_submit()

    def test_varchar_length(self):
        d = self.test_insert()
        d.subject = "abcde"*100
        self.assertRaises(dataent.CharacterLengthExceededError, d.save)

    def test_xss_filter(self):
        """Dangerous HTML in a field is escaped or sanitised on save."""
        d = self.test_insert()
        # script
        xss = '<script>alert("XSS")</script>'
        escaped_xss = xss.replace('<', '&lt;').replace('>', '&gt;')
        d.subject += xss
        d.save()
        d.reload()
        self.assertTrue(xss not in d.subject)
        self.assertTrue(escaped_xss in d.subject)
        # onload
        xss = '<div onload="alert("XSS")">Test</div>'
        escaped_xss = '<div>Test</div>'
        d.subject += xss
        d.save()
        d.reload()
        self.assertTrue(xss not in d.subject)
        self.assertTrue(escaped_xss in d.subject)
        # css attributes
        xss = '<div style="something: doesn\'t work; color: red;">Test</div>'
        escaped_xss = '<div style="color: red;">Test</div>'
        d.subject += xss
        d.save()
        d.reload()
        self.assertTrue(xss not in d.subject)
        self.assertTrue(escaped_xss in d.subject)

    def test_link_count(self):
        """Flushing the link-count cache increments the target doc's idx."""
        if os.environ.get('CI'):
            # cannot run this test reliably in travis due to its handling
            # of parallelism
            return
        from dataent.model.utils.link_count import update_link_count
        update_link_count()
        doctype, name = 'User', 'test@example.com'
        d = self.test_insert()
        d.append('event_participants', {"reference_doctype": doctype, "reference_docname": name})
        d.save()
        link_count = dataent.cache().get_value('_link_count') or {}
        old_count = link_count.get((doctype, name)) or 0
        dataent.db.commit()
        link_count = dataent.cache().get_value('_link_count') or {}
        new_count = link_count.get((doctype, name)) or 0
        self.assertEqual(old_count + 1, new_count)
        before_update = dataent.db.get_value(doctype, name, 'idx')
        update_link_count()
        after_update = dataent.db.get_value(doctype, name, 'idx')
        self.assertEqual(before_update + new_count, after_update)

    def test_naming_series(self):
        """revert_series_if_last rolls the series counter back by one."""
        data = ["TEST-", "TEST/17-18/.test_data./.####", "TEST.YYYY.MM.####"]
        for series in data:
            name = make_autoname(series)
            prefix = series
            if ".#" in series:
                prefix = series.rsplit('.',1)[0]
            prefix = parse_naming_series(prefix)
            old_current = dataent.db.get_value('Series', prefix, "current", order_by="name")
            revert_series_if_last(series, name)
            new_current = cint(dataent.db.get_value('Series', prefix, "current", order_by="name"))
            self.assertEqual(cint(old_current) - 1, new_current)
|
from selenium import webdriver
import time
from selenium.webdriver.support.select import Select
def driver_uu():
    """Open demo1.html, type into the first text field, clear it, and quit."""
    driver = webdriver.Chrome('../chromedriver/chromedriver.exe')
    driver.get('http://192.168.60.146:8080/demo1.html')
    time.sleep(3)
    text_field = driver.find_element_by_xpath('/html/body/table/tbody/tr[2]/td[2]/input')
    text_field.send_keys('啦啦啦啦啦啦拉拉拉拉啦')
    time.sleep(3)
    # clear the field again
    text_field.clear()
    time.sleep(3)
    # close the browser
    driver.quit()
def kiki_l():
    """Exercise the text input, file upload, and radio buttons on demo1.html."""
    driver = webdriver.Chrome('../chromedriver/chromedriver.exe')
    driver.get('http://192.168.60.146:8080/demo1.html')
    time.sleep(3)
    # text input
    input_el = driver.find_element_by_xpath('/html/body/table/tbody/tr[2]/td[2]/input')
    input_el.send_keys('啦啦啦啦啦啦拉拉拉拉啦')
    time.sleep(3)
    # file upload
    input_id = driver.find_element_by_id('file1')
    input_id.send_keys('C:/Users/Administrator/Desktop/搜狗截图20190610162557.png')
    time.sleep(2)
    # Fix: the browser used to be quit HERE, which killed the session
    # before the radio-button steps below ever ran (they raised on a
    # dead driver). Quit exactly once, at the end.
    radio_els = driver.find_elements_by_name('radio')
    print(type(radio_els))
    radio_els[0].click()
    time.sleep(2)
    radio_els[1].click()
    time.sleep(2)
    # close the browser
    driver.quit()
if __name__ == '__main__':
    # Ad-hoc manual exercise of link navigation and browser history on
    # demo1.html; earlier checkbox/select experiments are kept below,
    # commented out.
    driver = webdriver.Chrome('../chromedriver/chromedriver.exe')
    driver.get('http://192.168.60.146:8080/demo1.html')
    time.sleep(3)
    # checkbox_els = driver.find_elements_by_class_name('checkbox')
    # print(checkbox_els)
    # checkbox_els[0].click()
    # time.sleep(3)
    # checkbox_els[1].click()
    # time.sleep(3)
    # checkbox_els[2].click()
    # time.sleep(3)
    # typr_els = driver.find_element_by_xpath('/html/body/table/tbody/tr[7]/td[2]/input').send_keys(1478542)
    # time.sleep(3)
    #
    #
    # typr_els.clear()
    # time.sleep(3)
    # select_els = driver.find_element_by_css_selector('body > table > tbody > tr:nth-child(12) > td:nth-child(2) > select')
    # time.sleep(3)
    # c = Select(select_els)
    #
    # c.select_by_value('z1')
    # time.sleep(2)
    # c.select_by_value('z2')
    # time.sleep(2)
    # c.select_by_value('z0')
    # time.sleep(2)
    driver.find_element_by_link_text('当当').click()
    time.sleep(2)
    # go back to the previous page
    driver.back()
    time.sleep(2)
    driver.find_element_by_link_text('问问度娘').click()
    time.sleep(2)
    driver.back()
    time.sleep(2)
    driver.forward()
    time.sleep(2)
    driver.refresh()
    time.sleep(2)
    # close the browser
    driver.quit()
|
"""
Author: Yash Soni
Date: 23_Jan_2021
Purpose: Python Learning Purpose
"""
import random
def Random_generaotr(new_list):
    """Return a random ODD index into `new_list` (surnames sit at odd
    positions when every name is "first last").

    Fix: the original rejection loop drew randint(1, len(new_list)) and
    retried until the value was odd. Because randint is inclusive, an
    odd list length could yield len(new_list) itself — an out-of-range
    index for callers. randrange(start, stop, step) draws the same odd
    values directly from [1, len(new_list)), with no retry loop and no
    overflow.
    """
    return random.randrange(1, len(new_list), 2)
if __name__ == '__main__':
    # Read friend names, split them into individual words, then print
    # "funny" names by pairing each first name with a randomly chosen
    # odd-position word (a surname).
    num_friends=int(input("Please enter the Numbers of your Friends :\n"))
    name_list=[]
    for i in range(num_friends):
        name_list.append(input(f"{i+1}. Please enter the name of your friend: \n"))
    # NOTE(review): `list` shadows the builtin; rename when convenient.
    list=[]
    new_list=[]
    for name in name_list:
        list=name.split(" ")
        for i in list:
            new_list.append(i)
    print(new_list)
    a=0
    surname_list=[]
    print("Your friends funny names are as follows: ")
    for i in range(num_friends):
        if i==0:
            surname = Random_generaotr(new_list)
            pass
        else:
            surname = Random_generaotr(new_list)
            # NOTE(review): this duplicate-avoidance loop is flawed — the
            # re-drawn surname is not re-checked against earlier entries
            # (`continue` only skips to the next ALREADY-used surname), so
            # duplicates can still slip through. Confirm intended behavior.
            for x in surname_list:
                if x==new_list[surname]:
                    surname = Random_generaotr(new_list)
                    continue
        print(f"{new_list[0+a]} {new_list[surname]}")
        surname_list.append(new_list[surname])
        # Advance to the next friend's first name (names come in pairs).
        a+=2
|
import bcrypt
from django.shortcuts import render, redirect
from .models import Users, Posts, Comments
from django.contrib import messages
from .decorators import login_required
@login_required
def index(request):
    """Render the landing page with all users and posts, newest post first."""
    all_users = Users.objects.all()
    recent_posts = Posts.objects.all().order_by('-created_at')
    return render(request, 'index.html', {
        'users': all_users,
        'posts': recent_posts,
    })
@login_required
def wall(request):
    """Render the wall with all users, posts (newest first) and comments."""
    all_users = Users.objects.all()
    recent_posts = Posts.objects.all().order_by('-created_at')
    all_comments = Comments.objects.all()
    return render(request, 'wall.html', {
        'users': all_users,
        'posts': recent_posts,
        'comments': all_comments,
    })
@login_required
def comment(request, message_id):
    """Attach POST['comment'] to post `message_id` on behalf of the session user."""
    comment = request.POST['comment']
    user_id = int(request.session['user']['id'])
    user = Users.objects.get(id = user_id)
    post = Posts.objects.get(id=message_id)
    new_comment = Comments.objects.create(comment = comment)
    # The comment is linked both to the post and to its author.
    post.comments.add(new_comment)
    user.comments.add(new_comment)
    context = {
        'posts' : Posts.objects.all().order_by('-created_at'),
        'post_comments' : Comments.objects.all(),
        'users' : Users.objects.all()
    }
    # NOTE(review): renders directly instead of redirecting
    # (no POST/redirect/GET), so a page refresh resubmits the comment —
    # confirm this is intended; siblings like new_message redirect.
    return render(request, 'wall.html', context)
@login_required
def new_message(request):
    """Create a post from POST['message'] for the session user and bounce
    back to the referring page."""
    user_id = int(request.session['user']['id'])
    user = Users.objects.get(id = user_id)
    post = str(request.POST['message'])
    # import pdb
    # pdb.set_trace()
    new_message = Posts.objects.create(post = post)
    user.posts.add(new_message)
    messages.success(request, "Message successfully created")
    return redirect(request.META.get('HTTP_REFERER'))
@login_required
def destroy_message(request, message_id):
    """Delete the post with the given id, then return to the referring page.

    NOTE(review): any logged-in user can delete any post — no ownership
    check; confirm whether that is intended.
    """
    Posts.objects.get(id=message_id).delete()
    return redirect(request.META.get('HTTP_REFERER'))
@login_required
def destroy_comment(request, comment_id):
    """Delete the comment with the given id, then return to the referring page."""
    Comments.objects.get(id=comment_id).delete()
    return redirect(request.META.get('HTTP_REFERER'))
def register(request):
    """GET: render the signup form.  POST: validate, create the user,
    start their session, and redirect home."""
    if request.method == 'GET':
        return render(request, 'register.html')

    first_name = request.POST['first_name']
    last_name = request.POST['last_name']
    email = request.POST['email']
    password = request.POST['password']
    password_confirm = request.POST['password_confirm']  # checked inside the validator

    errors = Users.objects.basic_validator(request.POST)
    if errors:
        for error_message in errors.values():
            messages.error(request, error_message)
        return redirect('/register')

    hashed = bcrypt.hashpw(password.encode(), bcrypt.gensalt()).decode()
    user = Users.objects.create(
        first_name=first_name,
        last_name=last_name,
        email=email,
        password=hashed,
    )
    request.session['user'] = {
        'id': user.id,
        'name': user.first_name,
        'email': user.email,
    }
    messages.success(request, 'User successfully registered')
    return redirect('../')
def login(request):
    """Check the POSTed credentials and start a session for the user."""
    email = request.POST['email']
    password = request.POST['password']

    def reject():
        # Same vague message for both failure modes, so attackers can't
        # probe which e-mails are registered.
        messages.error(request, 'The user or password does not exist')
        return redirect('/register')

    try:
        user = Users.objects.get(email=email)
    except Users.DoesNotExist:
        return reject()
    if not bcrypt.checkpw(password.encode(), user.password.encode()):
        return reject()

    request.session['user'] = {
        'id': user.id,
        'name': user.first_name,
        'email': user.email,
    }
    messages.success(request, f'Welcome {user.first_name}')
    return redirect('../')
def logout(request):
    """Drop the session's user entry (if any) and send the visitor to
    the signup page.

    Fixes: `del request.session['user']` raised KeyError when a logout
    request arrived without an active login; pop() with a default is safe.
    """
    request.session.pop('user', None)
    return redirect('/register')
import re
def abbreviate(words):
    """Return the acronym for *words*: first letter of each word, uppercased.

    Words are runs of letters/apostrophes; whitespace, underscores and
    hyphens all act as separators.  Fixes two defects in the original
    split pattern '[\\s|_|-]+\\W?': the ``|`` inside a character class
    matched literal pipes, and splitting the empty string produced ['']
    which crashed with IndexError on word[0].
    """
    return ''.join(word[0].upper() for word in re.findall(r"[A-Za-z']+", words))
|
#coding=utf-8
import re
import requests
from lxml import etree
import lxml.html
import os
import time
import threading
def url_1(n): #定义爬取得页数
urls=[]
for i in range(1,n+1):
url = 'http://www.kzj365.com/category-9-b0-min0-max0-page-'+str(i)+'-default-DESC-pre2.html'
print url
urls.append(url)
return urls
def url_2(url): # scrape the data from one listing page
    """Scrape every product (title, price, image) linked from one category
    page: append a CSV row per product and download its image."""
    html = requests.get(url).content
    doc = lxml.html.fromstring(html)
    href = doc.xpath('//div[@class="goods-lists clearfix"]/ul/li/a/@href')
    # NOTE(review): m and i are manual counters that shadow the loop
    # variables ss/rr, which are never used directly.
    m=0
    for ss in href:
        urls='http://www.kzj365.com/'+href[m] # URL of each product on the page
        print urls
        htmls = requests.get(urls).content # fetch and parse the second-level page
        docs = lxml.html.fromstring(htmls) # alternatively: page = etree.HTML(html)
        titles = docs.xpath('//div[@class="jqzoom"]/img/@alt')
        price=docs.xpath('//div[@class="gi-pinfo"]/div[2]/span[1]/b/text()')
        img = docs.xpath('//div[@class="jqzoom"]/img/@data-url')
        i=0
        for rr in titles: # save the text fields
            results = titles[i]+','+price[i]+','+img[i]+'\n' # '\n' terminates the CSV row
            print results
            b=str(results)
            with open('lxml_1.csv','a') as f:
                f.write(b)
            p_url=img[i] # save the image
            r = requests.get(p_url) # download the image bytes
            list_name = img[i].split('/')
            file_name = list_name[len(list_name)-1] # last path component is the file name
            path="D:\\meinv\\kzj"
            file_path='%s/%s'%(path,file_name)
            if not os.path.exists(path): # create the target directory
                os.makedirs(path) # if it does not exist yet
            print 'file_path',file_path
            with open(file_path,'wb') as code:
                code.write(r.content)
            i += 1
        f.close() # NOTE(review): redundant -- the with-block already closed f
        m+=1
if __name__ == '__main__':
page=raw_input("请输入页数: ")
star=time.time()
for url in url_1(int(page)):
t = threading.Thread(target=url_2(url),args=(url,)) #创建了threads数组,创建线程t1,# target: 要执行的方法;name: 线程名;args/kwargs: 要传入方法的参数。
t.start() #开始线程活动。
t.join() #join([timeout]): 阻塞当前上下文环境的线程,直到调用此方法的线程终止或到达指定的timeout(可选参数)。
end=time.time()-star
print '文件下载完成,耗时:%f秒'%(end)
|
# Demo of basic types: tuple unpacking assigns an int, a float, a str
# and a bool in a single statement.
x, y, name, is_cool = (1, 2.5, 'John', True)
a = x + y        # int + float promotes to float (3.5)
x = str(x)       # rebind x as its string representation
print('Hello')
print(is_cool, a)
print(type(x))
|
from linked_list import LinkedList as LL
from node import Node
# def reverse_ll(list1):
# """Reverse a singly linked list."""
# current = list1.head
# list2 = LL()
# while current is not None:
# # list2.insert(current)
# list2.head = Node(current.val, list2.head)
# current = current._next
# return list2.head.val
def reverse_ll(sll):
    """In-place reversal of a singly linked list.

    Re-links each node's _next pointer to its predecessor, installs the
    new head on *sll*, and returns the new head's value.

    Fixes: the original dereferenced sll.head.val unconditionally and
    raised AttributeError for an empty list; now returns None instead.
    """
    previous = None
    current = sll.head
    while current:
        nxt = current._next        # stash the remainder of the list
        current._next = previous   # point this node backwards
        previous = current
        current = nxt
    sll.head = previous
    return sll.head.val if sll.head else None
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that app bundles are built correctly.
"""
import TestGyp
import os
import sys
# Short-circuit on the only platform this test targets: the test is
# disabled pending https://crbug.com/483696.  (Python 2 print statement.)
if sys.platform == 'darwin':
  print "This test is currently disabled: https://crbug.com/483696."
  sys.exit(0)
def ls(path):
  '''Returns a list of all files in a directory, relative to the directory.'''
  prefix_len = len(path) + 1   # drop "<path>/" from every walked entry
  found = []
  for root, _, filenames in os.walk(path):
    for name in filenames:
      found.append(os.path.join(root, name)[prefix_len:])
  return found
if sys.platform == 'darwin':
  # Build the sample framework, then verify every artifact the bundle
  # should (and should not) contain.
  test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
  test.run_gyp('framework.gyp', chdir='framework')
  test.build('framework.gyp', 'test_framework', chdir='framework')

  # The framework binary itself.
  test.built_file_must_exist(
      'Test Framework.framework/Versions/A/Test Framework',
      chdir='framework')

  # Info.plist, carrying the expected bundle identifier.
  info_plist = test.built_file_path(
      'Test Framework.framework/Versions/A/Resources/Info.plist',
      chdir='framework')
  test.must_exist(info_plist)
  test.must_contain(info_plist, 'com.yourcompany.Test_Framework')

  # Localized resources.
  test.built_file_must_exist(
      'Test Framework.framework/Versions/A/Resources/English.lproj/'
      'InfoPlist.strings',
      chdir='framework')

  # Symlinks created by the packaging process.
  for link in ('Test Framework.framework/Versions/Current',
               'Test Framework.framework/Resources',
               'Test Framework.framework/Test Framework'):
    test.built_file_must_exist(link, chdir='framework')

  # PkgInfo must NOT be generated for a framework bundle.
  test.built_file_must_not_exist(
      'Test Framework.framework/Versions/A/Resources/PkgInfo',
      chdir='framework')

  # Check that no other files get added to the bundle.
  expected = set([
      'Versions/A/Test Framework',
      'Versions/A/Resources/Info.plist',
      'Versions/A/Resources/English.lproj/InfoPlist.strings',
      'Test Framework',
      'Versions/A/Libraries/empty.c',  # Written by a gyp action.
  ])
  actual = set(ls(test.built_file_path('Test Framework.framework',
                                       chdir='framework')))
  if actual != expected:
    test.fail_test()

  test.pass_test()
|
# Advent of Code 2016 day 10: bots pass microchips to each other.
# Part 1: which bot compares chips 61 and 17.
# Part 2: product of the chips placed in outputs 0, 1 and 2.
#
# Fixes: the container was named `dict`, shadowing the builtin, and
# membership tests went through `.keys()` needlessly.
with open("input.txt") as f:
    lines = f.readlines()

# chips[bot_id] -> list of chip values the bot currently holds
chips = {}
for line in lines:
    if "goes" in line:
        parts = line.split()
        chips.setdefault(int(parts[5]), []).append(int(parts[1]))

# Bots already holding two chips are ready to act.
q = [b for b, held in chips.items() if len(held) > 1]

output = {}
while len(q) > 0:
    bot = q.pop(0)
    # Re-scan the instructions for this bot's "gives" rule.
    for line in lines:
        parts = line.split()
        if "gives" in parts and int(parts[1]) == bot:
            low = int(parts[6])
            high = int(parts[11])
            if parts[5] == "output":
                output[low] = min(chips[bot])
            else:
                if low in chips:
                    chips[low].append(min(chips[bot]))
                    q.append(low)   # receiver now holds two chips
                else:
                    chips[low] = [min(chips[bot])]
            if parts[10] == "output":
                output[high] = max(chips[bot])
            else:
                if high in chips:
                    chips[high].append(max(chips[bot]))
                    q.append(high)
                else:
                    chips[high] = [max(chips[bot])]

# Part 1: find the bot that ever held both 61 and 17.
bot = 0
for key, held in chips.items():
    if 61 in held and 17 in held:
        bot = key
print("Answer part 1:", bot)
print("Answer part 2:", output[0] * output[1] * output[2])
|
# Per-bank CSV layout descriptions used by the statement parser:
#   has_header    -- whether the first row is a header line to skip
#   columns_order -- semantic meaning of each CSV column, in file order
#   date_format   -- strptime format of created_date in that bank's file
BANK_CSV_CONFIGS = {
    "bank1": {
        "has_header": True,
        "columns_order": ["created_date", "type", "amount", "source", "destination"],
        "date_format": "%b %d %Y"
    },
    "bank2": {
        # bank2 swaps destination/source relative to bank1
        "has_header": True,
        "columns_order": ["created_date", "type", "amount", "destination", "source"],
        "date_format": "%d-%m-%Y"
    },
    "bank3": {
        # amount_hundredth: presumably the fractional part of the amount
        # in its own column -- TODO confirm against a bank3 sample file
        "has_header": True,
        "columns_order": ["created_date", "type", "amount", "amount_hundredth", "destination", "source"],
        "date_format": "%d %b %Y"
    },
}
# strftime format for dates in the normalized output.
OUTPUT_DATE_FORMAT = "%d %b %Y"
|
# -*- coding: utf-8 -*-
__author__ = 'rasmus svebestad'
__email__ = 'rasmus.svebestad@nmbu.no'
# Task B
# Importing pytest for the test_empty_list function
import pytest
def median(data):
    """Return the median of a non-empty sequence of numbers.

    Based on the INF200 exercise repository on GitHub.

    Fixes: the original opened its docstring with four quotes, raised a
    bare ValueError with no message, and only reached the empty-input
    check after testing ``n % 2`` -- the guard now comes first.

    :param data: sequence of numbers (not modified)
    :raises ValueError: if *data* is empty
    """
    sdata = sorted(data)
    n = len(sdata)
    if n == 0:
        raise ValueError("median() arg is an empty sequence")
    if n % 2 == 1:
        return sdata[n // 2]
    return 0.5 * (sdata[n // 2 - 1] + sdata[n // 2])
def test_one_element():
    """A one-element list is its own median."""
    assert median([2]) == 2
def test_odd():
    # NOTE(review): despite the name, this exercises a four-element (even) list.
    assert median([1, 3, 5, 7]) == 4
def test_even():
    assert median([2, 4, 6, 8]) == 5
def test_ordered():
    assert median([1, 2, 3]) == 2
def test_reversed():
    assert median([3, 2, 1]) == 2
def test_unordered():
    assert median([2, 1, 3]) == 2
def test_empty_list():
    """The median of an empty sequence must raise ValueError."""
    with pytest.raises(ValueError):
        median([])
def test_original_unchanged():
    """median must not mutate its input."""
    sample = [1, 2, 3]
    median(sample)
    assert sample == [1, 2, 3]
def test_tuples():
    """median accepts any sequence, not just lists."""
    assert median((1, 2, 3, 4, 5)) == 3
|
import numpy as np
import matplotlib.pyplot as plt
from oj.axes import pcolormesh
def plotfaces(x,y,c,faces,slices,mask=None,axes=None,**kwargs):
    """pcolormesh the listed faces of a face-decomposed field.

    x, y, c (and mask) expose .face(f).z and .toglobal() -- presumably
    cube-sphere grid wrappers from the surrounding project; TODO confirm.
    faces and slices pair each face index with the sub-slice to draw.
    Returns the list of created QuadMesh artists, one per face.
    """
    if 'norm' not in kwargs:
        # Fix a common color scale up front, otherwise each per-face
        # pcolormesh call would autoscale independently.
        vmin = kwargs.get('vmin',None)
        vmax = kwargs.get('vmax',None)
        if vmin is None or vmax is None:
            cglob = c.toglobal()
            if vmin is None:
                kwargs['vmin'] = cglob.min()
            if vmax is None:
                kwargs['vmax'] = cglob.max()
            # free the assembled global array immediately
            del cglob
    if axes is None:
        axes = plt.gca()
    ims = []
    for f,s in zip(faces,slices):
        if mask is None:
            data = c.face(f).z
        else:
            data = np.ma.MaskedArray(c.face(f).z, mask.face(f).z)
        if len(data.shape) > 2 and data.shape[0] <= 4:
            # make last axis color
            data = np.rollaxis(data, 0, len(data.shape))
        ims.append( pcolormesh(axes, x.face(f).z[s], y.face(f).z[s], data[s], **kwargs) )
    return ims
def setfaces(ims,c,faces,slices,mask=None,cmap=None,norm=None):
    """Update, in place, the face colors of QuadMeshes made by plotfaces.

    cmap/norm are only needed for scalar fields; RGB(A) data is passed
    through to set_facecolor unmapped.
    """
    # flat plus rgba (if any)
    fcsh = (-1,) + c.shape[:-2]
    for im,f,s in zip(ims,faces,slices):
        if mask is None:
            data = c.face(f).z
        else:
            data = np.ma.MaskedArray(c.face(f).z, mask.face(f).z)
        # make last axis color
        if data.ndim > 2:
            data = np.rollaxis(data, 0, data.ndim)
        datas = data[s]
        if datas.shape[0]*datas.shape[1] > im.get_facecolors().shape[0]:
            # presumably cell-corner data: drop the trailing row/column so
            # the count matches the mesh's face array -- TODO confirm
            datas = datas[:-1,:-1]
        datas = datas.reshape(fcsh)
        if len(fcsh) == 1:
            # scalar field: map values through the colormap ourselves
            datas = cmap(norm(datas))
        im.set_facecolor(datas)
def plotll(x,y,c,mask=None,**kwargs):
    """Plot the field as a full lat-lon style view in two longitude windows.

    Window 1 spans longitudes -90..270, window 2 the wrapped remainder.
    NOTE: the 255/256 bounds hard-code the face size (presumably 512x512
    faces).  Returns the two lists of QuadMesh artists.
    """
    west_faces = [0, 1, 2, 3, 5]
    west_slices = [
        np.s_[:, :],
        np.s_[:, :],
        np.s_[:256, :],
        np.s_[:256, :],
        np.s_[:, 255:],
    ]
    ims1 = plotfaces(np.mod(x + 90, 360) - 90, y, c, west_faces, west_slices, mask, **kwargs)
    east_faces = [2, 3, 4, 5]
    east_slices = [
        np.s_[255:, :],
        np.s_[255:, :],
        np.s_[:, :],
        np.s_[:, :256],
    ]
    ims2 = plotfaces(np.mod(x + 270, 360) - 270, y, c, east_faces, east_slices, mask, **kwargs)
    return ims1, ims2
def setll(imss,c,mask=None):
    """Push new data into the pair of artist lists returned by plotll.

    The face/slice tables must mirror plotll's exactly.
    """
    west_faces = [0, 1, 2, 3, 5]
    west_slices = [
        np.s_[:, :],
        np.s_[:, :],
        np.s_[:256, :],
        np.s_[:256, :],
        np.s_[:, 255:],
    ]
    setfaces(imss[0], c, west_faces, west_slices, mask)
    east_faces = [2, 3, 4, 5]
    east_slices = [
        np.s_[255:, :],
        np.s_[255:, :],
        np.s_[:, :],
        np.s_[:, :256],
    ]
    setfaces(imss[1], c, east_faces, east_slices, mask)
class PcolorCS(object):
    """Bundle of pcolormesh artists for a cube-sphere field drawn in
    several longitude windows, with a set() method for fast updates."""
    def __init__(self,x,y,c,faces,slices,offx,mask=None,**kwargs):
        # faces/slices/offx are parallel lists: one entry per longitude
        # window, each with its own face list, slice list and western
        # longitude offset x0 used to unwrap x into that window.
        self.faces = faces
        self.slices = slices
        self.offxs = offx
        self.ims = [ plotfaces(np.mod(x-x0,360)+x0,y,c,f,s,mask,**kwargs) for f,s,x0 in zip(faces,slices,offx) ]
    def set(self,c,mask=None):
        # Scalar fields (ndim < 3) need an explicit cmap/norm so setfaces
        # can map values to RGBA; RGB(A) fields pass through unmapped.
        if len(c.shape) < 3:
            cmap = self.ims[0][0].cmap
            norm = self.ims[0][0].norm
        else:
            cmap = None
            norm = None
        for im,f,s in zip(self.ims,self.faces,self.slices):
            setfaces(im,c,f,s,mask,cmap,norm)
def pcolor_ll_0_360(x,y,c,mask=None,**kwargs):
    """Pcolor a cube-sphere field over the longitude range 0..360.

    Unlike plotll, the face size is derived from c, so any resolution works.
    Returns a PcolorCS whose set() method refreshes the plot.
    """
    ny, nx = c.face(0).i.shape
    nyh = ny // 2
    nxh = nx // 2
    faces = [[0, 1, 2, 3, 5], [0, 2, 4, 5]]
    slices = [
        [
            np.s_[:, nxh:],
            np.s_[:, :],
            np.s_[:-nyh, :],
            np.s_[:, :],
            np.s_[:, nxh:],
        ],
        [
            np.s_[:, :-nxh],
            np.s_[nyh:, :],
            np.s_[:, :],
            np.s_[:, :-nxh],
        ],
    ]
    return PcolorCS(x, y, c, faces, slices, [-90, 90], mask, **kwargs)
def pcolor_ll_180_180(x,y,c,mask=None,**kwargs):
    """Pcolor a cube-sphere field over the longitude range -180..180.

    Face size is derived from c, so any resolution works.  Returns a
    PcolorCS whose set() method refreshes the plot.
    """
    ny, nx = c.face(0).i.shape
    nyh = ny // 2
    nxh = nx // 2
    faces = [[0, 1, 2, 3, 5], [2, 3, 4, 5]]
    slices = [
        [
            np.s_[:, :],
            np.s_[:, :],
            np.s_[:-nyh, :],
            np.s_[:-nyh, :],
            np.s_[:, nxh:],
        ],
        [
            np.s_[nyh:, :],
            np.s_[nyh:, :],
            np.s_[:, :],
            np.s_[:, :-nxh],
        ],
    ]
    return PcolorCS(x, y, c, faces, slices, [-90, -270], mask, **kwargs)
|
import sys
import nfldb
import datetime
import xlsxwriter
from xlsxwriter.utility import xl_rowcol_to_cell
import json
def cleanTeamNameDK(team):
    """Normalize a DraftKings team abbreviation to nfldb's convention."""
    normalized = team.upper()
    # nfldb calls Jacksonville JAC; DraftKings uses JAX.
    return normalized.replace('JAX', 'JAC')
def cleanPlayerNameDK(name):
    """Map DraftKings player-name variants to the full names nfldb knows.

    Substitutions are applied in order, so '(Philly)' is stripped before
    the 'Tim Wright' rename.
    """
    fixes = (
        ('Jamarcus Nelson', 'J.J. Nelson'),
        ('Fozzy Whittaker', 'Foswhitt Whittaker'),
        ('(Philly)', ''),
        ('Boobie Dixon', 'Anthony Dixon'),
        ('Tim Wright', 'Timothy Wright'),
    )
    cleaned = name
    for old, new in fixes:
        cleaned = cleaned.replace(old, new)
    return cleaned
def cleanPositionDK(pos):
    """Normalize a DraftKings position code (DEF->DST, PK->K), uppercased."""
    normalized = pos.upper()
    for old, new in (('DEF', 'DST'), ('PK', 'K')):
        normalized = normalized.replace(old, new)
    return normalized
def getItemFromDictList(lookupkey, valuekey, dlist):
    """Return the first dict in *dlist* whose *lookupkey* equals *valuekey*.

    Raises StopIteration when no entry matches.

    Fixes: the original called the Python-2-only generator method
    ``.next()``; the ``next()`` builtin works on Python 2.6+ and 3.
    """
    return next(litem for litem in dlist if litem[lookupkey] == valuekey)
def nullValue():
    """Placeholder factory: always yields None (used as a default value)."""
    return None
# --- one-off configuration: DB handle, season/week, and input files ---
db = nfldb.connect()
q = nfldb.Query(db)  # NOTE(review): q is never used below
# Season and week whose salary files are processed; edited by hand per run.
year = 2015
week = 15
print 'load teamsbycity json'
with open('teams/teamsbycity.json') as data_file:
    teamsbycity = json.load(data_file)
print 'load teamsbyname json'
with open('teams/teamsbyname.json') as data_file:
    teamsbyname = json.load(data_file)
print 'load JSON-'+str(year)+'-'+str(week)+'-DK.json json'
with open('salary/JSON-'+str(year)+'-'+str(week)+'-DK.json') as data_file:
    datadk = json.load(data_file)
print 'load JSON-'+str(year)+'-'+str(week)+'-FD.json json'
with open('salary/JSON-'+str(year)+'-'+str(week)+'-FD.json') as data_file:
    datafd = json.load(data_file)
# Match every DraftKings salary entry to an nfldb player, prompting the
# operator to disambiguate when fuzzy name search has no unique exact hit.
salaryoffense = []
salarydefense = []
for i, v in enumerate(datadk['data']):
    qteam = cleanTeamNameDK(v['teamAbbrev'])
    qname = cleanPlayerNameDK(v['Name'])
    qgame = nfldb.Query(db).game(season_year=year, week=week, season_type='Regular', team=qteam).as_games()
    if v['Position'] != 'DST':
        matches = nfldb.player_search(db, qname, limit=15)
        optplayers = {}
        cnt_plyr = 0
        cnt_zero = 0  # number of exact (distance 0) matches
        print '---------------------------'
        for (player, dist) in matches:
            if dist == 0:
                cnt_zero += 1
            cnt_plyr += 1
            optplayers[str(cnt_plyr)] = player
            print str(cnt_plyr) + ') Similarity score: %d, Player: %s' % (dist, player)
        if cnt_zero == 1:
            # exactly one perfect match: take it without prompting
            opt = '1'
        else:
            opt = raw_input('select the real... ' + qname + ' (' + qteam + ', ' + v['Position'] + '): ')
        # Keys are prefixed 01..11 so sorted() later emits columns in order.
        salaryoffense.append({
            '01salary_type': 'dk',
            '02player_id': optplayers[opt].player_id if optplayers[opt].player_id else '00',
            '03gsis_id': qgame[0].gsis_id,
            '04team': qteam,
            '05full_name': optplayers[opt].full_name,
            '06position': v['Position'],
            '07salary': v['Salary'],
            '08season_year': year,
            '09week': week,
            '10searchname': qname,
            '11check': optplayers[opt].full_name.upper() == v['Name'].upper()
        })
    else:
        # Team defenses need no player lookup.
        salarydefense.append({
            '01salary_type': 'dk',
            '02team_id': qteam,
            '03gsis_id': qgame[0].gsis_id,
            '04team': qteam,
            '07salary': v['Salary'],
            '08season_year': year,
            '09week': week
        })
# DK: {"Position":"WR","Name":"Julio Jones","Salary":9200,"GameInfo":"TB@Atl 01:00PM ET","AvgPointsPerGame":25.143,"teamAbbrev":"Atl"}
# FD: {"Id":14190,"Position":"WR","First Name":"Julio","Last Name":"Jones","FPPG":19.7,"Played":7,"Salary":9200,"Game":"TB@ATL","Team":"ATL","Opponent":"TB","Injury Indicator":"","Injury Details":"","FIELD13":"","FIELD14":""}
# Same matching pass for the FanDuel file; FD marks team defenses 'D'
# and splits the player name into separate First/Last fields.
for i, v in enumerate(datafd['data']):
    qteam = cleanTeamNameDK(v['Team'])
    qname = v['First Name'] + ' ' + v['Last Name']
    qgame = nfldb.Query(db).game(season_year=year, week=week, season_type='Regular', team=qteam).as_games()
    if v['Position'] != 'D':
        matches = nfldb.player_search(db, qname, limit=15)
        optplayers = {}
        cnt_plyr = 0
        cnt_zero = 0  # number of exact (distance 0) matches
        print '---------------------------'
        for (player, dist) in matches:
            if dist == 0:
                cnt_zero += 1
            cnt_plyr += 1
            optplayers[str(cnt_plyr)] = player
            print str(cnt_plyr) + ') Similarity score: %d, Player: %s' % (dist, player)
        if cnt_zero == 1:
            # exactly one perfect match: take it without prompting
            opt = '1'
        else:
            opt = raw_input('select the real... ' + qname + ' (' + qteam + ', ' + v['Position'] + '): ')
        salaryoffense.append({
            '01salary_type': 'fd',
            '02player_id': optplayers[opt].player_id if optplayers[opt].player_id else '00',
            '03gsis_id': qgame[0].gsis_id,
            '04team': qteam,
            '05full_name': optplayers[opt].full_name,
            '06position': v['Position'],
            '07salary': v['Salary'],
            '08season_year': year,
            '09week': week,
            '10searchname': qname,
            '11check': optplayers[opt].full_name.upper() == qname.upper()
        })
    else:
        salarydefense.append({
            '01salary_type': 'fd',
            '02team_id': qteam,
            '03gsis_id': qgame[0].gsis_id,
            '04team': qteam,
            '07salary': v['Salary'],
            '08season_year': year,
            '09week': week
        })
#Set up Worksheets
workbook = xlsxwriter.Workbook('salaries_' + str(year) + '_' + str(week) + '.xlsx')
fmt_data = workbook.add_format({'bold': False})
offense = workbook.add_worksheet('offense')
defense = workbook.add_worksheet('defense')
#PRINT OFFENSE SALARIES
# Table spans one header row plus one row per record (columns A..K).
offense.add_table('A1:K'+str(len(salaryoffense) + 1), {
    'columns': [
        {'header': 'salary_type'},
        {'header': 'player_id'},
        {'header': 'gsis_id'},
        {'header': 'team'},
        {'header': 'full_name'},
        {'header': 'position'},
        {'header': 'salary'},
        {'header': 'season_year'},
        {'header': 'week'},
        {'header': 'search_name'},
        {'header': 'check'}
    ]}
)
row = 1
for i, v in enumerate(salaryoffense):
    col = 0
    # sorted() on the 01..11-prefixed keys yields the column order above.
    for key, val in sorted(v.iteritems()):
        offense.write(row, col, val, fmt_data)
        col += 1
    row += 1
#PRINT DEFENSE SALARIES
defense.add_table('A1:G'+str(len(salarydefense) + 1), {
    'columns': [
        {'header': 'salary_type'},
        {'header': 'team_id'},
        {'header': 'gsis_id'},
        {'header': 'team'},
        {'header': 'salary'},
        {'header': 'season_year'},
        {'header': 'week'}
    ]}
)
row = 1
for i, v in enumerate(salarydefense):
    col = 0
    for key, val in sorted(v.iteritems()):
        defense.write(row, col, val, fmt_data)
        col += 1
    row += 1
workbook.close()
def run_minimax (tree_node, minv, maxv, heuristic):
    '''Runs a minimax search with alpha-beta style pruning on the given
    tree node and returns (score, move); move is in the range 0..6.

    minv/maxv act as the alpha/beta bounds.  move stays -1 when no child
    improves on the incoming bound -- callers must tolerate that.

    Fixes: `== True` comparison on isLeaf, redundant parentheses, and the
    duplicated early-return on cutoff (now a `break`, same result).
    '''
    if tree_node.isLeaf:
        return (heuristic(tree_node.board, tree_node.turn), tree_node.lastMove)
    if tree_node.type == "max":
        best = minv          # alpha: best score the maximizer can force so far
        move_to_make = -1    # unknown until a child improves on alpha
        for child_node in tree_node.children:
            (child_value, _) = run_minimax(child_node, best, maxv, heuristic)
            if child_value > best:
                best = child_value
                move_to_make = child_node.lastMove
            if maxv <= best:
                break        # beta cutoff: the minimizer will avoid this branch
        return (best, move_to_make)
    else:  # type is min
        best = maxv          # beta: lowest score the minimizer can force so far
        move_to_make = -1
        for child_node in tree_node.children:
            (child_value, _) = run_minimax(child_node, minv, best, heuristic)
            if child_value < best:
                best = child_value
                move_to_make = child_node.lastMove
            if best <= minv:
                break        # alpha cutoff
        return (best, move_to_make)
# Read a three-letter weekday abbreviation from stdin and print how many
# days remain through the coming Sunday, inclusive.  Unknown input is
# silently ignored (original behaviour).
s = input()
d = {
    'SUN': 7,
    'MON': 6,
    'TUE': 5,
    'WED': 4,
    'THU': 3,
    'FRI': 2,
    'SAT': 1,
}
# Membership test directly on the dict; `in d.keys()` built a needless view.
if s in d:
    print(d[s])
|
# Greedy pairing: sort both skill lists, then for each boy (largest
# first) take the first still-available girl, scanning from the largest,
# whose skill differs by at most 1.  (Python 2: raw_input/print statement.)
n = int(raw_input())
boys = [int(x) for x in raw_input().split()]
m = int(raw_input())
girls = [int(x) for x in raw_input().split()]
boys.sort()
girls.sort()
pairs = 0
for i in range(n-1,-1,-1):
    boy = boys[i]
    for j in range(m-1,-1,-1):
        # NOTE(review): 0 doubles as the "already matched" sentinel --
        # this breaks if a girl's skill can legitimately be 0 (and a real
        # 0 also shadows later candidates); presumably the problem
        # guarantees skills >= 1 -- confirm the input constraints.
        if girls[j]==0:
            continue
        if abs(boy-girls[j])<=1:
            pairs+=1
            girls[j]=0
            break
print pairs
|
"""
Fake Company class object
Copyright (c) 2019 Julien Kervizic
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import re
from typing import Dict
from faker import Faker
from tld import is_tld
from consistent_faker.utils import get_top_level_domain
from consistent_faker.classes import FakeBaseObject
FAKE = Faker()
class FakeCompany(FakeBaseObject):
    """
    Create a Fake Company object which can be used in order to generate
    email addresses. The method generates a company id, name and domain
    Parameters
    -----------
    uid: Optional[uuid.UUID]
    company_name: Optional[str]: name of the company
    company_domain: Optional[str]: top level domain name (eg .co.uk)
    Examples:
    -----------
    Generate a random fake company
    >>>FakeCompany()
    FakeCompany('Frank, Brown and Brown')
    """
    def __init__(self, **kwargs):
        FakeBaseObject.__init__(self, uid=kwargs.get("uid"))
        self._company_name = self._init_company_name(
            company_name=kwargs.get("company_name")
        )
        self._top_level_domain = self._init_top_level_domain(
            top_level_domain=kwargs.get("top_level_domain")
        )
    def __repr__(self):
        return "FakeCompany('%s')" % self.company_name
    @property
    def company_name(self) -> str:
        """str: Fake company name"""
        return self._company_name
    @property
    def top_level_domain(self):
        """
        str: Top level domain name
        example: .co.uk, .com, .org
        """
        return self._top_level_domain
    def get_company_domain(self) -> str:
        """
        Generate a domain name from the company name
        Returns:
            str: a domain name
        """
        lower_comp_name = self.company_name.lower()
        # Strip every non-alphanumeric character to form the domain label.
        domain_prefix = re.sub("[^0-9a-zA-Z]+", "", lower_comp_name)
        return domain_prefix + self.top_level_domain
    def to_dict(self) -> Dict:
        """
        Serialize the fake company object to a dictionary
        """
        company_dict = {"uid": str(self.uid), "company_name": self.company_name}
        return company_dict
    @classmethod
    def to_dataframe(cls):
        """Not implemented"""
        return NotImplemented
    @classmethod
    def _init_top_level_domain(cls, top_level_domain: str = None) -> str:
        """
        Assign or generate a top level domain
        Returns:
            str: assign or generate a random top level domain
            eg: .com, .co.uk
        Raises:
            ValueError: if the string is not a valid TLD
            TypeError: if a non-str truthy value is supplied
        """
        if top_level_domain and isinstance(top_level_domain, str):
            # [1:] assumes a leading dot (".co.uk" -> "co.uk") -- TODO confirm
            if is_tld(top_level_domain[1:]):
                return top_level_domain
            raise ValueError(
                "%s is not a valid top level domain" % top_level_domain
            )
        if top_level_domain:
            # BUG FIX: the original *returned* the TypeError instance
            # instead of raising it, silently handing callers an exception
            # object as if it were a domain string.
            raise TypeError("top_level_domain kwarg should be an instance of str")
        return get_top_level_domain()
    @classmethod
    def _init_company_name(cls, company_name: str = None) -> str:
        """
        Assign or generate a company name
        Returns:
            str: company name
        Raises:
            TypeError: if a non-str truthy value is supplied
        """
        if company_name and isinstance(company_name, str):
            return company_name
        if company_name:
            # BUG FIX: raise (not return) the TypeError -- see above.
            raise TypeError("company_name kwarg should be an instance of str")
        return FAKE.format('company')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.