text stringlengths 38 1.54M |
|---|
import sys
import pygame
from pygame.sprite import Group
from common.Settings import Settings
from common.ship import Ship
from common.zidan import Zidan
from common.wxr import Wxr
def check_event(ship,setvar,screen,zidans):
    """Handle keyboard and mouse events for one frame.

    Args:
        ship: player ship whose movement flags are toggled.
        setvar: game settings, forwarded to newly fired bullets.
        screen: display surface, forwarded to newly fired bullets.
        zidans: sprite Group collecting the live bullets.
    """
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            sys.exit()
        elif event.type == pygame.KEYDOWN:
            if event.key == pygame.K_q:
                sys.exit()
            if event.key == pygame.K_RIGHT:
                ship.move_right = True
            elif event.key == pygame.K_LEFT:
                ship.move_left = True
            elif event.key == pygame.K_UP:
                ship.move_up = True
            elif event.key == pygame.K_DOWN:
                ship.move_down = True
            # Firing is allowed while also holding a movement key.
            if event.key == pygame.K_SPACE:
                new_zidan = Zidan(ship, setvar, screen)
                zidans.add(new_zidan)
        elif event.type == pygame.KEYUP:
            # Bug fix: only clear the flag of the key actually released.
            # Previously ANY KEYUP cleared all four flags, so releasing
            # SPACE (or one of two held arrows) stopped the ship entirely.
            if event.key == pygame.K_RIGHT:
                ship.move_right = False
            elif event.key == pygame.K_LEFT:
                ship.move_left = False
            elif event.key == pygame.K_UP:
                ship.move_up = False
            elif event.key == pygame.K_DOWN:
                ship.move_down = False
def update_screen(ship,screen,setvar,zidans,wxrs):
    """Redraw one frame: background, ship, aliens, bullets, then flip."""
    # Repaint the background every frame (fill takes a single colour).
    screen.fill(setvar.bg_color)
    # The ship blits itself onto the surface it was created with.
    ship.blitme()
    # Group.draw blits each alien sprite onto the given surface.
    wxrs.draw(screen)
    # Bullets are custom sprites that draw themselves.
    for bullet in zidans.sprites():
        bullet.draw_zidan()
    # Make the freshly drawn frame visible.
    pygame.display.flip()
def update_zidan(zidans):
    """Advance all bullets and drop the ones that have left the screen.

    Args:
        zidans: sprite Group (any container exposing update/remove works)
            holding the live bullets.
    """
    zidans.update()  # move every bullet in the group one step
    # Iterate over a snapshot so removing members mid-loop is safe.
    for zidan in list(zidans):
        # Bullets travel upward. The original debug print inspected
        # rect.bottom and the comment said "delete at 0", but the code
        # tested rect.top, which culled bullets while still partly visible.
        # Fixed to remove a bullet only once it is completely off-screen.
        if zidan.rect.bottom <= 0:
            zidans.remove(zidan)
def update_wxr(wxrs):
    """Advance all aliens and drop the ones that have left the screen.

    Fix: removed leftover debug prints (print(111) and the rect.top dump)
    that spammed stdout on every frame for every alien.
    """
    wxrs.update()
    # NOTE(review): bottom <= 0 removes aliens fully above the TOP edge;
    # the original comment says "hit or reached the screen bottom", which
    # would need a screen-height comparison instead -- confirm intent.
    # Iterate a snapshot so removal during iteration is safe.
    for wxr in list(wxrs):
        if wxr.rect.bottom <= 0:
            wxrs.remove(wxr)
def create_wxrq(setvar,screen,wxrs):
    """Create the fleet of aliens (unimplemented stub).

    Per the original comment, this should compute how many aliens fit on
    one row and populate `wxrs` with a row containing a random number of
    aliens. Currently does nothing; run_ganme already calls it, so the
    game runs with an empty fleet until this is implemented.
    """
def run_ganme():
    """Run the game: initialise pygame, build the objects, enter the loop.

    (The 'run_ganme' typo is kept because the __main__ guard calls it by
    this name.)
    """
    setvar = Settings()  # load game settings
    pygame.init()  # initialise pygame's backend modules
    screen = pygame.display.set_mode((setvar.screen_width, setvar.screen_height))  # create the window
    pygame.display.set_caption("最炫酷的游戏(按Q退出)")
    ship = Ship(screen,setvar)  # the player's ship
    # wxr=Wxr(screen,setvar)
    # Group that holds the live bullets
    zidans=Group()
    wxrs=Group()  # group that holds the alien fleet
    create_wxrq(setvar,screen,wxrs)
    # Main game loop: handle input, update state, redraw -- every frame.
    while True:
        '''监视键盘和鼠标事件'''
        check_event(ship,setvar,screen,zidans)
        ship.update()  # move the ship according to its movement flags
        update_zidan(zidans)  # advance bullets and cull off-screen ones
        # update_wxr(wxrs)  # advance aliens (currently disabled)
        update_screen(ship, screen, setvar, zidans,wxrs)  # redraw the frame
if __name__ == '__main__':
run_ganme() |
import cv2

# Record the default webcam (device 0) to an XVID-encoded AVI file,
# previewing each frame; press ESC to stop.
cam = cv2.VideoCapture(0)
# Fixed typos: the API names are VideoWriter_fourcc / VideoWriter
# (the original VideoWrite_fourcc / VideoWrite raise AttributeError).
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('my_cam_vis.avi', fourcc, 20.0, (640, 480))
while True:
    ret, img = cam.read()
    if not ret:
        # Camera unplugged or read failure: stop instead of passing None
        # to imshow/write, which would raise.
        break
    cv2.imshow('my_cma', img)
    out.write(img)
    if cv2.waitKey(10) == 27:  # 27 == ESC
        break
cam.release()
out.release()
cv2.destroyAllWindows()
|
#!/usr/bin/env python3
# ============================================================================
# File: featprint
# Author: Erik Johannes Husom
# Created: 2019-12-05
# ----------------------------------------------------------------------------
# Description:
# Save feature importance as numpy arrays.
# ============================================================================
import numpy as np
# Hard-coded feature-importance vectors (one value per input feature) from a
# prior training run ("case 1") for three tree-based models.
feature_importance = {'tree': [0.04119991, 0.05167874, 0.12805375, 0.00454587,
    0.1973286, 0.00739907, 0.00438991, 0.00442739, 0.00277796, 0.08245902,
    0.09266789, 0.13685822, 0.0550103, 0.08147789, 0.08418555, 0.02553992,],
    'bagging': [0.05641168, 0.04191471, 0.08184195, 0.00846297, 0.1960321,
    0.00951002, 0.00454187, 0.00687586, 0.00367635, 0.09022062, 0.08253264,
    0.12991216, 0.06743362, 0.07035134, 0.0808825, 0.06939961], 'random':
    [0.06808629, 0.05378452, 0.07542956, 0.03025101, 0.06268223, 0.03397167,
    0.02389375, 0.04879707, 0.03624232, 0.10035244, 0.09986861, 0.10539286,
    0.05962506, 0.07273982, 0.05307037, 0.07581242]}
# Convert each model's list to an ndarray and persist as .npy in the CWD.
tree = np.array(feature_importance['tree'])
bagging = np.array(feature_importance['bagging'])
randomforest = np.array(feature_importance['random'])
np.save('featimp-decisiontree-case1.npy', tree)
np.save('featimp-bagging-case1.npy', bagging)
np.save('featimp-randomforest-case1.npy', randomforest)
# Reload the case-1 arrays (round-trip sanity check, printed below).
a = np.load('featimp-decisiontree-case1.npy')
b = np.load('featimp-bagging-case1.npy')
c = np.load('featimp-randomforest-case1.npy')
# Case-2 importances; np.save converts the plain lists to arrays itself.
tree_case2 = [0.07068566, 0.03171934, 0.02256834, 0.0058181, 0.1833569, 0.00891043,
    0.00281869, 0.00478877, 0.00357627, 0.091519, 0.17538176, 0.10870657,
    0.0761631, 0.07178151, 0.02735275, 0.11485281]
bagging_case2 = [0.06233289, 0.03632528, 0.04900637, 0.01413281, 0.18380748, 0.00859397,
    0.00407993, 0.00560589, 0.00302398, 0.10674585, 0.13095476, 0.1355339,
    0.05684061, 0.0919102, 0.03198606, 0.07912002]
random_case2 = [0.06956644, 0.05235452, 0.07125713, 0.0212575, 0.06263269, 0.02973148,
    0.03313004, 0.04892518, 0.03153263, 0.09950679, 0.10636449, 0.11232948,
    0.06323731, 0.07206387, 0.05282183, 0.0732886 ]
np.save('featimp-decisiontree-case2.npy', tree_case2)
np.save('featimp-bagging-case2.npy', bagging_case2)
np.save('featimp-randomforest-case2.npy', random_case2)
print(a)
print('---------')
print(b)
print('---------')
print(c)
|
#!/home/linus/PycharmProjects/flask/bin/python2.7
import os,unittest
from flaskr.models import User,Post,Comment,Like
from config import basedir
from flaskr import app,db
from flaskr.appviews import uniqueMail
from datetime import datetime,timedelta
class TestCase(unittest.TestCase):
def setUp(self):
app.config['TESTING']=True
app.config['WTF-CSRF_DATABASE_URI']=False
app.config['SQLALCHEMY_DATABASE_RUI']='sqlite:///'+os.path.join(basedir,'test.db')
self.app=app.test_client()
db.create_all()
    def tearDown(self):
        """Detach the session and drop all tables after each test."""
        db.session.remove()
        db.drop_all()
#
# def test_avater(self):
# u=User(nickname='john',email='john@example.com')
# avatar=u.avatar(128)
# #
# def test_make_unique_email(self):
# u=User(nickname='john',email='john@exampled.com')
# db.session.add(u)
# db.session.commit()
# boolemail=uniqueMail('john@exampled.com')
# assert not boolemail
# u1=User(nickname='susan',email='john@example.com')
# db.session.add(u1)
# db.session.commit()
# boolemail2=uniqueMail('john@exampled.com')
# assert not boolemail2
# assert boolemail ==boolemail2
#
# def test_follow(self):
# u1 = User(nickname = 'john', email = 'john@example.com')
# u2 = User(nickname = 'susan', email = 'susan@example.com')
# db.session.add(u1)
# db.session.add(u2)
# db.session.commit()
# assert u1.unfollow(u2) == None
# u = u1.follow(u2)
# db.session.add(u)
# db.session.commit()
# assert u1.follow(u2) == None
# assert u1.is_following(u2)
# assert u1.followed.count() == 1
# assert u1.followed.first().nickname == 'susan'
# assert u2.followers.count() == 1
# assert u2.followers.first().nickname == 'john'
# u = u1.unfollow(u2)
# assert u != None
# db.session.add(u)
# db.session.commit()
# assert u1.is_following(u2) == False
# assert u1.followed.count() == 0
# assert u2.followers.count() == 0
# def test_follow_posts(self):
# u1 = User(nickname = 'john', email = 'john@example.com',password='wei')
# u2 = User(nickname = 'susan', email = 'susan@example.com',password='wei')
# u3 = User(nickname = 'mary', email = 'mary@example.com',password='wei')
# u4 = User(nickname = 'david', email = 'david@example.com',password='wei')
# db.session.add(u1)
# db.session.add(u2)
# db.session.add(u3)
# db.session.add(u4)
# # make four posts
# utcnow = datetime.utcnow()
# p1 = Post(body = "post from john", author = u1, timestamp = utcnow + timedelta(seconds = 1))
# p2 = Post(body = "post from susan", author = u2, timestamp = utcnow + timedelta(seconds = 2))
# p3 = Post(body = "post from mary", author = u3, timestamp = utcnow + timedelta(seconds = 3))
# p4 = Post(body = "post from david", author = u4, timestamp = utcnow + timedelta(seconds = 4))
# db.session.add(p1)
# db.session.add(p2)
# db.session.add(p3)
# db.session.add(p4)
# db.session.commit()
# # setup the followers
# u1.follow(u1) # john follows himself
# u1.follow(u2) # john follows susan
# u1.follow(u4) # john follows david
# u2.follow(u2) # susan follows herself
# u2.follow(u3) # susan follows mary
# u3.follow(u3) # mary follows herself
# u3.follow(u4) # mary follows david
# u4.follow(u4) # david follows himself
# db.session.add(u1)
# db.session.add(u2)
# db.session.add(u3)
# db.session.add(u4)
# db.session.commit()
# # check the followed posts of each user
# f1 = u1.followed_posts().all()
# f2 = u2.followed_posts().all()
# f3 = u3.followed_posts().all()
# f4 = u4.followed_posts().all()
# assert len(f1) == 3
# assert len(f2) == 2
# assert len(f3) == 2
# assert len(f4) == 1
# assert f1 == [p4, p2, p1]
# assert f2 == [p3, p2]
# assert f3 == [p4, p3]
# assert f4 == [p4]
    def test_reject_posts(self):
        """Set up four users with a follow graph and print follow listings.

        NOTE(review): this is Python 2 code (bare print statements). The
        intended reject/followed_posts assertions are all commented out, so
        currently the method only exercises follow() and the Followeds()/
        Followers() helpers -- presumably they return printable listings;
        verify against the User model.
        """
        u1 = User(nickname = 'john', email = 'john@example.com',password='wei')
        u2 = User(nickname = 'susan', email = 'susan@example.com',password='wei')
        u3 = User(nickname = 'mary', email = 'mary@example.com',password='wei')
        u4 = User(nickname = 'david', email = 'david@example.com',password='wei')
        db.session.add(u1)
        db.session.add(u2)
        db.session.add(u3)
        db.session.add(u4)
        db.session.commit()
        u1.follow(u1) # john follows himself
        u1.follow(u2) # john follows susan
        u1.follow(u4) # john follows david
        u2.follow(u2) # susan follows herself
        u2.follow(u3) # susan follows mary
        u3.follow(u3) # mary follows herself
        u3.follow(u4) # mary follows david
        u4.follow(u4) # david follows himself
        db.session.add(u1)
        db.session.add(u2)
        db.session.add(u3)
        db.session.add(u4)
        db.session.commit()
        print u1.Followeds()
        print u4.Followers()
        # make four posts
        # utcnow = datetime.utcnow()
        # p1 = Post(body = "post from john", author = u1, timestamp = utcnow + timedelta(seconds = 1),title='heheda')
        # p2 = Post(body = "post from susan", author = u2, timestamp = utcnow + timedelta(seconds = 2))
        # p3 = Post(body = "post from mary", author = u3, timestamp = utcnow + timedelta(seconds = 3))
        # p4 = Post(body = "post from david", author = u4, timestamp =utcnow + timedelta(seconds = 4))
        # c1=Comment(body="it's amazing!!!",byuser=u2,topost=p1,timestamp=utcnow + timedelta(seconds = 5))
        # pc=p1.comments.all()
        # L1 = Like(is_like=True, topost=p1, byuser=u2,timestamp = utcnow + timedelta(seconds = 1))
        # L2 = Like(is_like=True, topost=p1, byuser=u1,timestamp = utcnow + timedelta(seconds = 2))
        # db.session.add(L1)
        # db.session.add(L2)
        # db.session.commit()
        # print p1.likes.count()
        # lp1=p1.likes.order_by(Like.timestamp.desc()).all()
        #
        # print lp1[0].byuser.nickname
        # print lp1[1].byuser.nickname
        # print pc
        # print pc[0].body
        # u2c=u2.comments.all()
        # print u2c
        # print u2c[0].body
        # print c1.byuser.nickname
        # print c1.topost.title
        # db.session.add(p1)
        # db.session.add(p2)
        # db.session.add(p3)
        # db.session.add(p4)
        # db.session.add(c1)
        # db.session.commit()
        # # setup the followers
        # u1.follow(u1) # john follows himself
        # u1.follow(u2) # john follows susan
        # u1.follow(u4) # john follows david
        # u2.follow(u2) # susan follows herself
        # u2.follow(u3) # susan follows mary
        # u3.follow(u3) # mary follows herself
        # u3.follow(u4) # mary follows david
        # u4.follow(u4) # david follows himself
        # db.session.add(u1)
        # db.session.add(u2)
        # db.session.add(u3)
        # db.session.add(u4)
        # db.session.commit()
        # u1.reject(u2)
        # u1.reject(u4)
        # db.session.add(u1)
        # db.session.commit()
        # # check the followed posts of each user
        # f1 = u1.followed_posts()
        # f2 = u2.followed_posts()
        # f3 = u3.followed_posts()
        # f4 = u4.followed_posts()
        # assert len(f1) == 1
        # assert len(f2) == 2
        # assert len(f3) == 2
        # assert len(f4) == 1
        # assert f1 == [p1]
        # assert f2 == [p3, p2]
        # assert f3 == [p4, p3]
        # assert f4 == [p4]
if __name__=='__main__':
unittest.main()
|
# Generated by Django 2.0 on 2020-03-08 07:09
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add a `group` field to the `headman` model.

    Generated files should not be hand-edited; create a new migration for
    further schema changes.
    """

    dependencies = [
        ('account', '0011_headman'),
    ]

    operations = [
        migrations.AddField(
            model_name='headman',
            name='group',
            # Two-character code; existing rows get the default '0'.
            field=models.CharField(default='0', max_length=2),
        ),
    ]
|
from django.contrib import admin
from .models import Employee
# Register your models here.
@admin.register(Employee)
class EmployeeAdmin(admin.ModelAdmin):
    """Admin changelist configuration for Employee records."""

    # One column per field shown on the employee list page.
    list_display = (
        'employee_ID',
        'first_name',
        'last_name',
        'email',
        'contact',
        'address',
        'manager_ID',
        'department_ID',
        'hire_date',
        'is_active',
    )
|
import math
class Health_Kit():
    """A pickup that drifts left while bobbing along a sine wave."""

    def __init__(self, x_position, starting_point, health, velocity):
        """Place the kit at (x_position, starting_point) with the given
        health value and leftward speed."""
        self.x_position = x_position
        self.y_position = starting_point
        # The sine oscillation is centred on the initial height.
        self.starting_point = starting_point
        self.health = health
        self.velocity = velocity
        # Sine frequency in cycles per horizontal pixel.
        self.frequency = 0.003

    def move(self):
        """Step left by `velocity` and recompute the sine-wave height."""
        self.x_position -= self.velocity
        phase = self.x_position * 2 * math.pi * self.frequency
        self.y_position = 200 * math.sin(phase) + self.starting_point
|
from __future__ import absolute_import
# Copyright (c) 2010-2019 openpyexcel
from .worksheet import Worksheet
|
def readStyle(path='src/style.qss'):
    """Return the contents of a Qt stylesheet file as a string.

    Args:
        path: Stylesheet location. Defaults to the previously hard-coded
            'src/style.qss', so existing zero-argument callers behave
            exactly as before.

    Raises:
        OSError: if the file cannot be opened.
    """
    with open(path, 'r', encoding='UTF-8') as style:
        return style.read()
import flask
import json
from schemainspect import get_inspector
from sqlalchemy.ext.declarative import declarative_base
from sqlbag import Base
Model = declarative_base(cls=Base)
def selectables(s):
    """Return the names of all selectable relations found by the inspector
    for session/connection `s`."""
    inspector = get_inspector(s)
    return [sel.name for sel in inspector.selectables.values()]
class Response(flask.Response):
    """flask.Response with a convenience accessor for JSON bodies."""

    @property
    def json(self):
        """Decode and return the response body as parsed JSON."""
        body = self.get_data(as_text=True)
        return json.loads(body)
|
# -*- coding: utf-8 -*-
import threading
import ali_speech
from ali_speech.callbacks import SpeechSynthesizerCallback
from ali_speech.constant import TTSFormat
from ali_speech.constant import TTSSampleRate
class MyCallback(SpeechSynthesizerCallback):
    """Synthesizer callback that streams received audio bytes to a file."""

    # `name` is the path of the file the audio is saved to.
    def __init__(self, name):
        self._name = name
        # Opened for the whole synthesis; closed in the terminal callbacks.
        self._fout = open(name, 'wb')
    def on_binary_data_received(self, raw):
        """Append one chunk of synthesized audio to the output file."""
        print('MyCallback.on_binary_data_received: %s' % len(raw))
        self._fout.write(raw)
    def on_completed(self, message):
        """Synthesis finished successfully: close the output file."""
        print('MyCallback.OnRecognitionCompleted: %s' % message)
        self._fout.close()
    def on_task_failed(self, message):
        """Synthesis failed: log task id / status and close the file."""
        print('MyCallback.OnRecognitionTaskFailed-task_id:%s, status_text:%s' % (
            message['header']['task_id'], message['header']['status_text']))
        self._fout.close()
    def on_channel_closed(self):
        """Transport channel closed (informational only)."""
        print('MyCallback.OnRecognitionChannelClosed')
def process(client, appkey, token, text, audio_name, voice):
    """Synthesize `text` with the given voice and save it to `audio_name`.

    Args:
        client: ali_speech.NlsClient instance.
        appkey/token: Alibaba Cloud NLS credentials.
        text: text to synthesize.
        audio_name: output audio file path (written by MyCallback).
        voice: voice preset name, e.g. 'Aixia'.

    Returns:
        The negative status code if start() fails, otherwise None.
    """
    callback = MyCallback(audio_name)
    synthesizer = client.create_synthesizer(callback)
    synthesizer.set_appkey(appkey)
    synthesizer.set_token(token)
    synthesizer.set_voice(voice)
    synthesizer.set_text(text)
    synthesizer.set_format(TTSFormat.WAV)
    synthesizer.set_sample_rate(TTSSampleRate.SAMPLE_RATE_16K)
    synthesizer.set_volume(50)
    # Negative speech rate slows the voice down; 0 pitch is the default.
    synthesizer.set_speech_rate(-200)
    synthesizer.set_pitch_rate(0)
    try:
        ret = synthesizer.start()
        if ret < 0:
            # NOTE(review): returning here still runs finally/close, but the
            # callback's file stays open until a terminal callback fires --
            # confirm the SDK guarantees one on failed start.
            return ret
        synthesizer.wait_completed()
    except Exception as e:
        # Best-effort: log and fall through so the synthesizer is closed.
        print(e)
    finally:
        synthesizer.close()
def process_multithread(client, appkey, token, number, voice='Aixia'):
    """Synthesize `number` sample texts concurrently, one thread per text.

    Args:
        client: ali_speech.NlsClient instance.
        appkey/token: Alibaba Cloud NLS credentials.
        number: how many threads/texts to run.
        voice: voice preset. Fix: this was previously read from a module
            global defined only under __main__, so calling this function
            from an import raised NameError; it is now a parameter whose
            default matches the value the script used.
    """
    thread_list = []
    for i in range(0, number):
        text = "这是线程" + str(i) + "的合成。"
        audio_name = "sy_audio_" + str(i) + ".wav"
        thread = threading.Thread(target=process, args=(client, appkey, token, text, audio_name, voice))
        thread_list.append(thread)
        thread.start()
    # Wait for every synthesis thread to finish before returning.
    for thread in thread_list:
        thread.join()
if __name__ == "__main__":
client = ali_speech.NlsClient()
# 设置输出日志信息的级别:DEBUG、INFO、WARNING、ERROR
client.set_log_level('INFO')
voice = 'Aixia'
appkey = 'qziLDmH5M82EHpFQ'
token = 'c8606e51e6a44a4983f10edf8a784161'
# text = "岁月匆匆而过,悄悄回首,我已走进小学生活近六年了,念及往事,不生唏嘘。那人生道路上的无数个第一次就像波涛起伏的海浪,荡漾在我的心头。是那样的亲切而有熟悉,又是那样的美好而和谐。第一次上台表演的经历就一直使我不能忘怀。那是我在五岁第一次上台时,在上台前,我的心忐忑不安,总是无法调整出好的情绪。开始表演了,强烈的镁光灯直射下来,就像一双犀利的眼睛,盯着我喘不过气来。我就更紧张了。当我看到台下这么多人的目光聚集在我的身上,原来就担心的我一下子忘了自己的动作,傻呆呆的站在幕布旁。那一刹那,我听到的音乐就像奔驰的野马,嗡嗡作响;镁光灯则是一把锋利而尖锐的箭,射进了我的内心深处。好在这时,老师在幕布旁不断地鼓励我,小声地说:“你一定能行!”我深深的吸了一口气,很快镇静下来。我微笑着自信地走上了舞台。一上台,我就好像置于一池碧水中,身体变得那样的舒展,跳的每一个动作都是那么娴熟而自然。那音乐如潺潺的溪水,镁光灯也如正午的暖阳。我的舞姿犹如一只傲气的白天鹅在湖面上游动;又像一缕纯洁的阳光,干净而温暖;更像一直蓬勃的向日葵,正努力地向上生长。终于,我在观众们的掌声中退了场。事后,我一直在想:有自信不一定能成功。但是,如果你充满自信,就有成功的希望。自信是飞向蓝天的翅膀,是航行的船桨。在任何时候,自信都会助你一臂之力,助你到达成功的彼岸。让自己成为一个充满自信的人吧!我爱第一次,他教会了我成功的秘笈:充满自信,挑战自信。"
text = '同学你好'
audio_name = 'audio.mp3'
process(client, appkey, token, text, audio_name, voice)
# 多线程示例
# process_multithread(client, appkey, token, 2) |
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 4 14:33:36 2018
@author: bramv
"""
import numpy as np
import matplotlib.pyplot as plt
import calendar
import calculate_geostrophic_wind as gw
import read_cabauw_data as r
import settings as s
year = 2016
# Process one month at a time. Each plot covers 12 UTC of the previous day
# to 12 UTC of the current day, so the preceding month is loaded as well.
for i in range(11, 12):
    months = [i-1, i] if i > 1 else [12, i]
    years = [year, year] if i > 1 else [year - 1, year]
    # Number of days in each of the two months.
    n_days = [calendar.monthrange(int(years[k]), int(months[k]))[1] for k in range(len(months))]
    gw_data = gw.calculate_geostrophic_wind(years, months)
    data = r.read_and_process_cabauw_data(years, months)
    """Plots are now created for the period from 12 to 12 UTC, instead of 0 to 0 UTC.
    For a given date, the time range is from 12 UTC at the previous date to 12 UTC at the given date.
    In order to plot the data for this time range, all datasets are below shifted backward in time by
    12 hours.
    """
    shift = 72  # 12 hours at a 10-minute sample interval (72 samples)
    # NOTE(review): the exec/eval below flattens the leading (day, time)
    # axes, applies the 12-hour shift, and reshapes back to (n_days, 144,
    # ...). Assumes 144 samples per day -- confirm against the reader.
    for j in data.__dict__:
        if len(eval('data.'+j).shape) >=2:
            exec('data.'+j+'= np.reshape(data.'+j+', (data.'+j+'.shape[0] * data.'+j+'.shape[1],) + (data.'+j+'.shape[2:] if len(data.'+j+'.shape) > 2 else ()))')
            exec('data.'+j+'=data.'+j+'[n_days[0] * 144 - shift: - shift]')
            exec('data.'+j+'=np.reshape(data.'+j+', (n_days[-1], 144) + (data.'+j+'.shape[1:] if len(data.'+j+'.shape) > 1 else ()))')
    # Same backward shift for the geostrophic-wind dataset.
    for j in gw_data.__dict__:
        if len(eval('gw_data.'+j).shape) >=2:
            exec('gw_data.'+j+'= np.reshape(gw_data.'+j+', (gw_data.'+j+'.shape[0] * gw_data.'+j+'.shape[1],) + (gw_data.'+j+'.shape[2:] if len(gw_data.'+j+'.shape) > 2 else ()))')
            exec('gw_data.'+j+'=gw_data.'+j+'[n_days[0] * 144 - shift: - shift]')
            exec('gw_data.'+j+'=np.reshape(gw_data.'+j+', (n_days[-1], 144) + (gw_data.'+j+'.shape[1:] if len(gw_data.'+j+'.shape) > 1 else ()))')
    #%%
    # Position (axes fraction) of the per-panel day number label.
    figure_numbers_pos = [-0.125, 1.04]
    # One panel per day, five panels per row.
    fig, ax = plt.subplots(int(np.ceil(n_days[-1]/5)),5, figsize = (20,20))
    plot_hours = np.array([12, 18, 0, 6])
    colors = ['blue', 'red', 'green', 'yellow']
    handles_windprofile = []
    def plot_windprofiles(ax, j):
        """Draw hodograph-style wind profiles for day `j` at each hour in
        `plot_hours`, plus the geostrophic wind, on axes `ax`.

        Reads the enclosing scope's `data`, `gw_data`, `plot_hours`,
        `colors`, `figure_numbers_pos` and appends line handles to
        `handles_windprofile`.
        """
        ax.set_aspect('equal')
        for i in range(len(plot_hours)):
            hour = plot_hours[i]
            time_index = np.argmin(np.abs(data.hours - hour))
            u_j, v_j = data.u[j, time_index, :-1], data.v[j, time_index, :-1] #Exclude the last element as it is np.nan
            u_g = gw_data.V_g[j, time_index, 0]; v_g = gw_data.V_g[j, time_index, 1]
            ax.plot(u_j, v_j, color = colors[i], marker = 'o', markersize = 3)
            # Larger marker: the geostrophic wind for this hour.
            ax.plot(u_g, v_g, color = colors[i], marker = 'o', markersize = 5)
            handles_windprofile.append(ax.plot(u_j, v_j, color = colors[i], linestyle = '-')[0])
            # Track plotting limits over all hours and the geostrophic wind.
            if i == 0:
                u_min = u_j.min(); u_max = u_j.max()
                v_min = v_j.min(); v_max = v_j.max()
            else:
                u_min = np.min([u_min, u_g, u_j.min()]); u_max = np.max([u_max, u_g, u_j.max()])
                v_min = np.min([v_min, v_g, v_j.min()]); v_max = np.max([v_max, v_g, v_j.max()])
            # Annotate the lowest and highest profile points with height (m).
            for k in range(len(u_j)):
                if k in (0, len(u_j) - 1):
                    ax.text(u_j[k], v_j[k], str(int(data.z[k])))
        # Panel number, e.g. "3)" for the third day.
        ax.text(figure_numbers_pos[0], figure_numbers_pos[1], str(j+1)+')', transform=ax.transAxes, fontsize = 15)
        # Concentric speed rings every `dr` m/s up to the largest speed.
        max_radius = int(np.ceil(np.max(np.abs([u_min, u_max, v_min, v_max]))))
        dr = int(max_radius/3)
        for i in np.arange(0.0, max_radius+dr, dr):
            ax.plot(i * np.sin(np.linspace(0, 2*np.pi, 50)), i * np.cos(np.linspace(0, 2*np.pi, 50)), color = 'black', linewidth = 0.5)
        # Pad limits to whole m/s and force a square plotting region.
        do = 1
        x_min = np.min([int(np.floor(u_min/do)*do)-do, -do]); y_min = np.min([int(np.floor(v_min/do)*do)-do, -do])
        x_max = np.max([int(np.ceil(u_max/do)*do)+do, do]); y_max = np.max([int(np.ceil(v_max/do)*do)+do, do])
        x_range = x_max-x_min; y_range = y_max-y_min
        if x_range>y_range:
            y_min -= (x_range-y_range)/2; y_max += (x_range-y_range)/2
        else:
            x_min -= (y_range-x_range)/2; x_max += (y_range-x_range)/2
        ax.set_xlim(x_min, x_max)
        ax.set_ylim(y_min, y_max)
    # Draw one wind-profile panel per day; label axes on the first panel.
    for j in range(len(ax.flat)):
        try:
            plot_windprofiles(ax.flat[j],j)
            if j == 0:
                ax.flat[j].set_xlabel('u (m/s)'); ax.flat[j].set_ylabel('v (m/s)')
        except Exception: continue #Will occur when j >= n_days
    plt.suptitle('12Z previous day - 12Z current day', x = 0.5, y = 0.91, fontweight = 'bold', fontsize = 14)
    # Same legend twice: once below the grid, once to the right.
    plt.figlegend(handles_windprofile, [format(j, '02d')+'z' for j in plot_hours], loc = [0.37,0.05], ncol = 4, labelspacing=0., fontsize = 12 )
    plt.figlegend(handles_windprofile, [format(j, '02d')+'z' for j in plot_hours], loc = [0.915,0.5], ncol = 1, labelspacing=0., fontsize = 12 )
    plt.savefig(s.imgs_path+'Overview/'+'12Z-12Z_wind_'+str(year)+format(i, '02d')+'.jpg', dpi = 120, bbox_inches = 'tight')
    plt.show()
    #%%
    # Second figure: daily wind cycles at fixed heights.
    fig, ax = plt.subplots(int(np.ceil(n_days[-1]/5)),5, figsize = (20,20))
    plot_heights = [10, 80, 200]
    colors = ['blue', 'red', 'green', 'yellow']
    handles_windcycle = []
    def plot_windcycles(ax, j):
        """Draw the daily (u, v) wind cycle for day `j` at each height in
        `plot_heights`, plus the geostrophic wind cycle, on axes `ax`.

        Reads the enclosing scope's `data`, `gw_data`, `plot_heights`,
        `colors`, `figure_numbers_pos` and appends line handles to
        `handles_windcycle`. Start/end of each cycle are marked 's'/'e'.
        """
        ax.set_aspect('equal')
        for i in range(len(plot_heights)):
            height = plot_heights[i]
            z_index = np.argmin(np.abs(data.z - height))
            u_j, v_j = data.u[j, :, z_index], data.v[j, :, z_index]
            ax.plot(u_j, v_j, color = colors[i], marker = 'o', markersize = 1.5)
            handles_windcycle.append(ax.plot(u_j, v_j, color = colors[i], linestyle = '-', linewidth = 0.75)[0])
            # Track plotting limits over all heights.
            if i == 0:
                u_min = u_j.min(); u_max = u_j.max()
                v_min = v_j.min(); v_max = v_j.max()
            else:
                u_min = np.min([u_min, u_j.min()]); u_max = np.max([u_max, u_j.max()])
                v_min = np.min([v_min, v_j.min()]); v_max = np.max([v_max, v_j.max()])
            # Mark the start ('s') and end ('e') of the daily cycle.
            for k in (0, -1):
                ax.text(u_j[k], v_j[k], 's' if k == 0 else 'e', fontsize = 12)
        # Geostrophic wind cycle in black.
        u_g = gw_data.V_g[j, :, 0]; v_g = gw_data.V_g[j, :, 1]
        ax.plot(u_g, v_g, color = 'black', marker = 'o', markersize = 1.5)
        handles_windcycle.append(ax.plot(u_g, v_g, color = 'black', linestyle = '-', linewidth = 0.75)[0])
        for k in (0, -1):
            ax.text(u_g[k], v_g[k], 's' if k == 0 else 'e', fontsize = 12)
        u_min = np.min([u_min, u_g.min()]); u_max = np.max([u_max, u_g.max()])
        v_min = np.min([v_min, v_g.min()]); v_max = np.max([v_max, v_g.max()])
        # Panel number label.
        ax.text(figure_numbers_pos[0], figure_numbers_pos[1], str(j+1)+')', transform=ax.transAxes, fontsize = 15)
        # Concentric speed rings every `dr` m/s.
        max_radius = int(np.ceil(np.max(np.abs([u_min, u_max, v_min, v_max]))))
        dr = int(max_radius/3)
        for i in np.arange(0.0, max_radius+dr, dr):
            ax.plot(i * np.sin(np.linspace(0, 2*np.pi, 50)), i * np.cos(np.linspace(0, 2*np.pi, 50)), color = 'black', linewidth = 0.5)
        # Pad limits to whole m/s and force a square plotting region.
        do = 1
        x_min = np.min([int(np.floor(u_min/do)*do)-do, -do]); y_min = np.min([int(np.floor(v_min/do)*do)-do, -do])
        x_max = np.max([int(np.ceil(u_max/do)*do)+do, do]); y_max = np.max([int(np.ceil(v_max/do)*do)+do, do])
        x_range = x_max-x_min; y_range = y_max-y_min
        if x_range>y_range:
            y_min -= (x_range-y_range)/2; y_max += (x_range-y_range)/2
        else:
            x_min -= (y_range-x_range)/2; x_max += (y_range-x_range)/2
        ax.set_xlim(x_min, x_max)
        ax.set_ylim(y_min, y_max)
    # Draw one wind-cycle panel per day; label axes on the first panel.
    for j in range(len(ax.flat)):
        try:
            plot_windcycles(ax.flat[j], j)
            if j == 0:
                ax.flat[j].set_xlabel('u (m/s)'); ax.flat[j].set_ylabel('v (m/s)')
        except Exception: continue #Will occur when j >= n_days
    plt.suptitle('12Z previous day - 12Z current day', x = 0.5, y = 0.91, fontweight = 'bold', fontsize = 14)
    # Same legend twice: once below the grid, once to the right.
    plt.figlegend(handles_windcycle, [str(j)+' m' for j in plot_heights]+['V_g'], loc = [0.37,0.05], ncol = 4, labelspacing=0., fontsize = 12 )
    plt.figlegend(handles_windcycle, [str(j)+' m' for j in plot_heights]+['V_g'], loc = [0.915,0.5], ncol = 1, labelspacing=0., fontsize = 12 )
    plt.savefig(s.imgs_path+'Overview/'+'12Z-12Z_wind_cycle_'+str(year)+format(i, '02d')+'.jpg', dpi = 120, bbox_inches = 'tight')
    plt.show()
    #%%
    # Third figure: potential-temperature profiles.
    fig, ax = plt.subplots(int(np.ceil(n_days[-1]/5)),5, figsize = (20,20))
    handles_theta = []
    def plot_thetaprofiles(ax, j):
        """Plot potential-temperature profiles theta(z) for day `j` at each
        hour in `plot_hours` on axes `ax`.

        Reads the enclosing scope's `data`, `plot_hours`, `colors`,
        `figure_numbers_pos` and appends handles to `handles_theta`.
        """
        for i in range(len(plot_hours)):
            hour = plot_hours[i]
            time_index = np.argmin(np.abs(data.hours - hour))
            theta_j = data.theta[j, time_index]
            # Running min/max over all plotted hours (i == 0 initialises).
            theta_min = theta_j.min() if i == 0 else np.min([theta_min, theta_j.min()])
            theta_max = theta_j.max() if i == 0 else np.max([theta_max, theta_j.max()])
            handles_theta.append(ax.plot(theta_j, data.z, color = colors[i])[0])
        # 2 K padding either side of the plotted range.
        ax.set_xlim([theta_min - 2, theta_max + 2])
        ax.text(figure_numbers_pos[0], figure_numbers_pos[1], str(j+1)+')', transform=ax.transAxes, fontsize = 15)
        ax.grid()
    # Draw one theta-profile panel per day; label axes on the first panel.
    for j in range(len(ax.flat)):
        try:
            plot_thetaprofiles(ax.flat[j], j)
            if j == 0:
                ax.flat[j].set_xlabel('$\\theta$ (K)'); ax.flat[j].set_ylabel('h (m)')
        except Exception: continue #Will occur when j >= n_days
    plt.suptitle('12Z previous day - 12Z current day', x = 0.5, y = 0.91, fontweight = 'bold', fontsize = 14)
    plt.figlegend(handles_theta, [format(j, '02d')+'z' for j in plot_hours], loc = [0.37,0.05], ncol = 4, labelspacing=0., fontsize = 12)
    plt.figlegend(handles_theta, [format(j, '02d')+'z' for j in plot_hours], loc = [0.925,0.5], ncol = 1, labelspacing=0., fontsize = 12)
    plt.savefig(s.imgs_path+'Overview/'+'12Z-12Z_theta_'+str(year)+format(i, '02d')+'.jpg', dpi = 120, bbox_inches = 'tight')
    plt.show()
    #%%
    # Combined figure: one row per day with theta profile, wind profile and
    # wind cycle side by side.
    fig, ax = plt.subplots(n_days[-1], 3, figsize = (12, 100))
    for j in range(len(ax)):
        plot_thetaprofiles(ax[j][0], j)
        plot_windprofiles(ax[j][1], j)
        plot_windcycles(ax[j][2], j)
        if j == 0:
            ax[j][0].set_xlabel('$\\theta$ (K)'); ax[j][0].set_ylabel('h (m)')
            ax[j][1].set_xlabel('u (m/s)'); ax[j][1].set_ylabel('v (m/s)')
            ax[j][2].set_xlabel('u (m/s)'); ax[j][2].set_ylabel('v (m/s)')
    plt.suptitle('12Z previous day - 12Z current day', x = 0.5, y = 0.885, fontweight = 'bold', fontsize = 14)
    # One legend per column of the combined figure.
    plt.figlegend(handles_theta, [format(j, '02d')+'z' for j in plot_hours], loc = [0.12,0.0625], ncol = 1, labelspacing=0., fontsize = 12)
    plt.figlegend(handles_windprofile, [format(j, '02d')+'z' for j in plot_hours], loc = [0.44,0.0625], ncol = 1, labelspacing=0., fontsize = 12)
    plt.figlegend(handles_windcycle, [str(j)+' m' for j in plot_heights]+['V_g'], loc = [0.78,0.0625], ncol = 1, labelspacing=0., fontsize = 12)
    plt.savefig(s.imgs_path+'Overview/'+'12Z-12Z_combi_'+str(year)+format(i, '02d')+'.jpg', dpi = 120, bbox_inches = 'tight')
    plt.show()
# Generated by Django 3.2 on 2021-04-30 07:41
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated migration: add created/start/end date-time fields to
    the `test` model.

    `preserve_default=False` means the `django.utils.timezone.now` defaults
    exist only to back-fill existing rows during this migration; they are
    not kept on the model fields. Generated files should not be hand-edited.
    """

    dependencies = [
        ('courses', '0016_auto_20210430_1540'),
    ]

    operations = [
        migrations.AddField(
            model_name='test',
            name='test_created_datetime',
            field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='test',
            name='test_end_date',
            field=models.DateField(default=django.utils.timezone.now),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='test',
            name='test_end_time',
            field=models.TimeField(default=django.utils.timezone.now),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='test',
            name='test_start_date',
            field=models.DateField(default=django.utils.timezone.now),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='test',
            name='test_start_time',
            field=models.TimeField(default=django.utils.timezone.now),
            preserve_default=False,
        ),
    ]
|
CODE_DIR = 'C:/Users/mmall/Documents/github/repler/src/'
SAVE_DIR = 'C:/Users/mmall/Documents/uni/columbia/multiclassification/saves/'
import os, sys, re
import pickle
sys.path.append(CODE_DIR)
import torch
import torch.nn as nn
import torchvision
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as pl
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib import animation as anime
from mpl_toolkits.mplot3d import Axes3D
from itertools import permutations, combinations
from tqdm import tqdm
from sklearn import svm, discriminant_analysis, manifold, linear_model
import scipy.stats as sts
import scipy.linalg as la
# import umap
from cycler import cycler
# my code
import students
import assistants
import experiments as exp
import util
import plotting as dicplt
#%% custom classes to allow for identity gradients
class RayLou(nn.ReLU):
    """ReLU whose gradient can be overridden with an identity surrogate."""

    def __init__(self, linear_grad=False):
        super(RayLou, self).__init__()
        # When True, deriv() pretends the activation were linear.
        self.linear_grad = linear_grad

    def deriv(self, x):
        """Elementwise derivative of the activation evaluated at `x`."""
        if self.linear_grad:
            return torch.ones(x.shape)
        return (x > 0).float()
class TanAytch(nn.Tanh):
    """Tanh with optional identity or random surrogate gradient."""

    def __init__(self, linear_grad=False, rand_grad=False):
        super(TanAytch, self).__init__()
        # linear_grad=True replaces the true gradient with a surrogate;
        # rand_grad then selects random values instead of ones.
        self.linear_grad = linear_grad
        self.rand_grad = rand_grad

    def deriv(self, x):
        """Elementwise derivative of tanh at `x` (or a surrogate)."""
        if not self.linear_grad:
            return 1 - nn.Tanh()(x).pow(2)
        if self.rand_grad:
            return torch.rand(x.shape)
        return torch.ones(x.shape)
class Iden(nn.Identity):
    """Identity activation; its derivative is one everywhere.

    Fix: the original `deriv` had an if/else on `linear_grad` whose two
    branches returned the identical expression (torch.ones). The dead
    branch is collapsed; the flag is kept for interface parity with
    RayLou and TanAytch.
    """

    def __init__(self, linear_grad=False):
        super(Iden, self).__init__()
        self.linear_grad = linear_grad

    def deriv(self, x):
        """Elementwise derivative of the identity: all ones."""
        # d/dx x = 1 regardless of linear_grad.
        return torch.ones(x.shape)
#%% Pick data format
# Build (inputs, targets, abstract_conds) for one of three dataset styles,
# selected by `which_data`.
# NOTE(review): `this_exp` is not defined anywhere in this file; the 'assoc'
# and 'class' branches will raise NameError — presumably it lives in an
# interactive session or another script.  Only 'struc_class' is self-contained.
num_cond = 8
num_var = 3
# which_data = 'assoc'
# which_data = 'class'
which_data = 'struc_class'
ndat = 5000
# Associative task
if which_data == 'assoc':
    p = 2**num_var
    allowed_actions = [0,1,2]
    # allowed_actions = [0,1,2,4]
    # allowed_actions = [0]
    p_action = [0.7,0.15,0.15]
    # p_action = [0.61, 0.13, 0.13, 0.13]
    # p_action = [1.0]
    output_states = (this_exp.train_data[0][:ndat,:].data+1)/2
    # output_states = this_exp.train_data[1][:ndat,:].data
    input_states = (this_exp.train_data[0][:ndat,:].data+1)/2
    abstract_conds = util.decimal(this_exp.train_data[1])[:ndat]
    cond_set = np.unique(abstract_conds)
    # draw the "actions" for each data point
    actns = torch.tensor(np.random.choice(allowed_actions, ndat, p=p_action)).int()
    # unpack each action integer into num_var binary components
    actions = torch.stack([(actns&(2**i))/2**i for i in range(num_var)]).float().T
    # act_rep = assistants.Indicator(p,p)(util.decimal(actions).int())
    act_rep = actions.data
    # inputs = np.concatenate([input_states,act_rep], axis=1)
    # # inputs = np.concatenate([input_states, this_exp.train_data[1]], axis=1)
    inputs = input_states.float()
    # # sample the successor states, i.e. input + action
    successors = np.mod(this_exp.train_data[1][:ndat,:]+actions, 2)
    succ_conds = util.decimal(successors)
    succ_counts = np.unique(succ_conds, return_counts=True)[1]
    # should the targets be sampled from the training set, or another set?
    # train set would be like an autoencoder training, so maybe that's fine
    samps = np.concatenate([np.random.choice(np.where(abstract_conds==c)[0],n) \
                            for c,n in zip(cond_set,succ_counts)])
    # invert the sort so each sample lines up with its successor condition
    unscramble = np.argsort(np.argsort(succ_conds))
    successor_idx = samps[unscramble]
    targets = output_states[successor_idx,:]
    # targets = output_state
# Classification w/ random inputs
elif which_data == 'class':
    input_states = this_exp.train_data[0][:ndat,:].data
    output_states = this_exp.train_data[1][:ndat,:].data
    abstract_conds = util.decimal(this_exp.train_data[1])[:ndat]
    inputs = input_states.float()
    targets = output_states
    inp_condition = this_exp.train_conditions[:ndat]
# Classification w/ structured inputs
elif which_data == 'struc_class':
    num_var = 2
    dim_inp = 1 # dimension per variable
    noise = 0.0
    ndat = 5000
    num_cond = 2**num_var
    apply_rotation = False
    # apply_rotation = True
    # input_task = util.RandomDichotomies(d=[(0,1,2,3),(0,2,4,6),(0,1,4,5)])
    input_task = util.RandomDichotomies(d=[(0,1),(0,2)])
    # task = util.RandomDichotomies(d=[(0,3,5,6)]) # 3d xor
    # task = util.RandomDichotomies(d=[(0,1,6,7)]) # 2d xor
    # task = util.RandomDichotomies(d=[(0,1,3,5),(0,2,3,6),(0,1,2,4)]) # 3 corners
    # task = util.RandomDichotomies(d=[(0,1,3,5)]) # corner dichotomy
    task = util.RandomDichotomies(d=[(0,3)])
    # generate inputs
    inp_condition = np.random.choice(2**num_var, ndat)
    # inp_condition = np.arange(ndat)
    # var_bit = (np.random.rand(num_var, num_data)>0.5).astype(int)
    var_bit = input_task(inp_condition).numpy().T
    # unit-norm cluster directions, one per variable
    means = np.random.randn(num_var, dim_inp)
    means /= la.norm(means,axis=1, keepdims=True)
    # signed means: +mean when the variable's bit is 1, -mean when it is 0
    mns = (means[:,None,:]*var_bit[:,:,None]) - (means[:,None,:]*(1-var_bit[:,:,None]))
    clus_mns = np.reshape(mns.transpose((0,2,1)), (dim_inp*num_var,-1)).T
    if apply_rotation:
        # random orthogonal rotation of the cluster means (QR of a random matrix)
        C = np.random.rand(num_var*dim_inp, num_var*dim_inp)
        clus_mns = clus_mns@la.qr(C)[0][:num_var*dim_inp,:]
    inputs = torch.tensor(clus_mns + np.random.randn(ndat, num_var*dim_inp)*noise).float()
    # generate outputs
    targets = task(inp_condition)
    abstract_conds = inp_condition
# %%
# Training configuration and manual two-weight-layer network initialization.
manual = True  # if True, gradients are computed by hand instead of autograd
# manual = False
ppp = 1 # 0 is MSE, 1 is cross entropy
two_layers = False
# two_layers = True
# nonneg = True
nonneg = False
# train_out = True
train_out = False  # if True, also train the readout weights W, b
linear_grad = False
# linear_grad = True
# average_grad = False
# average_grad = True
# nonlinearity = RayLou(linear_grad)
nonlinearity = TanAytch(linear_grad)
# nonlinearity = Iden()
correct_mse = False # if True, rescales the MSE targets to be more like the log odds
N = 100        # hidden-layer width
nepoch = 2000
lr = 1e-4
bsz = 100
# 80/20 train/test split over the ndat samples
n_trn = int(ndat*0.8)
idx_trn = np.random.choice(ndat, n_trn, replace=False)
idx_tst = np.setdiff1d(range(ndat), idx_trn)
# idx_trn = np.arange(ndat)
# idx_tst = np.arange(ndat)
dset = torch.utils.data.TensorDataset(inputs[idx_trn], targets[idx_trn])
dl = torch.utils.data.DataLoader(dset, batch_size=bsz, shuffle=True)
# set up network (2 layers)
# ba = 1/np.sqrt(N)
ba = 1
# first-layer weights, uniform in [-ba, ba]
W1 = torch.FloatTensor(N,inputs.shape[1]).uniform_(-ba,ba)
# W1 = torch.FloatTensor([[1,1],[1,-1],[-1,1],[-1,-1]]).repeat_interleave(N//4,0).repeat_interleave(dim_inp,1)
# W1 = torch.FloatTensor([[1,-1],[-1,1]]).repeat_interleave(N//2,0)
# b1 = torch.FloatTensor(N,1).uniform_(-ba,ba)
# b1 = torch.FloatTensor(torch.zeros(N,1))
b1 = torch.FloatTensor(torch.ones(N,1)*0.1)  # small constant bias
W1.requires_grad_(True)
b1.requires_grad_(True)
if two_layers:
    ba = 1/np.sqrt(N)
    W2 = torch.FloatTensor(N,N).uniform_(-ba,ba)
    b2 = torch.FloatTensor(torch.zeros(N,1))
    W2.requires_grad_(True)
    b2.requires_grad_(True)
ba = 1/np.sqrt(targets.shape[1])
if nonneg:
    W = torch.FloatTensor(targets.shape[1],N).uniform_(0,2*ba)
    b = torch.FloatTensor(targets.shape[1],1).uniform_(0,2*ba)
else:
    # W = torch.FloatTensor(targets.shape[1],N).uniform_(-ba,ba)
    # fixed alternating +/-1 readout row
    W = torch.FloatTensor([1,-1]).repeat(N//2)[None,:]
    # W *= (W>0)
    # W = torch.FloatTensor(torch.ones(targets.shape[1],N))
    # b = torch.FloatTensor(targets.shape[1],1).uniform_(-ba,ba)
    b = torch.FloatTensor(torch.zeros(targets.shape[1],1))
if two_layers:
    optimizer = optim.Adam([W1, b1, W2, b2], lr=lr)
else:
    if train_out:
        optimizer = optim.Adam([W1, b1, W, b], lr=lr)
    else:
        # only the first-layer weights are trained; b1 is frozen too
        optimizer = optim.Adam([W1], lr=lr)
# per-epoch logs
train_loss = []
test_perf = []
PS = []
CCGP = []
SD = []
lindim = []
gradz_sim = []
gradlin_sim = []
weights = []
# weights2 = []
biases = []
# grad_mag = []
# Main training loop: per epoch, log weights every 10 epochs, evaluate test
# performance and abstraction metrics, then take one pass of SGD steps.
# NOTE(review): indentation reconstructed from a flattened paste — nesting of
# the metric block should be confirmed against the original file.
for epoch in tqdm(range(nepoch)):
    # loss = net.grad_step(dl, optimizer)
    if not np.mod(epoch,10):
        # snapshot weights/biases every 10 epochs (copy via 1*)
        weights.append(1*W1.detach().numpy())
        # if two_layers:
        #     weights2.append(1*W2.detach().numpy())
        biases.append(1*b1.detach().numpy())
    running_loss = 0
    # idx = np.random.choice(n_trn, np.min([5000,ndat]), replace=False)
    # forward pass on the held-out set for evaluation
    if two_layers:
        z1 = nonlinearity(torch.matmul(W1,inputs[idx_tst,:].T) + b1)
        z = nonlinearity(torch.matmul(W2,z1) + b2)
    else:
        z = nonlinearity(torch.matmul(W1,inputs[idx_tst,:].T) + b1)
    pred = torch.matmul(W,z) + b
    if ppp == 0:
        # MSE regime: report summed squared error
        perf = np.sum((pred.T-targets[idx_tst,:]).detach().numpy()**2,1).mean(0)
    else:
        # cross-entropy regime: report sign-threshold accuracy
        perf = ((pred.T>0) == targets[idx_tst,:]).detach().numpy().mean(0)
    test_perf.append(perf)
    # this is just the way I compute the abstraction metrics, sorry
    clf = assistants.LinearDecoder(N, 1, assistants.MeanClassifier)
    gclf = assistants.LinearDecoder(N, 1, svm.LinearSVC)
    D = assistants.Dichotomies(len(np.unique(inp_condition)),
                               input_task.positives+task.positives, extra=5)
    ps = []
    ccgp = []
    for _ in D:
        ps.append(D.parallelism(z.T.detach().numpy(), inp_condition[:ndat][idx_tst], clf))
        ccgp.append(D.CCGP(z.T.detach().numpy(), inp_condition[:ndat][idx_tst], gclf, max_iter=1000))
    PS.append(ps)
    CCGP.append(ccgp)
    # participation-ratio dimensionality of the centered representation
    _, S, _ = la.svd(z.detach()-z.mean(1).detach()[:,None], full_matrices=False)
    eigs = S**2
    lindim.append((np.sum(eigs)**2)/np.sum(eigs**2))
    # Gradient similarity
    # if np.mod(epoch,10)==0:
    if epoch in [0,nepoch-1]:
        errb = (targets[idx_tst,:].T - nn.Sigmoid()(pred)) # bernoulli
        errg = (targets[idx_tst,:].T - pred) # gaussian
        err = ppp*errb + (1-ppp)*errg # convex sum, in case you want that
        d2 = (W.T@err)*nonlinearity.deriv(z) # gradient of the currents
        conds = abstract_conds[idx_tst]
        # condition-averaged gradients, centered, compared by cosine similarity
        cond_grad = np.array([d2[:,conds==i].mean(1).detach().numpy() for i in np.unique(conds)])
        gradz_sim.append(util.cosine_sim(cond_grad-cond_grad.mean(0),cond_grad-cond_grad.mean(0)))
        # cond_grad = np.array([(W.T@err)[:,conds==i].mean(1).detach().numpy() for i in np.unique(conds)])
        cond_grad = np.array([(d2[:,conds==i]@inputs[idx_tst,:][conds==i,:]).detach().numpy().T for i in np.unique(conds)])
        gradlin_sim.append(util.cosine_sim(cond_grad-cond_grad.mean(0),cond_grad-cond_grad.mean(0)))
        # cond_grad = np.array([((d2[:,conds==i]@z[:,conds==i].T)/np.sum(conds==i)).mean(1).detach().numpy() \
        #                       for i in np.unique(conds)])
        # gradw_sim.append(util.cosine_sim(cond_grad,cond_grad))
    # do learning
    for j, btch in enumerate(dl):
        optimizer.zero_grad()
        inps, outs = btch
        if two_layers:
            z1 = nonlinearity(torch.matmul(W1,inps.T) + b1)
            curr1 = torch.matmul(W1,inps.T) + b1
            z = nonlinearity(torch.matmul(W2,z1) + b2)
            curr = torch.matmul(W2,z1) + b2
        else:
            z = nonlinearity(torch.matmul(W1,inps.T) + b1)
            curr = torch.matmul(W1,inps.T) + b1
        pred = torch.matmul(W,z) + b
        # change the scale of the MSE targets, to be more like x-ent
        if (ppp == 0) and correct_mse:
            outs = 1000*(2*outs-1)
        # loss = -students.Bernoulli(2).distr(pred).log_prob(outs.T).mean()
        # convex combination of BCE (ppp=1) and MSE (ppp=0)
        loss = ppp*nn.BCEWithLogitsLoss()(pred.T, outs) + (1-ppp)*nn.MSELoss()(pred.T,outs)
        if manual:
            # hand-computed backprop (bypasses autograd)
            errb = (outs.T - nn.Sigmoid()(pred)) # bernoulli
            errg = (outs.T - pred) # gaussian
            err = ppp*errb + (1-ppp)*errg # convex sum, in case you want that
            d2 = (W.T@err)*nonlinearity.deriv(curr) # gradient of the currents
            if two_layers:
                W2.grad = -(d2@z1.T)/inps.shape[0]
                b2.grad = -d2.mean(1, keepdim=True)
                d1 = (W2@d2)*nonlinearity.deriv(curr1)
                W1.grad = -(d1@inps)/inps.shape[0]
                b1.grad = -d1.mean(1, keepdim=True)
            else:
                W1.grad = -(d2@inps)/inps.shape[0]
                b1.grad = -d2.mean(1, keepdim=True)
            # W1 += lr*dw
            # b1 += lr*db
        else:
            loss.backward()
        # NOTE(review): `d2` only exists on the manual path (and as a leftover
        # from the metrics block); with manual=False this raises NameError or
        # silently uses a stale value — confirm intended behavior.
        if epoch == 0:
            init_grad_w = -(d2@inps)/inps.shape[0]
            init_grad_b = -d2.mean(1, keepdim=True)
        # grad_mag.append(la.norm(W1.grad.numpy(), axis=0))
        optimizer.step()
        running_loss += loss.item()
        # train_loss.append(loss)
    # print('epoch %d: %.3f'%(epoch,running_loss/(j+1)))
    train_loss.append(running_loss/(j+1))
    # print(running_loss/(i+1))
# Convert the per-epoch logs to arrays for analysis/plotting below.
weights = np.array(weights)
# BUG FIX: `weights2` is never populated above (its append in the training
# loop is commented out), so `np.array(weights2)` raised a NameError and
# killed the script after training.  Convert it only when it actually exists.
weights2 = np.array(weights2) if 'weights2' in globals() else np.array([])
biases = np.squeeze(biases)
#%%
# Plot the parallelism score (or CCGP) of each dichotomy across training.
# plot_this = np.squeeze(CCGP).mean(-1)
plot_this = np.array(PS)
plt.figure()
epochs = range(1,len(PS)+1)
# plt.plot(range(1,len(inp_PS)+1),out_PS)
# plt.semilogx()
trn = []
# one curve per trained output variable
for dim in range(task.dim_output):
    thisone = plt.plot(epochs, plot_this[...,dim])[0]
    trn.append(thisone)
plt.semilogx()
# grey curve: mean over the untrained ("extra") dichotomies
untrn = plt.plot(epochs, plot_this[...,task.dim_output:].mean(1),color=(0.5,0.5,0.5),zorder=0)[0]
plt.legend(trn + [untrn], ['Var %d'%(n+1) for n in range(task.dim_output)] + ['XOR'])
#%%
# Post-training abstraction analysis: parallelism (PS), cross-condition
# generalization (CCGP) and shattering dimension (SD) over all dichotomies.
if two_layers:
    z1 = nonlinearity(torch.matmul(W1,inputs.T) + b1).detach().numpy()
    z = nonlinearity(torch.matmul(W2,torch.tensor(z1)) + b2).detach().numpy().T
else:
    z = nonlinearity(torch.matmul(W1,inputs.T) + b1).detach().numpy().T
pred = torch.matmul(W,torch.tensor(z).T) + b
# z = net.enc.network[:-2](torch.tensor(inputs)).detach().numpy()
N = z.shape[1]
max_dichs = 50 # the maximum number of untrained dichotomies to test
all_PS = []
all_CCGP = []
all_CCGP_ = []
CCGP_out_corr = []
mut_inf = []
all_SD = []
indep = []
indep.append(task.subspace_information())
# z = this_exp.train_data[0].detach().numpy()
# z = linreg.predict(this_exp.train_data[0])@W1.T
# subsample for tractability
n_compute = np.min([5000, z.shape[0]])
idx = np.random.choice(z.shape[0], n_compute, replace=False)
# idx_tst = idx[::4] # save 1/4 for test set
# idx_trn = np.setdiff1d(idx, idx_tst)
cond = inp_condition[idx]
# cond = util.decimal(this_exp.train_data[1][idx,...])
num_cond = len(np.unique(cond))
# xor = np.where(~(np.isin(range(num_cond), args['dichotomies'][0])^np.isin(range(num_cond), args['dichotomies'][1])))[0]
## Loop over dichotomies
# D = assistants.Dichotomies(num_cond, args['dichotomies']+[xor], extra=50)
# choose dichotomies to have a particular order
Q = num_var
D_fake = assistants.Dichotomies(num_cond, task.positives, extra=7000)
# order untrained dichotomies by mutual information with the task
mi = np.array([task.information(p) for p in D_fake])
midx = np.append(range(Q),np.flip(np.argsort(mi[Q:]))+Q)
# these_dics = args['dichotomies'] + [D_fake.combs[i] for i in midx]
D = assistants.Dichotomies(num_cond, [D_fake.combs[i] for i in midx], extra=0)
clf = assistants.LinearDecoder(N, 1, assistants.MeanClassifier)
gclf = assistants.LinearDecoder(N, 1, svm.LinearSVC)
dclf = assistants.LinearDecoder(N, D.ntot, svm.LinearSVC)
# clf = LinearDecoder(this_exp.dim_input, 1, MeanClassifier)
# gclf = LinearDecoder(this_exp.dim_input, 1, svm.LinearSVC)
# dclf = LinearDecoder(this_exp.dim_input, D.ntot, svm.LinearSVC)
# K = int(num_cond/2) - 1 # use all but one pairing
K = int(num_cond/4) # use half the pairings
PS = np.zeros(D.ntot)
CCGP = [] #np.zeros((D.ntot, 100))
out_corr = []
d = np.zeros((n_compute, D.ntot))
pos_conds = []
for i, pos in tqdm(enumerate(D)):
    pos_conds.append(pos)
    # print('Dichotomy %d...'%i)
    # parallelism
    PS[i] = D.parallelism(z[idx,:], cond, clf)
    # CCGP
    cntxt = D.get_uncorrelated(100)
    # correlation of each context with each output variable (+/-1 coding)
    out_corr.append(np.array([[(2*np.isin(p,c)-1).mean() for c in cntxt] for p in task.positives]))
    CCGP.append(D.CCGP(z[idx,:], cond, gclf, cntxt, twosided=True))
    # shattering
    d[:,i] = D.coloring(cond)
# dclf.fit(z[idx_trn,:], d[np.isin(idx, idx_trn),:], tol=1e-5, max_iter=5000)
dclf.fit(z[idx,:], d, tol=1e-5)
# recompute the representation on a fresh subsample for SD testing
if two_layers:
    z1 = nonlinearity(torch.matmul(W1,inputs.T) + b1)
    z = nonlinearity(torch.matmul(W2,z1) + b2).detach().numpy().T
else:
    z = nonlinearity(torch.matmul(W1,inputs.T) + b1).detach().numpy().T
# z = this_exp.test_data[0].detach().numpy()
# z = linreg.predict(this_exp.test_data[0])@W1.T
idx = np.random.choice(ndat, n_compute, replace=False)
d_tst = np.array([D.coloring(inp_condition[idx]) for _ in D]).T
SD = dclf.test(z[idx,:], d_tst).squeeze()
all_PS.append(PS)
all_CCGP.append(CCGP)
CCGP_out_corr.append(out_corr)
all_SD.append(SD)
mut_inf.append(mi[midx])
R = np.repeat(np.array(CCGP_out_corr),2,-1)
basis_dependence = np.array(indep).max(1)
out_MI = np.array(mut_inf)
# %%
# Aggregate the abstraction metrics across contexts and plot the summary.
# mask = (R.max(2)==1) # context must be an output variable
# mask = (np.abs(R).sum(2)==0) # context is uncorrelated with either output variable
# mask = (np.abs(R).sum(2)>0) # context is correlated with at least one output variable
mask = ~np.isnan(R).max(2) # context is uncorrelated with the tested variable
almost_all_CCGP = util.group_mean(np.squeeze(all_CCGP).squeeze(), mask)
PS = util.group_mean(np.squeeze(all_PS), mask.sum(-1)>0, axis=0)
CCGP = util.group_mean(almost_all_CCGP, mask.sum(-1)>0, axis=0)
SD = util.group_mean(np.squeeze(all_SD), mask.sum(-1)>0, axis=0)
# SD = np.array(all_SD).mean(0)
ndic = len(PS)
PS_err = np.nanstd(np.squeeze(all_PS), axis=0)#/np.sqrt(len(all_PS))
CCGP_err = np.nanstd(almost_all_CCGP, axis=0)#/np.sqrt(len(all_CCGP))
SD_err = np.nanstd(np.squeeze(all_SD), axis=0)#/np.sqrt(len(all_SD))
# locate each output/input variable's dichotomy (or its complement) in pos_conds
output_dics = []
for d in task.positives:
    output_dics.append(np.where([(list(p) == list(d)) or (list(np.setdiff1d(range(num_cond),p))==list(d))\
                                 for p in pos_conds])[0][0])
input_dics = []
for d in input_task.positives:
    input_dics.append(np.where([(list(p) == list(d)) or (list(np.setdiff1d(range(num_cond),p))==list(d))\
                                for p in pos_conds])[0][0])
dicplt.dichotomy_plot(PS, CCGP, SD,
                      input_dics=input_dics, output_dics=output_dics,
                      other_dics=[pos_conds.index((0,2,5,7))], out_MI=out_MI.mean(0))
#%%
# Condition-averaged representations and their pairwise distance / kernel
# matrices in input (x), output (y) and hidden (z) spaces.
if two_layers:
    z1 = nonlinearity(torch.matmul(W1,inputs.T) + b1).detach().numpy()
    z = nonlinearity(torch.matmul(W2,torch.tensor(z1)) + b2).detach().numpy().T
else:
    z = nonlinearity(torch.matmul(W1,inputs.T) + b1).detach().numpy().T
# NOTE(review): `conds` here is a leftover from the training loop (test-set
# conditions); `inp_condition` covers all data — presumably np.unique gives the
# same set of labels either way, but confirm.
x_ = np.stack([inputs[inp_condition==i,:].mean(0).detach().numpy() for i in np.unique(conds)]).T
y_ = np.stack([targets[inp_condition==i,:].mean(0).detach().numpy() for i in np.unique(conds)]).T
z_ = np.stack([z[inp_condition==i,:].mean(0) for i in np.unique(conds)]).T
dx = la.norm(x_[:,:,None] - x_[:,None,:], axis=0)/2
dy = la.norm(y_[:,:,None] - y_[:,None,:], axis=0)
dz = la.norm(z_[:,:,None] - z_[:,None,:], axis=0)
# Kx = np.einsum('i...k,j...k->ij...', x_.T-x_.mean(1,keepdims=True).T, x_.T-x_.mean(1,keepdims=True).T)
# Ky = np.einsum('i...k,j...k->ij...', y_.T-y_.mean(1,keepdims=True).T, y_.T-y_.mean(1,keepdims=True).T)
# Kz = np.einsum('i...k,j...k->ij...', z_.T-z_.mean(1,keepdims=True).T, z_.T-z_.mean(1,keepdims=True).T)
Kx = util.dot_product(x_-x_.mean(1,keepdims=True), x_-x_.mean(1,keepdims=True))
Ky = util.dot_product(y_-y_.mean(1,keepdims=True), y_-y_.mean(1,keepdims=True))
Kz = util.dot_product(z_-z_.mean(1,keepdims=True), z_-z_.mean(1,keepdims=True))
#%%
# Per-snapshot gradients of each hidden unit with respect to its input weights,
# split into the linear and nonlinear parts.
x_ = np.stack([inputs[inp_condition==i,:].mean(0).detach().numpy() for i in np.unique(conds)]).T
y_ = np.stack([targets[inp_condition==i,:].mean(0).detach().numpy() for i in np.unique(conds)]).T
# x_ = inputs.detach().numpy().T
# y_ = targets.detach().numpy().T
rep = np.einsum('abi,ic->abc',weights,x_)
pred = np.einsum('aib,i->ab',nonlinearity(torch.tensor(rep)+torch.tensor(biases)[:,:,None]),W.squeeze())
f_z = nonlinearity.deriv(torch.tensor(rep+biases[:,:,None]))
err = torch.tensor(y_) - nn.Sigmoid()(torch.tensor(pred))
lin_grad = err[:,:,None,None]*W[None,:,:,None]
nonlin_grad = ((lin_grad.squeeze()*f_z.transpose(1,2)))
dw_lin = lin_grad*x_.T[None,:,None,:]
dw_nonlin = nonlin_grad[...,None]*x_.T[None,:,None,:]
#%% initialization-averaged
# Quiver plot of the expected gradient field over a grid of 2-d input weights.
# this_nonlin = RayLou()
this_nonlin = TanAytch()
N_grid = 21
this_range = np.abs(weights).max()
# this_range=1
# this_bias = np.random.randn(N_grid**2,1)*0.1
this_bias = np.ones((N_grid**2,1))*0.1
err_avg = y_ - y_.mean()
x_avg = x_ - x_.mean(1,keepdims=True)
wa, wb = np.meshgrid(np.linspace(-this_range,this_range,N_grid),np.linspace(-this_range,this_range,N_grid))
fake_W = np.stack([wa.flatten(),wb.flatten()]).T
fake_fz = this_nonlin.deriv(torch.tensor(fake_W@x_ + this_bias)).numpy()
fake_grads = x_avg@(err_avg*fake_fz).T
plt.quiver(fake_W[:,0],fake_W[:,1],fake_grads[0,:],fake_grads[1,:], color=(0.5,0.5,0.5))
#%%
# MDS embedding of the hidden representation, rendered as a rotating 3-d
# animation (or a static 2-d scatter when n_mds == 2).
n_mds = 3
n_compute = 500
fake_task = util.RandomDichotomies(num_cond,num_var,0)
fake_task.positives = task.positives
idx = np.random.choice(inputs.shape[0], n_compute, replace=False)
if two_layers:
    z1 = nonlinearity(torch.matmul(W1,inputs[idx,:].T) + b1).detach().numpy().T
    z = nonlinearity(torch.matmul(W2,z1) + b2)
else:
    z = nonlinearity(torch.matmul(W1,inputs[idx,:].T) + b1).detach().numpy().T
# ans = this_exp.train_data[1][idx,...]
ans = fake_task(inp_condition[:ndat])[idx]
cond = util.decimal(ans)
# cond = this_exp.train_conditions[idx]
# colorby = cond
colorby = inp_condition[idx]
# colorby = targets[idx,1]
# colorby = input_task(inp_condition)[idx,0].numpy()
mds = manifold.MDS(n_components=n_mds)
emb = mds.fit_transform(z)
if n_mds == 2:
    scat = plt.scatter(emb[:,0],emb[:,1], c=colorby)
    plt.xlabel('MDS1')
    plt.ylabel('MDS2')
elif n_mds == 3:
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    plt.margins(0)
    def init():
        # draw condition centroids, the edges between them, and the scatter
        U = np.stack([emb[cond==i,:].mean(0) for i in np.unique(cond)])
        qq = len(np.unique(cond))
        for ix in combinations(range(qq),2):
            ax.plot(U[ix,0],U[ix,1],U[ix,2],color=(0.5,0.5,0.5))
        # ax.plot(U[[1,3],0],U[[1,3],1],U[[1,3],2],color=(0.5,0.5,0.5))
        # ax.plot(U[[3,2],0],U[[3,2],1],U[[3,2],2],color=(0.5,0.5,0.5))
        # ax.plot(U[[2,0],0],U[[2,0],1],U[[2,0],2],color=(0.5,0.5,0.5))
        # ax.plot(U[[0,3],0],U[[0,3],1],U[[0,3],2],color=(0.5,0.5,0.5))
        # ax.plot(U[[1,2],0],U[[1,2],1],U[[1,2],2],color=(0.5,0.5,0.5))
        ax.scatter(U[:,0],U[:,1],U[:,2],s=50, marker='s',c=np.unique(cond))
        scat = ax.scatter(emb[:,0],emb[:,1], emb[:,2], c=colorby)
        util.set_axes_equal(ax)
        ax.set_xticklabels([])
        ax.set_yticklabels([])
        ax.set_zticklabels([])
        # plt.xticks([])
        # plt.yticks([])
        # plt.zticks([])
        # plt.legend(np.unique(cond), np.unique(cond))
        cb = plt.colorbar(scat,
                          ticks=np.unique(colorby),
                          drawedges=True,
                          values=np.unique(colorby))
        cb.set_ticklabels(np.unique(colorby)+1)
        cb.set_alpha(1)
        cb.draw_all()
        return fig,
    # def init():
    #     ax.view_init(30,0)
    #     plt.draw()
    #     return ax,
    def update(frame):
        # rotate the camera around the scene
        ax.view_init(30,frame)
        # plt.draw()
        return fig,
    ani = anime.FuncAnimation(fig, update, frames=np.linspace(0, 360, 100),
                              init_func=init, interval=10, blit=True)
    # plt.show()
    ani.save(SAVE_DIR+'/vidya/tempmovie.mp4', writer=anime.writers['ffmpeg'](fps=30))
|
import datetime
import flask_testing
from sqlalchemy import desc
from monolith.app import create_app
from monolith.database import Story, User, db, ReactionCatalogue, Counter
from monolith.forms import LoginForm, StoryForm
from monolith.urls import *
class TestTemplateStories(flask_testing.TestCase):
    """Template/context tests for the story endpoints: single story view,
    /latest and /range, against a fixed fixture of users and stories."""
    app = None

    # First thing called
    def create_app(self):
        global app
        app = create_app(database=TEST_DB)
        return app

    # Set up database for testing here
    def setUp(self) -> None:
        """Populate the test DB with 4 users, 5 stories (one draft), one
        extra reaction type, then log in as the admin user."""
        with app.app_context():
            # Create admin user
            example = User()
            example.firstname = 'Admin'
            example.lastname = 'Admin'
            example.email = 'example@example.com'
            example.dateofbirth = datetime.datetime(2020, 10, 5)
            example.is_admin = True
            example.set_password('admin')
            db.session.add(example)
            db.session.commit()
            # Create non admin user
            example = User()
            example.firstname = 'Abc'
            example.lastname = 'Abc'
            example.email = 'abc@abc.com'
            example.dateofbirth = datetime.datetime(2010, 10, 5)
            example.is_admin = False
            example.set_password('abc')
            db.session.add(example)
            db.session.commit()
            # Create another non admin user
            example = User()
            example.firstname = 'Nini'
            example.lastname = 'Nini'
            example.email = 'nini@nini.com'
            example.dateofbirth = datetime.datetime(2010, 10, 7)
            example.is_admin = False
            example.set_password('nini')
            db.session.add(example)
            db.session.commit()
            # Create an account that will have 0 stories
            example = User()
            example.firstname = 'No'
            example.lastname = 'Stories'
            example.email = 'no@stories.com'
            example.dateofbirth = datetime.datetime(2010, 10, 5)
            example.is_admin = False
            example.set_password('no')
            db.session.add(example)
            db.session.commit()
            # Create the first story, default from teacher's code
            example = Story()
            example.text = 'Trial story of example admin user :)'
            example.author_id = 1
            example.figures = '#example#admin#'
            example.is_draft = False
            example.date = datetime.datetime.strptime('2019-10-20', '%Y-%m-%d')
            db.session.add(example)
            db.session.commit()
            # Create a story that shouldn't be seen in /latest
            example = Story()
            example.text = 'Old story (dont see this in /latest)'
            example.date = datetime.datetime.strptime('2019-10-10', '%Y-%m-%d')
            example.likes = 420
            example.author_id = 2
            example.is_draft = False
            example.figures = '#example#abc#'
            db.session.add(example)
            db.session.commit()
            # Create a story that should be seen in /latest
            example = Story()
            example.text = 'You should see this one in /latest'
            example.date = datetime.datetime.strptime('2019-10-13', '%Y-%m-%d')
            example.likes = 3
            example.author_id = 2
            example.is_draft = False
            example.figures = '#example#abc#'
            db.session.add(example)
            db.session.commit()
            # Random draft from a non-admin user
            example = Story()
            example.text = 'DRAFT from not admin'
            example.date = datetime.datetime.strptime('2018-12-30', '%Y-%m-%d')
            example.likes = 100
            example.author_id = 3
            example.is_draft = True
            example.figures = '#example#nini#'
            db.session.add(example)
            db.session.commit()
            # Create a very old story for range searches purpose
            example = Story()
            example.text = 'very old story (11 11 2011)'
            example.date = datetime.datetime.strptime('2011-11-11', '%Y-%m-%d')
            example.likes = 2
            example.author_id = 3
            example.is_draft = False
            example.figures = '#example#nini#'
            example.date = datetime.datetime(2011, 11, 11)
            db.session.add(example)
            db.session.commit()
            # Add third reaction (love)
            love = ReactionCatalogue()
            love.reaction_id = 3
            love.reaction_caption = "love"
            db.session.add(love)
            db.session.commit()
            # login
            payload = {'email': 'example@example.com',
                       'password': 'admin'}
            form = LoginForm(data=payload)
            self.client.post('/users/login', data=form.data, follow_redirects=True)

    # Executed at end of each test
    def tearDown(self) -> None:
        db.session.remove()
        db.drop_all()

    def test_existing_story(self):
        """Story page renders with its reactions, before and after counters."""
        self.client.get('/stories/1')
        self.assert_template_used('story.html')
        test_story = Story.query.filter_by(id=1).first()
        self.assertEqual(self.get_context_variable('story'), test_story)
        # Ordered reactions
        reactions = [('dislike', 0), ('like', 0), ('love', 0)]
        self.assert_context('reactions', reactions)
        # Add reactions for user 1
        like = Counter()
        like.reaction_type_id = 1
        like.story_id = 1
        like.counter = 23
        dislike = Counter()
        dislike.reaction_type_id = 2
        dislike.story_id = 1
        dislike.counter = 5
        db.session.add(like)
        db.session.add(dislike)
        db.session.commit()
        # Test new statistics
        self.client.get('/stories/1')
        self.assert_template_used('story.html')
        test_story = Story.query.filter_by(id=1).first()
        self.assertEqual(self.get_context_variable('story'), test_story)
        # Ordered reactions
        reactions = [('dislike', 5), ('like', 23), ('love', 0)]
        self.assert_context('reactions', reactions)

    def test_non_existing_story(self):
        self.client.get('/stories/50')
        self.assert_template_used('story.html')
        self.assertEqual(self.get_context_variable('exists'), False)

    # Testing that the total number of users is >= than the number of latest stories per user (simple invariant)
    def test_simple_latest_story(self):
        self.client.get(LATEST_URL)
        # Simply assert that the template used is the expected one
        self.assert_template_used('stories.html')
        # Check the invariant
        num_users = len(db.session.query(User).all())
        self.assertLessEqual(len(self.get_context_variable('stories')), num_users)

    # Testing that the most recent non-draft story per user is contained in the resulting stories
    def test_latest_story(self):
        self.client.get(LATEST_URL)
        # Get the number of users to iterate and filter per user
        num_users = len(User.query.all())
        expected_stories = []
        # BUG FIX: ids are 1-based (stories above reference author_id 1..3),
        # so range(num_users) [0..n-1] skipped the last user and wasted i=0.
        for i in range(1, num_users + 1):
            # Get all the NON-draft stories of the i-th user and order them (in a descending order)
            # then get the first one
            non_draft = Story.query.filter(Story.author_id == i).filter(Story.is_draft == 0).order_by(
                desc(Story.date)).first()
            # If at least one story was retrieved (maybe a user has written 0 stories)
            if non_draft:
                # It's an expected story that must be returned by the service
                expected_stories.append(non_draft)
        # Get all the stories returned by the service
        stories_returned = self.get_context_variable('stories')
        # Check that they're the same
        for i in range(len(expected_stories)):
            self.assertEqual(stories_returned[i].id, expected_stories[i].id)

    # Testing range story with possible inputs
    def test_range_story(self):
        # Testing range without parameters
        # Expected behaviour: it should return ALL the stories
        self.client.get(RANGE_URL)
        self.assert_template_used('stories.html')
        all_stories = db.session.query(Story).filter_by(is_draft=False).all()
        self.assertEqual(self.get_context_variable('stories').all(), all_stories)
        # Testing range with only one parameter (begin)
        # Expected behaviour: it should return the stories starting from specified date to TODAY
        self.client.get(RANGE_URL + '?begin=2013-10-10')
        d = datetime.datetime.strptime('2013-10-10', '%Y-%m-%d')
        req_stories = Story.query.filter(Story.date >= d).filter_by(is_draft=False).all()
        self.assertEqual(self.get_context_variable('stories').all(), req_stories)
        # Testing range with only one parameter (end)
        # Expected behaviour: it should return all the stories BEFORE the specified date
        self.client.get(RANGE_URL + '?end=2013-10-10')
        e = datetime.datetime.strptime('2013-10-10', '%Y-%m-%d')
        req_stories = Story.query.filter(Story.date <= e).filter_by(is_draft=False).all()
        self.assertEqual(self.get_context_variable('stories').all(), req_stories)
        # Testing range with begin date > end date
        self.client.get(RANGE_URL + '?begin=2012-12-12&end=2011-10-10')
        self.assert_message_flashed('Begin date cannot be higher than End date.', 'error')
        # Testing range with wrong url parameters
        self.client.get(RANGE_URL + '?begin=abc&end=abc')
        self.assert_message_flashed('Wrong URL parameters.', 'error')
        # Testing range with a valid request
        # Expected behaviour: return all the stories between the specified dates
        d = datetime.datetime.strptime('2012-10-15', '%Y-%m-%d')
        e = datetime.datetime.strptime('2020-10-10', '%Y-%m-%d')
        self.client.get(RANGE_URL + '?begin=2012-10-15&end=2020-10-10')
        req_stories = Story.query.filter(Story.date >= d).filter(Story.date <= e).filter_by(is_draft=False).all()
        self.assertEqual(self.get_context_variable('stories').all(), req_stories)
class TestStories(flask_testing.TestCase):
app = None
# First thing called
    # First thing called by flask_testing: build the app against the test DB
    # and expose it through the module-level `app` used by setUp.
    def create_app(self):
        global app
        app = create_app(database=TEST_DB)
        return app
# Set up database for testing here
    # Set up database for testing here
    def setUp(self) -> None:
        """Idempotently seed two users and four stories (two drafts), then
        log in as the admin user.  Each record is created only if missing,
        so repeated setUp calls do not duplicate fixtures."""
        with app.app_context():
            # Create admin user (if not present)
            q = db.session.query(User).filter(User.email == 'example@example.com')
            user = q.first()
            if user is None:
                example = User()
                example.firstname = 'Admin'
                example.lastname = 'Admin'
                example.email = 'example@example.com'
                example.dateofbirth = datetime.datetime(2020, 10, 5)
                example.is_admin = True
                example.set_password('admin')
                db.session.add(example)
                db.session.commit()
            # Create non admin user (if not present)
            q = db.session.query(User).filter(User.email == 'abc@abc.com')
            user = q.first()
            if user is None:
                example = User()
                example.firstname = 'Abc'
                example.lastname = 'Abc'
                example.email = 'abc@abc.com'
                example.dateofbirth = datetime.datetime(2010, 10, 5)
                example.is_admin = False
                example.set_password('abc')
                db.session.add(example)
                db.session.commit()
            # Create the first story, default from teacher's code
            q = db.session.query(Story).filter(Story.id == 1)
            story = q.first()
            if story is None:
                example = Story()
                example.text = 'Trial story of example admin user :)'
                example.author_id = 1
                example.figures = '#example#admin#'
                example.is_draft = False
                db.session.add(example)
                db.session.commit()
            # Create a story of a different user
            q = db.session.query(Story).filter(Story.id == 2)
            story = q.first()
            if story is None:
                example = Story()
                example.text = 'You won\'t modify this story'
                example.author_id = 2
                example.figures = '#modify#story#'
                example.is_draft = False
                db.session.add(example)
                db.session.commit()
            # Create a draft for the logged user
            q = db.session.query(Story).filter(Story.id == 3)
            story = q.first()
            if story is None:
                example = Story()
                example.text = 'This is an example of draft'
                example.author_id = 1
                example.figures = '#example#draft#'
                example.is_draft = True
                db.session.add(example)
                db.session.commit()
            # Create a draft of a different user
            q = db.session.query(Story).filter(Story.id == 4)
            story = q.first()
            if story is None:
                example = Story()
                example.text = 'This is an example of draft that you can\'t modify'
                example.date = datetime.datetime.strptime('2018-12-30', '%Y-%m-%d')
                example.author_id = 2
                example.figures = '#example#draft#'
                example.is_draft = True
                db.session.add(example)
                db.session.commit()
            # log in as the admin user for every test in this class
            payload = {'email': 'example@example.com', 'password': 'admin'}
            form = LoginForm(data=payload)
            self.client.post('/users/login', data=form.data, follow_redirects=True)
# Executed at end of each test
    # Executed at end of each test: drop all tables so the next test starts clean
    def tearDown(self) -> None:
        db.session.remove()
        db.drop_all()
def test_write_story(self):
    """Exercise /stories/new/write: access guards, drafts and publishing."""
    # Testing writing without rolling dice: must redirect home.
    response = self.client.get(WRITE_URL)
    self.assert_redirects(response, HOME_URL)
    # BUG FIX: this was follow_redirects=False, but assert_template_used
    # can only see 'index.html' if the redirect is actually followed and
    # the home page rendered.
    self.client.get(WRITE_URL, follow_redirects=True)
    self.assert_template_used('index.html')
    # Testing writing of a valid draft story
    response = self.client.get(WRITE_URL + '/3')
    self.assert200(response)
    self.assert_template_used('write_story.html')
    self.assert_context('words', ['example', 'draft'])
    # Testing writing of other user's draft
    response = self.client.get(WRITE_URL + '/4')
    self.assert_redirects(response, 'http://127.0.0.1:5000/users/1/drafts')
    # Testing writing of an already published story
    response = self.client.get(WRITE_URL + '/1')
    self.assert_redirects(response, 'http://127.0.0.1:5000/users/1/drafts')
    # Testing writing of a new story with valid session
    with self.client.session_transaction() as session:
        session['figures'] = ['beer', 'cat', 'dog']
    response = self.client.get(WRITE_URL)
    self.assert200(response)
    self.assert_template_used('write_story.html')
    self.assert_context('words', ['beer', 'cat', 'dog'])
    # Testing publishing invalid story (the word 'beer' is missing)
    payload = {'text': 'my cat is drinking a gin tonic with my neighbour\'s dog', 'as_draft': '0'}
    form = StoryForm(data=payload)
    response = self.client.post('/stories/new/write', data=form.data)
    self.assert400(response)
    self.assert_template_used('write_story.html')
    self.assert_context('message', 'Your story doesn\'t contain all the words. Missing: beer ')
    # Testing publishing valid story
    payload1 = {'text': 'my cat is drinking a beer with my neighbour\'s dog', 'as_draft': '0'}
    form1 = StoryForm(data=payload1)
    response = self.client.post('/stories/new/write', data=form1.data)
    self.assertEqual(response.status_code, 302)
    self.assert_redirects(response, '/users/1/stories')
    # Testing saving a new story as draft
    with self.client.session_transaction() as session:
        session['figures'] = ['beer', 'cat', 'dog']
    payload2 = {'text': 'my cat is drinking', 'as_draft': '1'}
    form2 = StoryForm(data=payload2)
    response = self.client.post('/stories/new/write', data=form2.data)
    self.assertEqual(response.status_code, 302)
    self.assert_redirects(response, '/users/1/drafts')
    # Testing saving a draft again: must update story 6 in place, not create 7
    with self.client.session_transaction() as session:
        session['figures'] = ['beer', 'cat', 'dog']
        session['id_story'] = 6
    response = self.client.post('/stories/new/write', data=form2.data)
    self.assertEqual(response.status_code, 302)
    self.assert_redirects(response, '/users/1/drafts')
    q = db.session.query(Story).filter(Story.id == 7).first()
    self.assertEqual(q, None)
    # Testing publishing a draft story: flips is_draft on story 6
    with self.client.session_transaction() as session:
        session['figures'] = ['beer', 'cat', 'dog']
        session['id_story'] = 6
    payload3 = {'text': 'my cat is drinking dog and beer', 'as_draft': '0'}
    form3 = StoryForm(data=payload3)
    response = self.client.post('/stories/new/write', data=form3.data)
    self.assertEqual(response.status_code, 302)
    self.assert_redirects(response, '/users/1/stories')
    q = db.session.query(Story).filter(Story.id == 7).first()
    self.assertEqual(q, None)
    q = db.session.query(Story).filter(Story.id == 6).first()
    self.assertEqual(q.is_draft, False)
class TestRandomRecentStory(flask_testing.TestCase):
    """Tests for the /stories/random endpoint (random recent story)."""

    # Flask application under test; assigned in create_app().
    app = None

    # First thing called
    def create_app(self):
        """Create the application bound to the test database."""
        global app
        app = create_app(database=TEST_DB)
        return app

    # Set up database for testing here
    def setUp(self) -> None:
        """Seed two users plus three stories.

        NOTE: story ids are assigned by insertion order (1: old story,
        2: draft, 3: recent story by Admin) — the test below relies on it.
        """
        with app.app_context():
            # Create an user with no stories
            q = db.session.query(User).filter(User.email == 'example@example.com')
            user = q.first()
            if user is None:
                example = User()
                example.firstname = 'Admin'
                example.lastname = 'Admin'
                example.email = 'example@example.com'
                example.dateofbirth = datetime.datetime(2020, 10, 5)
                example.is_admin = True
                example.set_password('admin')
                db.session.add(example)
                db.session.commit()
            # Create another user
            q = db.session.query(User).filter(User.email == 'example2@example.com')
            user = q.first()
            if user is None:
                example = User()
                example.firstname = 'Admin2'
                example.lastname = 'Admin2'
                example.email = 'example2@example.com'
                example.dateofbirth = datetime.datetime(2020, 10, 5)
                example.is_admin = True
                example.set_password('admin')
                db.session.add(example)
                db.session.commit()
            # Create a not recent story by Admin2 (2012: excluded from "recent")
            example = Story()
            example.text = 'This is a story about the end of the world'
            example.date = datetime.datetime.strptime('2012-12-12', '%Y-%m-%d')
            example.author_id = 2
            example.figures = 'story#world'
            example.is_draft = False
            db.session.add(example)
            db.session.commit()
            # Create a recent story saved as draft by Admin2 (drafts are excluded)
            example = Story()
            example.text = 'This story is just a draft'
            example.date = datetime.datetime.now()
            example.author_id = 2
            example.figures = 'story#draft'
            example.is_draft = True
            db.session.add(example)
            db.session.commit()
            # Create a recent story by Admin (own stories excluded once logged in)
            example = Story()
            example.text = 'Just another story'
            example.date = datetime.datetime.now()
            example.author_id = 1
            example.figures = 'dice#example'
            example.is_draft = False
            db.session.add(example)
            db.session.commit()

    def test_random_recent_story(self):
        """Anonymous users see any recent story; logged users only others'."""
        # Random recent story as anonymous user
        self.client.get('/stories/random', follow_redirects=True)
        self.assert_template_used('story.html')
        self.assertEqual(self.get_context_variable('story').text, 'Just another story')
        # Login as Admin
        payload = {'email': 'example@example.com', 'password': 'admin'}
        form = LoginForm(data=payload)
        self.client.post('/users/login', data=form.data, follow_redirects=True)
        # No recent stories (the only recent published one is Admin's own)
        self.client.get('/stories/random', follow_redirects=True)
        self.assert_template_used('stories.html')
        self.assert_message_flashed('Oops, there are no recent stories by other users!')
        # Create a new recent story by Admin2
        example = Story()
        example.text = 'This is a valid recent story'
        example.date = datetime.datetime.now()
        example.author_id = 2
        example.figures = 'story#recent'
        example.is_draft = False
        db.session.add(example)
        db.session.commit()
        # Get the only recent story not written by Admin
        response = self.client.get('/stories/random', follow_redirects=True)
        self.assert_template_used('story.html')
        self.assertEqual(self.get_context_variable('story').text, 'This is a valid recent story')
|
# Generated by Django 2.1.7 on 2019-03-22 20:12
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds the `planos` (bandwidth plan) field
    to `clients` with a fixed choice set; do not edit by hand."""

    dependencies = [
        ('equipaments', '0008_clients_date'),
    ]

    operations = [
        migrations.AddField(
            model_name='clients',
            name='planos',
            # default='3MB' backfills existing rows; preserve_default=False
            # means the default was only supplied for this migration.
            field=models.CharField(choices=[('3MB', '3MB'), ('5MB', '5MB'), ('10MB', '10MB'), ('15MB', '15MB'), ('20MB', '20MB'), ('25MB', '25MB'), ('50MB', '50MB')], default='3MB', max_length=5),
            preserve_default=False,
        ),
    ]
|
#!/usr/bin/Python
# -*- coding: utf-8 -*-
# Batch-install helper for uiautomator2 devices (Python 2 style code).
import uiautomator2 as ut2
# Candidate devices, as uiautomator2 "ip:port" endpoints.
ip_list =['10.2.8.138:7912','10.2.8.113:7912','10.2.8.34:7912']
# Base URL the APK is downloaded from (device-side download).
url = 'http://10.0.4.14:9257/dev/android_cn/'
#apkName = 'snqz_banshu_0.0.0.008_1711071629.apk'
apkName ='snqz_test_0.0.0.013_1801241755.apk'
# Known package names: [jingmo build, union build].
pack_name = ['com.jingmo.snqz','com.snqz.union']
dev_packname ='com.snqz.union'
jm_packname='com.jingmo.snqz'
#flag = raw_input("dev:1 : jingmo : 2 \n")
def apk_install(ip):
    """Connect to the device at *ip* ("host:port") and install the APK.

    The APK is fetched by the device itself from `url + apkName`.
    FIX: the original used Python-2-only `print` statements; the
    parenthesized form below behaves the same on both Python 2 and 3.
    The long block of commented-out install/uninstall alternatives was
    dead code and has been removed.
    """
    u = ut2.connect(ip)
    # Log device state before installing, for debugging failed installs.
    print(u.device_info)
    print(u.info)
    print(u.info.get('screenOn'))
    u.app_install(url + apkName)
#apk_install('10.2.8.148:7912')
# Install on a single hard-coded device (first entry of ip_list).
apk_install('10.2.8.138:7912')
#!/usr/bin/env python
import collections
import socket
import struct
import sys
import json
import time
# Multicast group the video-sync peers communicate on.
MCAST_ADDR = "224.1.1.1"
MCAST_PORT = 5008
MULTICAST_TTL = 8
# Command opcodes exchanged with the browser extension.
PAUSE = 0
PLAY = 1
JUMPTO = 2
GOTOURL = 3
if sys.platform == "win32":
    # Native messaging requires binary stdin/stdout on Windows.
    import os, msvcrt
    msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
    msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
#Get own ip address
def get_dev_ipaddr():
    """Return this host's outbound IPv4 address.

    Connecting a UDP socket to a public address sends no packets but
    forces the OS to pick the local interface/address that would be used.
    """
    probe = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    probe.connect(("8.8.8.8", 80))
    addr, _port = probe.getsockname()
    probe.close()
    return addr
#Create a multicast socket
def listen_socket():
    """Create a UDP socket bound to MCAST_PORT and joined to MCAST_ADDR."""
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
    # Allow several peers on the same machine to share the port.
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    s.bind(("", MCAST_PORT))
    # Join the multicast group on all interfaces.
    membership = struct.pack("4sl", socket.inet_aton(MCAST_ADDR), socket.INADDR_ANY)
    s.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, membership)
    return s
#Send message to background script
def send_message(message):
    """Send *message* to the extension via native-messaging framing.

    Frame = native-endian uint32 length prefix + UTF-8 JSON payload.
    Modified from https://github.com/mdn/webextensions-examples/tree/master/native-messaging
    """
    payload = json.dumps(message).encode('utf-8')
    out = sys.stdout.buffer
    out.write(struct.pack('@I', len(payload)))
    out.write(payload)
    out.flush()
ownip = get_dev_ipaddr()
sock = listen_socket()
# Non-blocking recv: a BlockingIOError means "no message" and triggers logic.
sock.setblocking(0)
#Create objects for data
nodes = {}  # last status report per peer, keyed by peer IP
session = False  # chosen session id (False until one is picked)
nodecommands = collections.defaultdict(list)  # votes per pending command
sessions = []  # every session id seen so far
#Log file
f= open("log_" + str(time.time()).split(".")[0],"a+")
rowcount = 0  # lines written to the current log file (rotated at 5000)
while True:
    #Get messages from the socket buffer
    try:
        data, client = sock.recvfrom(10240)
        data = data.decode("utf-8")
        send_message("Received " + data)
        f.write(data + "\n")
        f.flush()
        rowcount+=1
        if rowcount > 5000:
            #New log file
            f.close()
            f= open("log_" + str(time.time()).split(".")[0],"a+")
            rowcount=0
    #Application logic (runs only when the buffer is empty)
    except BlockingIOError:
        time.sleep(0.1)
        swarmsize = len(nodes) #how many nodes in group
        if swarmsize == 0:
            #nothing happening
            continue
        #No message in buffer
        #Check status of nodecommands
        #if this node has to do something, send message to extension
        keylist = list(nodecommands.keys())
        for cmd in keylist:
            #if the command is over 30 seconds old, remove it or if nodes have disconnected/been added, remove it
            if (time.time() - nodecommands[cmd][0][0] > 30 or swarmsize != nodecommands[cmd][0][1]):
                nodecommands.pop(cmd)
                continue
            #if over half of the nodes agree (first list entry is metadata, hence -1)
            if (len(nodecommands[cmd]) - 1 > swarmsize / 2):
                if (cmd.split(";")[0] == ownip):
                    send_message(cmd.split(";", 1)[1])
                nodecommands.pop(cmd)
        #Check status of nodes
        #if logic dictates a node needs to do something, send message using socket to all nodes
        #send_message("Commands checked, start logic")
        #Check the video URL: count how many nodes watch each URL
        videos = []
        vid_dict = {}
        for key in nodes.keys():
            video = nodes[key]["baseURI"]
            videos.append((video, key))
            if video not in vid_dict:
                vid_dict[video] = 1
            else:
                vid_dict[video] += 1
        #send_message("Vid dict built")
        if len(vid_dict) > 1:
            # Majority URL wins; every other node is told to GOTOURL it.
            max_amount = -9999
            for key in vid_dict.keys():
                amount = vid_dict[key]
                max_amount = max(amount, max_amount)
                if max_amount == vid_dict[key]:
                    real_vid = key
            for key in nodes.keys():
                if nodes[key]['baseURI'] != real_vid:
                    msg = {"command": key + ";" + str(GOTOURL) + ";" + real_vid}
                    jstring = json.dumps(msg)
                    sock.sendto((sessionid + ";" + jstring).encode("utf-8"), (MCAST_ADDR, MCAST_PORT))
        #send_message("URLs checked")
        #Check the video timestamps
        timestamps = []
        for key in nodes.keys():
            timestamps.append(nodes[key]['currentTime'])
        #Only do something if the difference between different timestamps is over 3 seconds
        if max(timestamps) - min(timestamps) > 3:
            #Average of the timestamps
            avr_timestamp = sum(timestamps) / len(timestamps)
            distances = []
            #The node with the minimum distance to the average timestamp is "in the right timespot"
            for key in nodes.keys():
                distance_timestamp = abs(nodes[key]['currentTime'] - avr_timestamp)
                distances.append((distance_timestamp, key))
            real_node = min(distances, key = lambda t: t[0])
            real_timestamp = (nodes[real_node[1]]['currentTime'], real_node[1])
            #If a node is over 3 seconds away from the "correct timestamp", multicast
            for key in nodes.keys():
                if abs(real_timestamp[0] - nodes[key]['currentTime']) > 3:
                    msg = {'command': key + ";" + str(JUMPTO) + ";" + str(real_timestamp[0])}
                    jstring = json.dumps(msg)
                    sock.sendto((sessionid + ";" + jstring).encode("utf-8"), (MCAST_ADDR, MCAST_PORT))
        #send_message("Timestamps checked")
        #Check if nodes are paused/playing
        # videostates[0] counts playing nodes (paused == 0), [1] counts paused.
        videostates = {0: 0, 1: 0}
        for key in nodes.keys():
            if nodes[key]["paused"] == 0:
                videostates[0] += 1
            else:
                videostates[1] += 1
        if videostates[0] > videostates[1]:
            # Session agreement is PLAY (majority playing): tell paused
            # nodes to play.  NOTE(review): the original comment said
            # PAUSE, which contradicted the PLAY command sent below.
            for key in nodes.keys():
                if nodes[key]["paused"] == 1:
                    msg = {"command": key + ";" + str(PLAY)}
                    jstring = json.dumps(msg)
                    sock.sendto((sessionid + ";" + jstring).encode("utf-8"), (MCAST_ADDR, MCAST_PORT))
        elif videostates[1] > videostates[0]:
            # Session agreement is PAUSE (majority paused): tell playing
            # nodes to pause.  (Original comment was inverted here too.)
            for key in nodes.keys():
                if nodes[key]["paused"] == 0:
                    msg = {"command": key + ";" + str(PAUSE)}
                    jstring = json.dumps(msg)
                    sock.sendto((sessionid + ";" + jstring).encode("utf-8"), (MCAST_ADDR, MCAST_PORT))
        #send_message("Logic finished")
        #Application logic finished, check socket buffer again
        #time.sleep(0.1)
        continue
        #send_message()
    else:
        #Message was in buffer
        try:
            sessionid, obj = data.split(";", 1)
            #send_message("Split to " + sessionid + " and " + obj)
        except ValueError:
            #Invalid message
            continue
        #send_message(client)
        if sessionid not in sessions:
            # New session id: remember it and notify the extension.
            sessions.append(sessionid)
            msg = {}
            msg['sessions'] = sessions
            send_message('sessions;' + json.dumps(msg))
        if not session:
            # Adopt the session only when the sender is ourselves.
            #send_message("Comparing: " + client[0] + " vs " + ownip)
            if client[0] == ownip:
                send_message("Session chosen = " + sessionid)
                session = sessionid
            else:
                continue
        if sessionid != session:
            # Ignore traffic from other sessions.
            continue
        if obj == "0":
            #Force pause command
            send_message(str(PAUSE))
            continue
        elif obj == "1":
            #Force play command
            send_message(str(PLAY))
            continue
        try:
            json_obj = json.loads(obj)
        except ValueError:
            #Not json
            continue
        #send_message("Obj loaded")
        try:
            nodecmd = json_obj["command"]
            #Commands are added to object here
            if client not in nodecommands[nodecmd]:
                if len(nodecommands[nodecmd]) == 0:
                    nodecommands[nodecmd].append((time.time(), len(nodes))) #first timestamp, if this is too old remove commands
                nodecommands[nodecmd].append(client) #Check length of this list for agreement between nodes, length is amount of nodes + 1
        except KeyError:
            #No commands, json is the latest status of a node
            json_obj["receiveTime"] = time.time()
            keylist = list(nodes.keys())
            for key in keylist:
                if time.time() - nodes[key]["receiveTime"] > 10: #Node has been lost
                    nodes.pop(key)
            nodes[client[0]] = json_obj
            continue
|
from lib.base import BaseGithubAction
from lib.formatters import repo_to_dict
__all__ = [
'GetRepoAction'
]
class GetRepoAction(BaseGithubAction):
    """StackStorm action: fetch a GitHub repository and return it as a dict."""

    def run(self, user, repo, base_url):
        # FIX: identity comparison with None (`== None` is unidiomatic and
        # can misbehave with objects overriding __eq__).
        # When a base_url is given (GitHub Enterprise), it is folded into
        # the reset token alongside the user.
        if base_url is None:
            self._reset(user)
        else:
            self._reset(user + '|' + base_url)
        # Use distinct names instead of rebinding the `user`/`repo` params.
        gh_user = self._client.get_user(user)
        gh_repo = gh_user.get_repo(repo)
        return repo_to_dict(repo=gh_repo)
|
from ..main import utils
def test_mac_os():
    """get_user_details('mac_os') yields Chris Mipi."""
    details = utils.get_user_details('mac_os')
    assert details['name'] == 'Chris'
    assert details['surname'] == 'Mipi'
def test_windows():
    """get_user_details('windows') yields Makhabane Mipi."""
    details = utils.get_user_details('windows')
    assert details['name'] == 'Makhabane'
    assert details['surname'] == 'Mipi'
def test_linux():
    """get_user_details('linux') yields Christopher Mipi."""
    details = utils.get_user_details('linux')
    assert details['name'] == 'Christopher'
    assert details['surname'] == 'Mipi'
def test_ip_localhost():
    """Loopback and unspecified addresses are rejected."""
    for addr in ('127.0.0.1', '0.0.0.0'):
        assert utils.valid_ip_address(addr) == False
def test_ip_valid_ip():
    """A routable public address is accepted."""
    assert utils.valid_ip_address('41.144.74.153') == True
|
# Simple guessing game: the secret number is 3, the player gets three
# guesses in total (one initial + two hinted retries).
number = 3
tries = 0
guess = int(input("Guess a number"))
for tries in range(0, 2):
    # FIX: stop prompting as soon as the number is found; the original
    # kept looping (idly) through the remaining tries after a correct guess.
    if guess == number:
        break
    if number > guess:
        guess = int(input("Guess higher"))
    elif number < guess:
        guess = int(input("Guess lower"))
print("the correct number is 3")
|
# Tutorial: tuple declaration, indexing, slicing, (im)mutability.
# declare tuple of names and print
nametuple = ("Joe", "Sally", "Liam", "Robert", "Emma", "Isabella")
print("Contents of nametuple is: ", nametuple)
# tuple items can be accessed via [] operator
# note that index in [] is zero based
print("Tuple element at index 1: ", nametuple[1])
# index to access tuple can be negative
# negative index means beginning from the end
print("Tuple element at index -1: ", nametuple[-1])
# index can be also specified as a range
# range parameters are start index (inclusive) and end index (exclusive)
print("Tuple elements at range 2:5: ", nametuple[2:5])
# items of the tuple cannot be modified after its declared, though
# it is possible to convert tuple to list, modify the list and
# convert list back to tuple
namelist = list(nametuple)
namelist[1] = "Mary"
nametuple = tuple(namelist)
print("Contents of nametuple is: ", nametuple)
# tuple with only one item must be declared with trailing comma
nametuple = ("Joe",)
print(type(nametuple))
# note that on below the variable is not a tuple
# (parentheses alone make it a plain str)
nametuple = ("Joe")
print(type(nametuple))
|
USAGE="""
Creates the heuristic hybrid index given a threshold argument.
"""
import pandas as pd
import numpy as np
import argparse, os, logging, sys
import dev_capacity_calculation_module
# Per-user machine paths (Windows drive mappings).
if os.getenv('USERNAME') == 'ywang':
    M_DIR = 'M:\\Data\\Urban\\BAUS\\PBA50\\Draft_Blueprint\\Base zoning'
    GITHUB_PETRALE_DIR = 'C:\\Users\\{}\\Documents\\GitHub\\petrale\\'.format(os.getenv('USERNAME'))
elif os.getenv('USERNAME') == 'lzorn':
    M_DIR = 'M:\\Data\\Urban\\BAUS\\PBA50\\Draft_Blueprint\\Base zoning'
    # FIX: the original called .format(os.getenv('USERNAME')) on this
    # literal, but it contains no '{}' placeholder, so the call was a no-op.
    GITHUB_PETRALE_DIR = 'X:\\petrale\\'

# input file locations
PLU_BOC_M_DIR = os.path.join(M_DIR, 'outputs')
JURIS_CAPACITY_FILE = os.path.join(PLU_BOC_M_DIR, '2020_06_03_juris_basis_pba40_capacity_metrics.csv')

# output file
OUTPUT_FILE = os.path.join(GITHUB_PETRALE_DIR, 'policies\\plu\\base_zoning\\hybrid_index', 'idx_urbansim_heuristic.csv')
LOG_FILE = os.path.join(GITHUB_PETRALE_DIR, 'policies\\plu\\base_zoning\\hybrid_index', 'idx_urbansim_heuristic.log')
if __name__ == '__main__':
    # Parse the single positional argument: the acceptance threshold.
    parser = argparse.ArgumentParser(description=USAGE, formatter_class=argparse.RawDescriptionHelpFormatter,)
    parser.add_argument("threshold", type=float, help="Threshold for capacity metric percentage change used to accept BASIS for a jurisdiction; should be between 0.0 and 1.0")
    args = parser.parse_args()

    if args.threshold <= 0 or args.threshold >= 1.0:
        print("Expect threshold in (0,1)")
        sys.exit()

    # create logger
    logger = logging.getLogger(__name__)
    logger.setLevel('DEBUG')
    # console handler
    ch = logging.StreamHandler()
    ch.setLevel('INFO')
    ch.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p'))
    logger.addHandler(ch)
    # file handler
    fh = logging.FileHandler(LOG_FILE, mode='w')
    fh.setLevel('DEBUG')
    fh.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p'))
    logger.addHandler(fh)

    logger.info("JURIS_CAPACITY_FILE = {}".format(JURIS_CAPACITY_FILE))
    logger.info("THRESHOLD = {}".format(args.threshold))

    # Read jurisdiction capacity metrics
    capacity_juris_pba40_basis = pd.read_csv(JURIS_CAPACITY_FILE)
    logger.info("Read {} lines from {}; head:\n{}".format(len(capacity_juris_pba40_basis), JURIS_CAPACITY_FILE, capacity_juris_pba40_basis.head()))
    logger.debug("dtypes:\n{}".format(capacity_juris_pba40_basis.dtypes))

    # pull jurisdictions to start the index dataframe we're building
    index_df = capacity_juris_pba40_basis[["juris_zmod"]].drop_duplicates()
    logger.debug("Have {} unique jurisdictions".format(len(index_df)))

    # intensity variables first
    for variable in dev_capacity_calculation_module.INTENSITY_CODES + dev_capacity_calculation_module.ALLOWED_BUILDING_TYPE_CODES:
        # does it affect residential?
        is_res = False
        if variable in ["dua","height"]+dev_capacity_calculation_module.RES_BUILDING_TYPE_CODES:
            is_res = True
        # does it affect non-residential?
        # Note: it can be both res and non-res.
        # Also, strictly speaking, height doesn't really affect either since it affects
        # the imputation of dua and far, so this will effectively turn on BASIS for height
        is_nonres = False
        if variable in ["far","height"]+dev_capacity_calculation_module.NONRES_BUILDING_TYPE_CODES:
            is_nonres = True
        logger.info("Setting hybrid index for variable {:10} res? {:5} nonres? {:5}".format(variable, is_res, is_nonres))

        # variable index name - for allowed development types, it just has a suffix "_idx"
        variable_idx = "{}_idx".format(variable)
        # for intensity variables, it has max_XX_idx
        if variable in dev_capacity_calculation_module.INTENSITY_CODES:
            variable_idx = "max_{}_idx".format(variable)
            # intensity have proportion variables too --- set to 1.0
            index_df["proportion_adj_{}".format(variable)] = 1.0

        # pull the select rows from capacity_juris_pba40_basis relevant for this variable
        capacity_juris_var = capacity_juris_pba40_basis.loc[ capacity_juris_pba40_basis['variable'] == variable, ].copy()
        # default to PBA40
        capacity_juris_var[variable_idx] = dev_capacity_calculation_module.USE_PBA40

        # for variables that are res and nonres, require units AND sqft to be within threshold
        if is_res and is_nonres:
            capacity_juris_var.loc[ ((abs(capacity_juris_var.units_basis - capacity_juris_var.units_pba40) / capacity_juris_var.units_pba40) <= args.threshold) &
                                    ((abs(capacity_juris_var.Ksqft_basis - capacity_juris_var.Ksqft_pba40) / capacity_juris_var.Ksqft_pba40) <= args.threshold),
                                    variable_idx ] = dev_capacity_calculation_module.USE_BASIS
        # for res variables, require units to be within threshold
        elif is_res:
            capacity_juris_var.loc[ (abs(capacity_juris_var.units_basis - capacity_juris_var.units_pba40) / capacity_juris_var.units_pba40) <= args.threshold,
                                    variable_idx ] = dev_capacity_calculation_module.USE_BASIS
        # for nonres variables, require sqft to be within threshold
        elif is_nonres:
            capacity_juris_var.loc[ (abs(capacity_juris_var.Ksqft_basis - capacity_juris_var.Ksqft_pba40) / capacity_juris_var.Ksqft_pba40) <= args.threshold,
                                    variable_idx ] = dev_capacity_calculation_module.USE_BASIS

        # bring into index_df
        index_df = pd.merge(left=index_df, right=capacity_juris_var[["juris_zmod",variable_idx]])

    # report out number of BASIS jurisdictions for each variable
    # these should match the tableau
    # NOTE(review): sum() is applied to the whole frame, which also "sums"
    # the juris_zmod string column — presumably only the *_idx counts matter.
    logger.info("Number of jurisdictions using BASIS variable:\n{}".format(index_df.sum()))

    # rename jurisdiction
    index_df.rename(columns = {'juris_zmod': 'juris_name'}, inplace = True)

    # save it
    index_df.to_csv(OUTPUT_FILE, index = False)
    logger.info('Wrote {}'.format(OUTPUT_FILE))
|
# Read triples of lines until EOF: element count, whitespace-separated
# tokens, and a direction flag (truthy = descending); print the sorted
# tokens (lexicographic string order, as in the original).
while True:
    try:
        list_num = int(input())
        list_ = input().split()
        sort_ = int(input())
        # BUG FIX: the original sliced the first `list_num` items into
        # `lise_new` but then sorted the *whole* list, leaving the slice
        # unused.  Sort only the requested prefix, as clearly intended.
        list_ = list_[:list_num]
        if sort_:
            list_ = sorted(list_, reverse=True)
        else:
            list_ = sorted(list_)
        print(" ".join(list_))
    except (EOFError, ValueError):
        # FIX: the bare `except:` also swallowed KeyboardInterrupt and
        # SystemExit; only end-of-input / malformed input should stop us.
        break
|
import proxmox_api
import rpyc
import ec2_functions
import sys
import getpass
import multiprocessing
class EC2Service(rpyc.Service):
    """RPyC service exposing EC2-like VM lifecycle calls backed by Proxmox."""

    def on_connect(self, conn):
        # No per-connection initialisation needed.
        pass

    def on_disconnect(self, conn):
        # No per-connection cleanup needed.
        pass

    def exposed_make_vm_instance(self, public_key):
        """Allocate a VM id and clone/configure the VM in the background."""
        global proxmox
        new_id = proxmox.get_next_vm_id()
        worker = multiprocessing.Process(
            target=ec2_functions.vm_copy_and_setup,
            args=(public_key, proxmox, new_id))
        worker.start()
        return new_id

    def exposed_get_info(self, vm_id):
        """Return status information for the given VM."""
        global proxmox
        return ec2_functions.get_info(proxmox, vm_id)

    def exposed_stop_vm(self, vm_id):
        """Stop the VM; report 'OK' or 'ERROR'."""
        global proxmox
        return "OK" if proxmox.stop_vm("pve", vm_id) == True else "ERROR"

    def exposed_start_vm(self, vm_id):
        """Start the VM; report 'OK' or 'ERROR'."""
        global proxmox
        return "OK" if proxmox.start_vm("pve", vm_id) == True else "ERROR"

    def exposed_delete_vm(self, vm_id):
        """Delete the VM; report 'OK' or 'ERROR'."""
        global proxmox
        return "OK" if ec2_functions.delete_vm(proxmox, vm_id) == True else "ERROR"
# Shared Proxmox API handle used by every EC2Service method.
proxmox = None

if __name__ == "__main__":
    if len(sys.argv) < 2:
        print("Must specify host name or IP of the proxmox server...")
        # FIX: a usage error must exit with a non-zero status; the
        # original called exit(0), signalling success to the shell.
        sys.exit(1)
    proxmox_ip = sys.argv[1]
    proxmox = proxmox_api.ProxmoxAPI(proxmox_ip, False)
    print("The beast slowly wakes up...")
    print("Enter username: ", end="")
    username = input()
    print("Enter password: ", end="")
    password = getpass.getpass()
    if not proxmox.get_cookies(username, password):
        print("Could not get cookies for proxmox....")
        # Use sys.exit consistently (exit() is meant for the REPL).
        sys.exit(1)
    # Drop the credentials as soon as the session cookie is obtained.
    username = None
    password = None
    print("")
    print("Starting service...")
    from rpyc.utils.server import ThreadedServer
    t = ThreadedServer(EC2Service, port=18861)
    t.start()
#Q1: tuple of Olympic host cities
olympics = ('Beijing', 'London', 'Rio', 'Tokyo')

#Q2: collect the country (index 1) of every record
tuples_lst = [('Beijing', 'China', 2008), ('London', 'England', 2012), ('Rio', 'Brazil', 2016, 'Current'), ('Tokyo', 'Japan', 2020, 'Future')]
# FIX: the loop variable was named `list`, shadowing the builtin.
country = [record[1] for record in tuples_lst]

#Q3: tuple unpacking
olymp = ('Rio', 'Brazil', 2016)
# NOTE: this rebinds `country` from the Q2 list to the string 'Brazil'.
city, country, year = 'Rio', 'Brazil', 2016

#Q4: returning several values packs them into a tuple
def info(name, gender, age, bday_month, hometown):
    """Return the five personal details as a tuple."""
    return name, gender, age, bday_month, hometown

#Q5: medal counts (dict preserves insertion order, so the list is ordered)
gold = {'USA':31, 'Great Britain':19, 'China':19, 'Germany':13, 'Russia':12, 'Japan':10, 'France':8, 'Italy':8}
# FIX: iterate .values() directly instead of indexing .items() pairs.
num_medals = list(gold.values())
|
import cv2
import numpy as np
class NeuralNet:
SIGMOID, TANH = 0, 1
activation_map = [staticmethod.sigmoid_activation, staticmethod.tanh_activation]
'''For now, I assume all the hidden layers have the same amount of neurons (n_hidden)'''
def __init__(self, n_hidden_layers=1, n_input=2, n_output=2,
n_hidden=2, activition_function = SIGMOID):
self.n_hidden_layers = n_hidden_layers
self.n_input = n_input
self.n_output = n_output
self.n_hidden = n_hidden
'''learning rate'''
self.alpha = 0.5
'''First weight-set connects input layer to hidden layer 1'''
self.i_weight = np.random.random_sample((n_input, n_hidden)) * 2. - 1
# self.i_weight = np.array([[.15, .25], [.2, .3]])
# print 'i_weight:', self.i_weight
'''Other weight-sets connect 2 consecutive hidden layers together'''
'''Note: h_weight[i, j, k] mean the weight from layer i-th to layer (i + 1)-th'''
self.h_weight = np.random.random_sample((n_hidden_layers - 1, n_hidden, n_hidden)) * 2. - 1
# print 'h_weight:', self.h_weight
'''Last weight-set connects the last hidden layer to output layer'''
self.o_weight = np.random.random_sample((n_hidden, n_output)) * 2. - 1
# self.o_weight = np.array([[.4, .5], [.45, .55]])
# print 'o_weight:', self.o_weight
'''Biases is attached with hidden layers and output layer'''
self.h_bias = np.random.random_sample((self.n_hidden_layers, self.n_hidden)) * 2. - 1
self.o_bias = np.random.random_sample(self.n_output) * 2. - 1
# self.h_bias = np.array([[.35, .35]])
# self.o_bias = np.array([.6, .6])
# print 'h_bias:', self.h_bias
# print 'o_bias:', self.o_bias
self.x = None
self.h_net = np.zeros((self.n_hidden_layers, self.n_hidden), dtype=float)
self.h_out = np.zeros((self.n_hidden_layers, self.n_hidden), dtype=float)
self.y_net = np.zeros(self.n_output, dtype=float)
self.y_out = np.zeros(self.n_output, dtype=float)
def feedForward(self, x):
# print 'feedForward'
assert len(x) == self.n_input, ">>ERROR<< len(x) is different from self.n_input"
self.x = np.array(x)
'''Feed from input layer to the first hidden layer'''
for idx in range(self.n_hidden):
self.h_net[0, idx] = np.dot(self.x, self.i_weight[:, idx]) + self.h_bias[0, idx]
# print 'h_net:', self.h_net[0]
self.h_out[0] = self.ReLU_activation(self.h_net[0])
# print 'h_out:', self.h_out[0]
'''Feed between 2 consecutive hidden layers'''
for layer_idx in range(1, self.n_hidden_layers):
for neuron_idx in range(self.n_hidden):
self.h_net[layer_idx, neuron_idx] =\
np.dot(self.h_out[layer_idx - 1, :], self.h_weight[layer_idx - 1][:, neuron_idx])\
+ self.h_bias[layer_idx, neuron_idx]
self.h_out[layer_idx] = self.ReLU_activation(self.h_net[layer_idx])
'''Feed from the last hidden layer to output layer'''
for idx in range(self.n_output):
self.y_net[idx] = np.dot(self.h_out[self.n_hidden_layers - 1, :], self.o_weight[:, idx]) \
+ self.o_bias[idx]
self.y_out = self.ReLU_activation(self.y_net)
print 'y_out:', self.y_out
def backPropagation(self, target):
# print 'Back Propagation'
assert len(target) == self.n_output, ">>ERROR<< len(y) is different from self.n_output"
target = np.array(target)
'''Evaluate error'''
error = 0.5 * np.power(target - self.y_out, 2)
# print 'error:', error
print 'total error:', np.sum(error)
'''Back-propagate from output layer, and evaluate error for the next phase (last hidden layer)'''
new_error = np.zeros(self.n_hidden, dtype=float)
for j in range(self.n_output):
'''Calculate error for the last hidden layer'''
new_error += (target[j] - self.y_out[j]) * self.derivative_ReLU(self.y_net[j])\
* self.o_weight[:, j]
'''Optimize weights'''
self.o_weight[:, j] += self.alpha * (target[j] - self.y_out[j])\
* self.derivative_ReLU(self.y_net[j])\
* self.h_out[self.n_hidden_layers - 1, :]
'''Back-propagate and evaluate error for pairs the hidden layers'''
for k in range(self.n_hidden_layers - 2, -1, -1):
error, new_error = new_error, np.zeros(self.n_hidden, dtype=float)
for j in range(self.n_hidden):
new_error += error[j] * self.derivative_ReLU(self.h_net[k + 1, j])\
* self.h_weight[k, :, j]
self.h_weight[k, :, j] += self.alpha * error[j]\
* self.derivative_ReLU(self.h_net[k + 1, j])\
* self.h_out[k, :]
'''Back-propagate to the input layer'''
error, new_error = new_error, np.zeros(self.n_hidden, dtype=float)
for j in range(self.n_hidden):
# self.i_weight[:, j] += self.alpha * self.i_weight[:, j] * error[j]
self.i_weight[:, j] += self.alpha * error[j]\
* self.derivative_ReLU(self.h_net[0, j])\
* self.x[:]
# print 'i_weight:', self.i_weight
'''DONE'''
'''This implementation use sigmoid function for Activation'''
@staticmethod
def sigmoid_activation(self, val):
'''val needs to be (a scalar) or (a numpy array)'''
# return 1. / (1. + np.exp(val * -1))
@staticmethod
def derivative_sigmoid(self, val):
'''val needs to be a scalar'''
sig = NeuralNet.sigmoid_activation(val)
return sig * (1. - sig)
'''Tanh activation function'''
@staticmethod
def tanh_activation(self, val):
return np.tanh(val)
@staticmethod
def derivative_tanh(self, val):
tanh = NeuralNet.tanh_activation(val)
return 1. - tanh * tanh
'''ReLU activation function'''
def ReLU_activation(self, val):
if np.isscalar(val):
return val if val >= 0 else 0
tmp = np.copy(val)
tmp[tmp < 0] = 0
return tmp
def derivative_ReLU(self, val):
return 1. if val >= 0 else 0
if __name__ == '__main__':
    # Tiny smoke run: two forward/backward passes on a fixed sample.
    net = NeuralNet(n_hidden_layers=1, n_hidden=11)
    sample = [0.05, 0.1]
    target = [.01, .99]
    for _ in range(2):
        net.feedForward(sample)
        net.backPropagation(target)
# Demo of the common str methods, one per variable.
# FIX: the first variable was named `str`, shadowing the builtin type.
text = "sumit sudalkar"
print(text.capitalize())
str1 = "PYTHON NEED MORE PRACTICE"
a = str1.casefold()
print(a)
str2 = "It is example of count, count the number of string"
b = str2.count("count")
print(b)
str3 = "Align"
c = str3.center(30)
print(c)
str4 = "Working on Python"
x = str4.encode()
print(x)
str5 = "It is example of Encode"
d = str5.encode()
print(d)
str6 = "This is a endswith method."
e = str6.endswith(".")
print(e)
str7 = "Hello, it's a practice"
f = str7.find("practice")
print(f)
str8 = "I am selling fruits in {price:.2f} rupees"
print(str8.format(price = 50))
str9 = "todays fruit sell 50kg"
# BUG FIX: the original tested str8.isdigit(), leaving str9 unused —
# the isdigit example clearly refers to str9.
g = str9.isdigit()
print(g)
str10 = "Demo String"
h = str10.isidentifier()
print(h)
str11 = "strin is in lowercase"
i = str11.islower()
print(i)
str12 = "72765464344"
j = str12.isnumeric()
print(j)
str13 = "When string have symbols like #? it is not printable, it it shows false"
k = str13.isprintable()
print(k)
str14 = "string having " " whitespace"
l = str14.isspace()
print(l)
str15 = "Text Have Title Text"
m = str15.istitle()
print(m)
str16 = "SUPPER MEANS ALL TEXT IN UPERCASE FORMAT"
n = str16.isupper()
print(n)
str17 = ("bhole", "vishwa", "raghu")
o = "nath!".join(str17)
print(o)
str18 = "Python"
p = str18.ljust(1)
print(p, "need more practice")
str19 = "LOWER ALL TEXT"
q = str19.lower()
print(q)
str20 = "Python"
r = str20.lstrip()
print("of all programming language", r, "is my favorite")
str21 = "convert all string in uppercase"
s = str21.upper()
print(s)
str22 = "similar as a capitalize method"
t = str22.title()
print(t)
str23 = "charactersisinalphabets"
u = str23.isalpha()
print(u)
str24 = "Welcome to the python class"
v = str24.split()
print(v)
str25 = "150"
w = str25.zfill(5)
print(w)
str26 = "Hi, my name is sumit"
x = str26.startswith("Hi")
print(x)
# translate() with a {codepoint: codepoint} mapping (104 = 'h' -> 72 = 'H').
str27 = {104: 72}
y = "hello sir"
print(y.translate(str27))
str28 = "Small Text in Capital and Capital Text In Small"
z = str28.swapcase()
print(z)
# Generated by Django 3.1.4 on 2021-11-12 08:25
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: re-points Host.Created_by at the swappable user model
    and adds the QuestionsTITA, QuestionsMCQ and Marks_Of_User tables."""

    dependencies = [
        # Required because Created_by and Marks_Of_User.user reference AUTH_USER_MODEL.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('Quiz', '0002_host_created_by'),
    ]

    operations = [
        # Host.Created_by becomes a cascading FK to the (swappable) user model.
        migrations.AlterField(
            model_name='host',
            name='Created_by',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        # Free-text questions (TITA — presumably "Type In The Answer"), owned by a host.
        migrations.CreateModel(
            name='QuestionsTITA',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Question', models.CharField(max_length=300)),
                ('host', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Quiz.host')),
            ],
        ),
        # Multiple-choice questions: four options plus the correct answer text.
        migrations.CreateModel(
            name='QuestionsMCQ',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Question', models.CharField(max_length=300)),
                ('Option1', models.CharField(max_length=20)),
                ('Option2', models.CharField(max_length=20)),
                ('Option3', models.CharField(max_length=20)),
                ('Option4', models.CharField(max_length=20)),
                ('correct', models.CharField(max_length=20)),
                ('host', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Quiz.host')),
            ],
        ),
        # Per-user score for a given host's quiz.
        migrations.CreateModel(
            name='Marks_Of_User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('score', models.FloatField()),
                ('host', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Quiz.host')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
import os
from abc import ABC, abstractmethod
class base_sanitizer(ABC):
    """Abstract base class for emulator sanitizers.

    BUGFIX: the class imported ABC/abstractmethod but never inherited ABC,
    so @abstractmethod had no effect and incomplete sanitizers could be
    instantiated silently. Subclasses must provide NAME and enable().
    """

    def __init__(self, ql):
        # ql: emulator/session handle; only ql.os.emu_error() is used here.
        self.ql = ql

    @property
    @staticmethod
    @abstractmethod
    def NAME():
        """Unique identifier of the sanitizer."""
        pass

    @abstractmethod
    def enable(self):
        """Install the sanitizer's hooks on the emulator."""
        pass

    def verbose_abort(self):
        # Dump emulator error state, then hard-abort the process.
        self.ql.os.emu_error()
        os.abort()
|
#from .alexnet import AlexNet
#from .lenet import LeNet5
#from .mobilenet_v2 import MobileNetV2
#from .mobilenet_v3 import MobileNetv3
#from .regnet import RegNet
#from .resnest import ResNeSt
#from .resnet import ResNet, ResNetV1d
#from .resnet_cifar import ResNet_CIFAR
#from .resnext import ResNeXt
#from .seresnet import SEResNet
#from .seresnext import SEResNeXt
#from .shufflenet_v1 import ShuffleNetV1
#from .shufflenet_v2 import ShuffleNetV2
#from .vgg import VGG
from .rednet import RedNet
# Only RedNet is actually imported above; every other backbone import is
# commented out. Advertising unimported names in __all__ makes
# `from <package> import *` fail with AttributeError, so list only what
# really exists. Re-add names here as their imports are restored.
__all__ = ['RedNet']
|
#!/usr/bin/env python3
# -*- coding:UTF-8 -*-
import sys
sys.path.append("../common/") # 将其他模块路径添加到系统搜索路径
import numpy as np
import tensorflow as tf
import time
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.exceptions import NotFittedError
from sklearn.metrics import accuracy_score, f1_score
from sklearn.model_selection import RandomizedSearchCV
from tcn import TCN
from read_data import read_data, index_generator
tf.set_random_seed(42)
np.random.seed(42)
# 构建 TCN 模型类,为了兼容 scikit-learning 的 RandomizedSearchCV 类,后续可能实现超参数搜索
class TCNClassifier(BaseEstimator, ClassifierMixin):
    """Temporal Convolutional Network classifier on a TensorFlow 1.x graph.

    Follows the scikit-learn estimator protocol (BaseEstimator /
    ClassifierMixin) so it can later be used with RandomizedSearchCV.
    """

    def __init__(self,
                 sequence_length,
                 kernel_size,
                 num_channels=[30]*6,
                 dropout=0.5,
                 batch_size=16,
                 in_channels=32,
                 random_state=None,
                 learning_rate=0.001,
                 optimizer_class=tf.train.AdamOptimizer):
        # sklearn convention: store constructor args verbatim so that
        # get_params()/clone() keep working. NOTE: num_channels keeps its
        # shared-list default for that reason; it is never mutated here.
        self.num_channels = num_channels
        self.sequence_length = sequence_length
        self.kernel_size = kernel_size
        self.dropout = dropout
        self.batch_size = batch_size
        self.random_state = random_state
        self.in_channels = in_channels
        self.learning_rate = learning_rate
        self.optimizer_class = optimizer_class
        self._session = None  # live tf.Session after fit()

    def _TCN(self, inputs, n_outputs, training):
        '''Build the TCN network; returns the output logits tensor.'''
        outputs = TCN(inputs, n_outputs, self.num_channels,
                      self.sequence_length, self.kernel_size, self.dropout, is_training=training)
        return outputs

    def _build_graph(self, n_outputs):
        '''Build the computation graph: placeholders, loss, optimizer, metrics.'''
        if self.random_state is not None:
            tf.set_random_seed(self.random_state)
            np.random.seed(self.random_state)
        inputs = tf.placeholder(tf.float32,
                                shape=(None, self.sequence_length, self.in_channels), name="inputs")
        # NOTE(review): shape=(None) is just None (fully unconstrained);
        # (None,) was probably intended — kept as-is to avoid tightening
        # graph shape checks.
        labels = tf.placeholder(tf.int32, shape=(None), name="labels")
        # Train/inference switch (controls dropout inside TCN).
        self._training = tf.placeholder_with_default(False, shape=(), name="training")
        learning_rate_ = tf.placeholder(tf.float32, shape=(), name="learning_rate")
        tcn_outputs = self._TCN(inputs, n_outputs, self._training)
        predictions = tf.nn.softmax(tcn_outputs, name="predictions")
        # Cross-entropy on the raw logits.
        xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=tcn_outputs)
        loss = tf.reduce_mean(xentropy, name="loss")
        # Optimizer node; the learning rate is fed at run time.
        optimizer = self.optimizer_class(learning_rate=learning_rate_)
        training_op = optimizer.minimize(loss)
        # Accuracy node.
        correct = tf.nn.in_top_k(tcn_outputs, labels, 1)
        accuracy = tf.reduce_mean(tf.cast(correct, tf.float32), name="accuracy")
        # Global initializer and checkpoint saver.
        init = tf.global_variables_initializer()
        saver = tf.train.Saver()
        self._X, self._y = inputs, labels
        self._learning_rate = learning_rate_
        self._predictions, self._loss = predictions, loss
        self._training_op, self._accuracy = training_op, accuracy
        self._init, self._saver = init, saver

    # Backward-compatible alias for the original (misspelled) private name.
    _bulid_graph = _build_graph

    def close_session(self):
        """Close the underlying session, if any."""
        if self._session:
            self._session.close()

    def _get_model_params(self):
        '''Snapshot all variable values (for early stopping; faster than disk).'''
        with self._graph.as_default():
            gvars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)  # every global variable
            return {gvar.op.name: value for gvar, value in zip(gvars, self._session.run(gvars))}

    def _restore_model_params(self, model_params):
        '''Restore a snapshot taken by _get_model_params().'''
        gvar_names = list(model_params.keys())
        # Look up each variable's assign op by name.
        assign_ops = {gvar_name: self._graph.get_operation_by_name(gvar_name + "/Assign") for gvar_name in gvar_names}
        # `inputs` is a tf.Operation attribute: the tensors feeding the op;
        # inputs[1] is the value placeholder of the assign op.
        init_values = {gvar_name: assign_op.inputs[1] for gvar_name, assign_op in assign_ops.items()}
        # Keys are tensors, so session.run substitutes the fed values.
        feed_dict = {init_values[gvar_name]: model_params[gvar_name] for gvar_name in gvar_names}
        self._session.run(assign_ops, feed_dict=feed_dict)

    def fit(self, X, y, n_epochs, X_valid=None, y_valid=None, X_test=None, y_test=None):
        '''Fit the model to the training set. If X_valid and y_valid are provided, use early stopping'''
        self.close_session()
        # Guard the diagnostics: X_test/y_test default to None.
        if X_test is not None:
            print("X test shape: ", X_test.shape)
        if y_test is not None:
            print("y test shape: ", y_test.shape)
        self.classes_ = np.unique(y)
        n_outputs = len(self.classes_)  # number of target classes
        self.class_to_index_ = {label: index for index, label in enumerate(self.classes_)}
        y = np.array([self.class_to_index_[label] for label in y], dtype=np.int32)
        if y_test is not None:
            self.y_test_classes_ = np.unique(y_test)
            y_test_n_outputs = len(self.y_test_classes_)
            self.y_test_class_to_index_ = {label: index for index, label in enumerate(self.y_test_classes_)}
            y_test = np.array([self.y_test_class_to_index_[label] for label in y_test], dtype=np.int32)
        self._graph = tf.Graph()
        with self._graph.as_default():
            self._build_graph(n_outputs)
        # Early-stopping bookkeeping.
        max_check_without_progress = 20
        checks_without_progress = 0
        best_loss = np.infty
        best_params = None
        # Training phase.
        best_acc = 0  # best test-set accuracy seen so far
        seed = 0
        self._session = tf.Session(graph=self._graph)
        with self._session.as_default() as sess:
            sess.run(self._init)
            for epoch in range(n_epochs):
                seed += 1
                # Step the learning rate down once epoch reaches 100, then 150.
                if epoch != 0 and epoch // 100 != 0:
                    self.learning_rate = 0.0002
                if epoch != 0 and epoch // 150 != 0:
                    self.learning_rate = 0.0001
                start_time = time.time()
                for X_batch_index, y_batch_index in index_generator(len(y), self.batch_size, seed=seed):
                    X_batch = X[X_batch_index]
                    y_batch = y[y_batch_index]
                    sess.run(self._training_op, feed_dict={self._X: X_batch, self._y: y_batch, self._training: True, self._learning_rate: self.learning_rate})
                if X_valid is not None and y_valid is not None:
                    # Early stopping on validation loss.
                    loss_val, acc_val = sess.run([self._loss, self._accuracy],
                                                 feed_dict={self._X: X_valid, self._y: y_valid})
                    if loss_val < best_loss:
                        best_loss = loss_val
                        best_params = self._get_model_params()
                        checks_without_progress = 0
                    else:
                        checks_without_progress += 1
                    # BUGFIX: "{.6f}" was an invalid format field and raised at run time.
                    print("{}\tValidation loss: {:.6f}\tBest loss: {:.6f}\tAccuracy: {:.2f}%".format(epoch,
                          loss_val, best_loss, acc_val*100))
                    if checks_without_progress >= max_check_without_progress:
                        print("Early stopping!")
                        break  # BUGFIX: the message was printed but training never stopped
                else:
                    # No validation set: report training loss/accuracy in chunks of 8.
                    total_loss = 0
                    total_acc = 0
                    for i in range(len(y) // 8):
                        X_batch = X[i*8:(i+1)*8, :, :]
                        y_batch = y[i*8:(i+1)*8]
                        loss_train, acc_train = sess.run([self._loss, self._accuracy],
                                                         feed_dict={self._X: X_batch, self._y: y_batch})
                        total_loss += loss_train
                        total_acc += acc_train
                    end_time = time.time()
                    print("{}\ttraining loss: {:.6f}\t| training accuracy: {:.2f}% | time: {:.2f}s".format(epoch,
                          total_loss/(len(y)//8), (total_acc / (len(y)//8))*100, end_time-start_time))
                    # `epoch % 1 == 0` kept: evaluate on the test set every epoch.
                    if X_test is not None and y_test is not None and epoch % 1 == 0:
                        total_acc_test = 0
                        total_loss_test = 0
                        for i in range(len(y_test) // 8):
                            X_batch_test = X_test[i*8:(i+1)*8, :, :]
                            y_batch_test = y_test[i*8:(i+1)*8]
                            loss_test, acc_test = sess.run([self._loss, self._accuracy],
                                                           feed_dict={self._X: X_batch_test, self._y: y_batch_test, self._training: False})
                            total_acc_test += acc_test
                            total_loss_test += loss_test
                        if total_acc_test >= best_acc:
                            # Checkpoint whenever test accuracy matches or improves.
                            best_acc = total_acc_test
                            self.save("./my_model/train_model.ckpt")
                        print("learning rate: ", self.learning_rate)
                        print("Test accuracy: {:.4f}%\t Test loss: {:.6f}".format((total_acc_test / (len(y_test) // 8))*100, total_loss_test/(len(y_test) // 8)))
            if best_params:
                self._restore_model_params(best_params)
            return self

    def predict_proba(self, X):
        """Return class probabilities for X; requires a fitted model."""
        if not self._session:
            raise NotFittedError("This %s instance is not fitted yet" % self.__class__.__name__)
        with self._session.as_default() as sess:
            return self._predictions.eval(feed_dict={self._X: X})

    def predict(self, X):
        """Return predicted class labels (original label values) for X."""
        class_indices = np.argmax(self.predict_proba(X), axis=1)
        return np.array([self.classes_[class_index] for class_index in class_indices], np.int32).reshape(-1)

    def save(self, path):
        """Checkpoint the session's variables to `path`."""
        self._saver.save(self._session, path)

    def restore(self, path="./my_model/train_model.ckpt"):
        """Restore the session's variables from a checkpoint."""
        self._saver.restore(self._session, path)
if __name__ == "__main__":
    # ------------------------------------------------------------------
    # 5-fold cross-validation over 40 trials of a single subject.
    # ------------------------------------------------------------------
    np.random.seed(42)
    permutation = list(np.random.permutation(40))  # shuffled trial order
    train_index = [1]  # subject used for training data
    test_index = [1]   # same subject (within-subject evaluation)
    # Fold i tests on permutation[i*8:(i+1)*8] and trains on the rest.
    trials_list = []
    for fold in range(5):
        test_list = list(permutation[fold * 8:(fold + 1) * 8])
        train_list = list(permutation[:fold * 8]) + list(permutation[(fold + 1) * 8:])
        trials_list.append((train_list, test_list))
    assert(len(trials_list) == 5)
    num_ = 0
    F1_scores_list = []
    accuracy_list = []
    samples_info = []
    for train_trial_list, test_trial_list in trials_list:
        num_ = num_ + 1
        # Physiological-signal data; the original read_data() calls are kept
        # for reference:
        # datas_train, train_labels = read_data(people_list=train_index, classify_object_name=0, train_flag=True,
        #                                       trial_list=train_trial_list, windows=9, overlapping=8,
        #                                       cross_validation_number=num_)
        # datas_test, test_labels = read_data(people_list=test_index, classify_object_name=0, train_flag=False,
        #                                     trial_list=test_trial_list, windows=9, overlapping=8,
        #                                     cross_validation_number=num_)
        datas_train = np.load("../common/samples_single_people/valence_old/s0/train_datas" + str(num_) + ".npy")
        train_labels = np.load("../common/samples_single_people/valence_old/s0/train_labels" + str(num_) + ".npy")
        datas_test = np.load("../common/samples_single_people/valence_old/s0/test_datas" + str(num_) + ".npy")
        test_labels = np.load("../common/samples_single_people/valence_old/s0/test_labels" + str(num_) + ".npy")
        datas_train = np.array(datas_train)
        train_labels = np.array(train_labels)
        datas_test = np.array(datas_test)
        test_labels = np.array(test_labels)
        print("train data set number: ", len(train_labels))
        print("train datas shape: ", datas_train.shape)
        print("test data set number: ", len(test_labels))
        # BUGFIX: previously printed the whole test array instead of its shape.
        print("test datas shape: ", datas_test.shape)
        print("train label 0: ", sum(train_labels == 0), " train label 1: ", sum(train_labels == 1))
        print("test label 0: ", sum(test_labels == 0), " test label 1: ", sum(test_labels == 1))
        train_label_0 = sum(train_labels == 0)
        test_label_0 = sum(test_labels == 0)
        label_0 = (train_label_0, test_label_0)
        samples_info.append(label_0)
        # (samples, channels, time) -> (samples, time, channels) for the TCN.
        datas_train = datas_train.transpose((0, 2, 1))
        datas_test = datas_test.transpose((0, 2, 1))
        print("train number: ", len(train_labels))
        print(datas_train.shape, train_labels.shape)
        print("test number: ", len(test_labels))
        print(datas_test.shape, test_labels.shape)
        n_classes = 2  # apparently unused
        input_channels = datas_train.shape[-1]
        seq_length = datas_train.shape[-2]  # sequence length
        dropout = 0.5
        learning_rate = 0.001
        num_channels = [128, 64, 32]  # one entry per TCN block: its channel width
        kernel_size = 3  # convolution kernel size
        batch_size = 64
        # Build and train the TCN model for this fold.
        tcn = TCNClassifier(num_channels=num_channels, sequence_length=seq_length, kernel_size=kernel_size,
                            dropout=dropout, batch_size=batch_size, in_channels=input_channels,
                            random_state=42, learning_rate=learning_rate)
        tcn.fit(X=datas_train, y=train_labels, n_epochs=351, X_test=datas_test, y_test=test_labels)
        tcn.restore()  # reload the best checkpoint saved during training
        # Accuracy via predict() on batches of 8.
        total_acc_test = 0
        y_pred_labels = []
        for i in range(len(test_labels) // 8):
            X_batch_test = datas_test[i*8:(i+1)*8, :, :]
            y_batch_test = test_labels[i*8:(i+1)*8]
            y_pred = tcn.predict(X_batch_test)
            y_pred_labels += list(y_pred)
            total_acc_test += accuracy_score(y_batch_test, y_pred)
        print("Test accuracy: {:.4f}%".format((total_acc_test / (len(test_labels) // 8)) * 100))
        # NOTE(review): y_pred_labels only covers (len//8)*8 samples; this
        # f1_score call assumes len(test_labels) is a multiple of 8 — confirm.
        F1_scores_list.append(f1_score(test_labels, np.array(y_pred_labels)))
        # Accuracy/loss straight from the graph, for cross-checking.
        total_acc_test1 = 0
        total_loss_test = 0
        for i in range(len(test_labels) // 8):
            X_batch_test = datas_test[i*8:(i+1)*8, :, :]
            y_batch_test = test_labels[i*8:(i+1)*8]
            loss_test, acc_test = tcn._session.run([tcn._loss, tcn._accuracy],
                                                   feed_dict={tcn._X: X_batch_test, tcn._y: y_batch_test})
            total_acc_test1 += acc_test
            total_loss_test += loss_test
        print("Test accuracy: {:.4f}%\t Test loss: {:.6f}".format((total_acc_test1 / (len(test_labels) // 8)) * 100, total_loss_test / (len(test_labels) // 8)))
        temp = (total_acc_test / (len(test_labels) // 8), total_acc_test1 / (len(test_labels) // 8))
        accuracy_list.append(temp)
    # Final cross-validation summary.
    print("-------------------------------accuracy_list--------------------------------------")
    print(accuracy_list)
    print("-------------------------------F1_score--------------------------------------")
    print(F1_scores_list)
    print("-------------------------------sampels info--------------------------------------")
    print(samples_info)
|
import smbus
# I2C bus 1 (the Raspberry Pi default) used to talk to the stepper controller.
i2c_bus = smbus.SMBus(1)
# 7-bit I2C address of the stepper slave device.
DEVICE_ADDRESS = 0x08
# Magic position values understood by the firmware: instead of commanding a
# move, these disable/enable the stepper driver (INT32_MAX and INT32_MAX-1).
DISABLE = 2147483647
ENABLE = 2147483646
def send_step(n):
    """Transmit target position `n` to the device as a 4-byte big-endian block."""
    payload = n.to_bytes(4, byteorder='big')
    i2c_bus.write_block_data(DEVICE_ADDRESS, 0x00, list(payload))
def step_enable(enable):
    """Enable (True) or disable (False) the stepper driver.

    Replaces the original `enable*ENABLE or DISABLE` bool-arithmetic /
    short-circuit trick with a conditional expression that states the intent.
    """
    send_step(ENABLE if enable else DISABLE)
def main():
    """Interactive loop: re-home the stepper, then send typed positions until Ctrl-C."""
    step_enable(False)
    send_step(0)
    step_enable(True)
    try:
        while True:
            target = int(input("what position? "))
            send_step(target)
    except KeyboardInterrupt:
        # Disable the driver on exit and terminate the prompt line cleanly.
        step_enable(False)
        print()

if __name__ == "__main__":
    main()
from django.db import models
from django.utils import timezone
class Note(models.Model):
    # A single TODO item owned by a user, ordered by its deadline.

    class Meta:
        ordering = ['must_complete_before']

    # NOTE(review): ForeignKey without on_delete is pre-Django-2.0 syntax;
    # on Django >= 2.0 this line fails to load — confirm the target version.
    author = models.ForeignKey('auth.User')
    task = models.CharField(max_length=40)
    create_date = models.DateTimeField(default=timezone.now)
    must_complete_before = models.DateTimeField()
    # Defaults to "now" even for unfinished notes; overwritten by complete_task().
    complete_date = models.DateTimeField(default=timezone.now)
    complete_value = models.BooleanField(default=False)

    def complete_task(self):
        # Mark done, stamp the completion time, persist. Returns '' so the
        # method can be invoked directly from a template without output.
        self.complete_value = True
        self.complete_date = timezone.now()
        self.save()
        return ''

    def un_complete_task(self):
        # Revert to not-done; the previous complete_date is left in place.
        self.complete_value = False
        self.save()
        return ''

    def you_have_time(self):
        # Remaining time as str(timedelta) with the trailing ".%f"
        # microseconds (7 characters) sliced off.
        return str(self.must_complete_before - timezone.now())[:-7]

    def check_you_heve_time(self):
        # (sic: "heve" kept — renaming would break callers.) True when the
        # deadline has passed: the timedelta string then starts with '-'.
        if self.you_have_time()[0] == '-':
            return True
        else:
            return False

    def __str__(self):
        return self.task
class Dream(models.Model):
    """A user's dream/goal with a priority and an optional community rating."""

    class Meta:
        ordering = ['-priority_dream']  # highest priority first

    # NOTE(review): FK without on_delete is pre-Django-2.0 style, matching Note.
    author = models.ForeignKey('auth.User', null=True)
    my_dream = models.CharField(max_length=40)
    detail_dream = models.TextField(blank=True)
    priority_dream = models.DecimalField(max_digits=4, decimal_places=2)
    rating = models.DecimalField(max_digits=6, decimal_places=2, null=True)
    in_top = models.BooleanField(default=False)

    def add_this_dream(self):
        """Mark this dream as part of the top list and persist."""
        self.in_top = True
        self.save()
        return ''

    def add_rating(self):
        """Increment the rating by one and persist.

        BUGFIX: rating is nullable, so the first vote used to crash with
        TypeError (None + 1); a missing rating now counts as 0.
        """
        self.rating = (self.rating or 0) + 1
        self.save()
        return ''

    def __str__(self):
        return self.my_dream
|
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from subprocess import call
import pyrebase, json, requests
@csrf_exempt
def echo(req):
    """Convert the HWP file named by POST['fileUrl'] to PDF via Firebase storage.

    Pipeline: download .hwp -> hwp5html -> patch generated CSS -> wkhtmltopdf
    -> upload .pdf; intermediate files are removed. Returns the PDF's URL.
    """
    fileUrl = str(req.POST['fileUrl'])
    # SECURITY: credentials are hard-coded; move them to settings/env vars.
    config = {
        "apiKey": "AIzaSyDCUr8ng_lqfuwHEzOTE-yF2mbarPpBm5M",
        "authDomain": "boba-eecca.firebaseapp.com",
        "databaseURL": "https://boba-eecca.firebaseio.com",
        "storageBucket": "boba-eecca.appspot.com",
    }
    firebase = pyrebase.initialize_app(config)
    auth = firebase.auth()
    user = auth.sign_in_with_email_and_password('leem@plz.com', 'qweqwe')
    storage = firebase.storage()
    url = fileUrl.split('?')
    fileName = url[0].split('/')
    hwpName = fileName[-1]            # e.g. lecture.hwp
    fileN = fileName[-1].split('.')   # fileN[0] = lecture
    pdfName = fileN[0] + '.pdf'       # e.g. lecture.pdf
    storage.child(hwpName).download(hwpName, user['idToken'])
    # SECURITY BUGFIX: the commands were built as shell strings from the
    # client-controlled file name (shell injection). Argument-list form with
    # the default shell=False passes the name as a single literal argument.
    call(['/home/jh/HWPtoPDF_Django/home/jh/.local/bin/hwp5html', hwpName])  # transformation
    cssFile = fileN[0] + '/styles.css'
    modifyCss = ".Paper { border: 1px solid white;} body { padding: 0px; white-space:pre-wrap; }"
    with open(cssFile, "a") as f:  # `with` closes the file even on error
        f.write(modifyCss)
    call(['wkhtmltopdf', '-s', 'A5', './' + fileN[0] + '/index.xhtml', pdfName])
    call(['rm', '-rf', fileN[0], hwpName])  # remove intermediates on the server
    uploadfile = "./" + pdfName
    storage.child(pdfName).put(uploadfile)
    fileUrl = str(storage.child(pdfName).get_url(1))  # the pdf's new url
    call(['rm', pdfName])  # remove the local pdf
    return HttpResponse(fileUrl)
|
import abc
class Base(abc.ABC):
    """Abstract interface whose abstract methods still carry callable bodies."""

    @classmethod
    @abc.abstractmethod
    def factory(cls, *args):
        """Alternate constructor hook; calling it on Base itself fails because
        cls() cannot instantiate a class with unimplemented abstract methods."""
        return cls()

    @staticmethod
    @abc.abstractmethod
    def const_behavior():
        """Class-constant behavior hook; directly callable despite being abstract."""
        return 'Should never reach here'
class Implementation(Base):
    """Concrete Base: overrides both abstract hooks."""

    def do_something(self):
        pass

    @classmethod
    def factory(cls, *args):
        # Build, post-initialize, then hand back the instance.
        instance = cls(*args)
        instance.do_something()
        return instance

    @staticmethod
    def const_behavior():
        return 'Static behavior differs'
# Base.factory() fails: Base still has unimplemented abstract methods, so the
# cls() call inside the classmethod raises TypeError, caught and printed here.
try:
    o = Base.factory()
    print('Base.value:', o.const_behavior())
except Exception as err:
    print('ERROR:', str(err))
# The concrete subclass succeeds and its static override is used.
i = Implementation.factory()
print('Implementation.const_behavior :', i.const_behavior())
|
"""
In this problem, the median is defined as follows:
the median of a set S of n integers = the ceil(n / 2)-th smallest element in S
Task is to find the median in any arrays.
Naive algorithm: O(nlogn)
Median of the medians algorithm: O(n)
"""
from math import ceil
def select(arr, k):
    """Return the k-th smallest (0-based) element of arr.

    Raises ValueError for an empty array or an out-of-range k.
    """
    if not arr or not 0 <= k < len(arr):
        raise ValueError('Invalid input!')
    return _select(arr, k)
def _select(arr, k):
# base case
if len(arr) <= 5:
arr.sort()
return arr[k]
# divide array into sunarrays with 5 elements each
num_groups = len(arr) // 5
groups = []
for i in range(num_groups):
groups.append(arr[i * 5:(i + 1) * 5])
if num_groups * 5 < len(arr):
groups.append(arr[num_groups * 5:])
# find median for each group
medians = [_select(group, ceil(len(group) / 2) - 1) for group in groups]
# take the median of the medians as pivot
pivot = _select(medians, ceil(len(medians) / 2) - 1)
# partition the original array
lower, equal, larger = [], [], []
for num in arr:
if num < pivot:
lower.append(num)
elif num == pivot:
equal.append(num)
else:
larger.append(num)
if k >= len(lower) and k < len(lower) + len(equal):
return pivot
elif k < len(lower):
return _select(lower, k)
return _select(larger, k - len(lower) - len(equal))
def median_of_the_medians(arr):
    """Median per the module's definition: the ceil(n/2)-th smallest element."""
    mid_rank = ceil(len(arr) / 2) - 1
    return select(arr, mid_rank)
def naive_median(arr):
    """O(n log n) reference: sort, then pick the ceil(n/2)-th smallest element."""
    ordered = sorted(arr)
    mid = len(arr) // 2
    return ordered[mid] if len(arr) % 2 else ordered[mid - 1]
if __name__ == '__main__':
    # Randomized self-test: compare the linear-time selection against the
    # sort-based reference on ten random arrays.
    from random import randint
    for case_id in range(10):
        values = [randint(-1000, 1000) for _ in range(randint(20, 200))]
        got = median_of_the_medians(values)
        want = naive_median(values)
        if got != want:
            print("Test case: %d failed! Expect: %f, get: %f." %
                  (case_id, want, got))
        else:
            print("Test case: %d succeeded! Result: %f." % (case_id, got))
|
from rookcore import web_server
from rookcore.reactive import *
from . import web_server_common
class MyHandler(web_server.Handler, web_server_common.ServerIface):
    """RPC handler serving the example client over a websocket."""

    async def run(self, websocket):
        # Expose this handler instance as the RPC root object.
        await self.run_rpc(websocket, root_obj=self)

    # FIX: both classmethods named their first parameter `self`; by
    # convention (PEP 8) a classmethod's first parameter is `cls`.
    @classmethod
    def get_user_code(cls):
        """Module patterns shipped to the browser-side interpreter."""
        return [
            'rookcore.*', 'rookwidget.*',
            'example', 'example.web_server_client', 'example.web_server_common']

    @classmethod
    def get_main_code(cls):
        """Bootstrap statement executed on the client."""
        return 'import example.web_server_client; example.web_server_client.client_run()'

    async def welcome(self, who):
        print('hello %s' % who)
        return 'Hello, %s' % who

    async def welcome_reactive(self, who):
        # `who` is a reactive value; the greeting recomputes when it changes.
        return reactive(lambda: 'Hello, %s' % who.value)
if __name__ == '__main__':
    # Serve the example RPC app locally on port 4000.
    web_server.WebServer(MyHandler()).main('localhost', 4000)
|
from __future__ import absolute_import
# /////////////////////////////////////////////////////////////////////////////
# Bundle property O-R mapping classes
# see Conf() docstring
# /////////////////////////////////////////////////////////////////////////////
import splunk
import splunk.auth as auth
import splunk.entity as entity
import splunk.rest as rest
import splunk.util as util
import logging
logger = logging.getLogger('splunk.bundle')
def getConf(confName, sessionKey=None, namespace=None, owner=None, overwriteStanzas=False, hostPath=None):
    '''
    Parses a logical bundle file and returns a Conf() object

    If namespace=None, then the behavior is 3.2-style, where all writes are
    done to conf files in etc/system/local. All reads will merge every conf
    file that is accessible in etc/system and etc/apps/*. If a namespace is
    provided, then writes are done in etc/apps/<namespace>/local/, and reads
    are restricted to values in etc/apps/<namespace>/(default|local). If
    overwriteStanzas is true, old keys in edited stanzas will not be preserved.

    For the 3.2-style reading, the endpoint uses the following priority:
        system/local
        apps/<namespace>/local
        apps/<namespace>/default
        system/default
    '''
    # fallback to current user
    if not owner:
        owner = auth.getCurrentUser()['name']
    uri = entity.buildEndpoint(entityClass='properties', entityName=confName, namespace=namespace,
                               owner=owner, hostPath=hostPath)
    # the fillcontents arg will push all stanza keys down in 1 request instead
    # of iterating over all stanzas
    serverResponse, serverContent = rest.simpleRequest(uri, getargs={'fillcontents': 1}, sessionKey=sessionKey)
    if serverResponse.status != 200:
        # Non-200 is logged but not fatal; parsing below may still succeed.
        logger.info('getConf - server returned status=%s when asked for conf=%s' % (serverResponse.status, confName))
    # convert the atom feed into dict
    confFeed = rest.format.parseFeedDocument(serverContent)
    stanzas = confFeed.toPrimitive()
    # create Conf/Stanzas; import mode suppresses write-back while hydrating
    output = Conf(confName, namespace=namespace, owner=owner, overwriteStanzas=overwriteStanzas)
    output.sessionKey = sessionKey
    output.isImportMode = True
    for name in stanzas:
        stanza = output.createStanza(name)
        stanza.needsPopulation = False
        for k in stanzas[name]:
            # Normalize server-side nulls to empty strings.
            # (idiom fix: identity check `is None` instead of `== None`)
            if stanzas[name][k] is None:
                stanza[k] = ''
            else:
                stanza[k] = stanzas[name][k]
    output.isImportMode = False
    return output
def createConf(confName, namespace=None, owner=None, sessionKey=None, hostPath=None):
    '''
    Creates a new conf file. Returns a conf instance of the newly created
    .conf file.
    '''
    uri = entity.buildEndpoint('properties', namespace=namespace, owner=owner, hostPath=hostPath)
    postargs = {'__conf': confName}
    status, response = rest.simpleRequest(uri, postargs=postargs, sessionKey=sessionKey, raiseAllErrors=True)
    # Expect 201 on creation or 200 on preexisting file (automatic handling of 303 redirect).
    wasCreated = (status.status == 201)
    alreadyExisted = (status.previous is not None and status.status == 200)
    if not (wasCreated or alreadyExisted):
        logger.error('createConf - unexpected server response while creating conf file "%s"; HTTP=%s' % (confName, status.status))
    return getConf(confName, namespace=namespace, owner=owner, sessionKey=sessionKey, hostPath=hostPath)
class Conf(util.OrderedDict):
'''
Represents a logical .conf group, and provides read/write services to the
bundle system in splunkd.
Conf is a direct O-R mapping to the CLI property system, and is able to
interact with the individual stanzas and properties on a real-time or
deferred basis. The attribute hierarchy matches that of:
<conf_object>[<stanza_name>][<key_name>]
Getting and setting stanzas or key/value pairs is the same as any python
dictionary:
myConf = getConf('prefs', mysessionKey)
# get the 'default' stanza in the 'prefs' conf file
s = myConf['default']
# get the 'color' property in the 'default' stanza of the 'prefs' conf
color = myConf['default']['color']
# set the 'color' property in the 'default' stanza of the 'prefs' conf
# this is an immediate write
myConf['default']['color'] = 'green'
If you are doing a large number of writes, you can defer the commit action
as follows:
myConf.beginBatch()
myConf['default']['car1'] = 'honda'
myConf['default']['car2'] = 'bmw'
myConf['default']['car3'] = 'lexus'
myConf['default']['car4'] = 'pinto'
myConf['default']['car5'] = 'VW'
myConf.commitBatch()
'''
def __init__(self, name, namespace=None, owner=None, overwriteStanzas=False):
# amrit moved creation of "stanzas" to before calling __init__ from parent
# (OrderedDict) to avoid a circular init we were seeing. OrderedDict.__init__
# was calling our __getitem__, resulting in trying to iterate a self.stanzas
# that had not been defined yet! No idea why this started showing up only
# during our Python 3 migration, but here we are.
self.stanzas = StanzaCollection()
super(Conf, self).__init__(self)
self.name = name
self.namespace = namespace
self.owner = owner
self.sessionKey = None
self.queue = []
self.isAtomic = False
self.isImportMode = False
self.overwriteStanzas = overwriteStanzas
def findStanzas(self, match = '*'):
'''
Returns a list of all the stanzas that match a given string. Simple
wildcard is allowed at the beginning and end of the match string.
'''
output = StanzaCollection()
if match == '*':
output.update(self.stanzas)
elif match.startswith('*'):
found = [(x, self.stanzas[x]) for x in self.stanzas if x.endswith(match[1:])]
output.update(dict(found))
elif match.endswith('*'):
found = [(x, self.stanzas[x]) for x in self.stanzas if x.startswith(match[0:-1])]
output.update(dict(found))
else:
found = [(x, self.stanzas[x]) for x in self.stanzas if x == match]
output.update(dict(found))
return output
def findKeys(self, match = '*'):
'''
Returns a dictionary of keys from all stanzas that match the input
string. Simple wildcard is allowed at the end of the match string.
'''
output = {}
for stanzaName in self.stanzas:
output.update(self.stanzas[stanzaName].findKeys(match))
return output
def beginBatch(self):
'''
Defers all subsequent calls to set attribute values until the
commitBatch() method is called. If commitBatch() is not
called, the Python representation will become out of sync until
the Conf() object is refreshed.
'''
self.isAtomic = True
def commitBatch(self, sessionKey = None):
'''
Commits all edits to the bundle since a beginBatch() call.
Returns false if beginBatch() was not called; true otherwise.
'''
if not self.isAtomic or len(self.queue) == 0: return False
if sessionKey: self.sessionKey = sessionKey
batchKeys = {}
stanza = ''
while len(self.queue):
item = self.queue.pop(0)
if stanza and item['stanza'] != stanza:
self._executeBatch(stanza, batchKeys)
batchKeys = {}
stanza = item['stanza']
batchKeys[item['key']] = item['value']
self._executeBatch(stanza, batchKeys)
self.isAtomic = False
return True
def createStanza(self, name = 'default'):
'''
Initializes a new Stanza object in the current Conf object and
assigns a name.
'''
if self.isImportMode: needsPopulation = True
else: needsPopulation = False
self.stanzas[name] = Stanza(self, name, needsPopulation)
return self.stanzas[name]
def _setKeyValue(self, stanza, key, value):
args = {'stanza': stanza, 'key': key, 'value': value}
if not self.isAtomic:
self._executeSingle(**args)
else:
self.queue.append(args)
#print('_setKeyValue: QUEUE %s %s=%s' % (stanza, key, value))
def getEndpointPath(self, conf=None, stanza=None, key=None):
'''
Returns the splunkd URI for the specified combination of conf file,
stanza, and key name. The namespace and owner context are pulled from
the current Conf() instance.
'''
path = [entity.buildEndpoint('properties', namespace=self.namespace, owner=self.owner)]
parts = []
if conf:
parts.append(conf)
if stanza:
parts.append(stanza)
if key:
parts.append(key)
path.extend([util.safeURLQuote(shard, '') for shard in parts])
return '/'.join(path)
def _executeSingle(self, stanza, key, value = ''):
'''
Commits a write action on a single key/value pair
'''
if self.isImportMode: return
logger.debug('_executeSingle: stanza=%s => %s=%s' % (stanza, key, value))
# first check if stanza exists; create if necessary
try:
uri = self.getEndpointPath(self.name, stanza)
rest.simpleRequest(uri, sessionKey=self.sessionKey)
except splunk.ResourceNotFound:
createUri = self.getEndpointPath(self.name)
serverResponse, serverContent = rest.simpleRequest(
createUri,
self.sessionKey,
postargs={'__stanza': stanza}
)
# now write the key
serverResponse, serverContent = rest.simpleRequest(
uri,
self.sessionKey,
postargs={key: value},
method=self._getWriteMethod()
)
if serverResponse.status != 200:
logger.error('_executeSingle - HTTP error=%s server returned: %s' % (serverResponse.status, serverContent))
raise splunk.RESTException(serverResponse.status, '_executeSingle - server returned: %s' % serverContent)
def _executeBatch(self, stanza, kvPairs):
if self.isImportMode: return
logger.debug('_executeBatch: stanza=%s => %s' % (stanza, kvPairs))
# first check if stanza exists; create if necessary
try:
uri = self.getEndpointPath(self.name, stanza)
rest.simpleRequest(uri, sessionKey=self.sessionKey)
except splunk.ResourceNotFound:
createUri = self.getEndpointPath(self.name)
serverResponse, serverContent = rest.simpleRequest(
createUri,
self.sessionKey,
postargs={'__stanza': stanza}
)
# now write out the keys
serverResponse, serverContent = rest.simpleRequest(
uri,
self.sessionKey,
postargs=kvPairs,
method=self._getWriteMethod()
)
if serverResponse.status != 200:
logger.error('_executeBatch - HTTP error=%s server returned: %s' % (serverResponse.status, serverContent))
raise splunk.RESTException(serverResponse.status, '_executeBatch - server returned: %s' % serverContent)
def _getWriteMethod(self):
return self.overwriteStanzas and 'PUT' or 'GET'
def _refreshStanza(self, stanzaName):
uri = self.getEndpointPath(self.name, stanzaName)
serverResponse, serverContent = rest.simpleRequest(uri, sessionKey=self.sessionKey)
#logger.debug('_refreshStanza - got stanza data back')
keys = rest.format.parseFeedDocument(serverContent)
keys = keys.toPrimitive()
#logger.debug('_refreshStanza - parsed stanza data; got %s keys' % len(keys))
self.isImportMode = True
for k in keys:
self.stanzas[stanzaName][k] = keys[k]
self.isImportMode = False
def __getitem__(self, key):
    """Return the stanza named *key*, creating it on first reference and
    lazily populating it from the server on first access."""
    if key not in self.stanzas:
        self.createStanza(key)
    if self.stanzas[key].needsPopulation:
        logger.debug('stanza=%s needs loading...' % key)
        self._refreshStanza(key)
        self.stanzas[key].needsPopulation = False
    return self.stanzas[key]
def __setitem__(self, key, value):
    """Direct stanza assignment is intentionally unsupported; stanzas must
    be created through createStanza() so server state stays consistent."""
    raise NotImplementedError('Direct attribute setting is not allowed. Use the createStanza() method instead.')
def __iter__(self):
    # Iterate over the cached stanza names.
    return self.stanzas.__iter__()
def __len__(self):
    # Number of cached stanzas.
    return self.stanzas.__len__()
def __str__(self):
    # Delegate to the underlying stanza mapping's string form.
    return self.stanzas.__str__()
def __repr__(self):
    """Represent the conf as the list of its stanza names."""
    return repr(list(self.stanzas))
def __contains__(self, key):
    # Membership test against cached stanza names (no server round-trip).
    return self.stanzas.__contains__(key)
def get(self, key, default=None):
    """Dict-style lookup: return the stanza for *key*, or *default* when
    the lookup raises KeyError."""
    try:
        result = self.__getitem__(key)
    except KeyError:
        result = default
    return result
def keys(self):
    """Return the stanza names as a list; an uninitialized stanza store
    (no `stanzas` attribute) yields an empty dict key view instead."""
    try:
        names = self.stanzas.keys()
    except AttributeError:
        return dict().keys()
    return list(names)
class StanzaCollection(util.OrderedDict):
    '''
    Represents a collection of stanzas, keyed by stanza name, preserving
    insertion order.
    '''

    def __init__(self, *args, **kwds):
        # BUG FIX: the previous code passed `self` as an extra positional
        # argument to OrderedDict.__init__, which made construction with
        # initial data raise a TypeError and self-initialization a no-op.
        super(StanzaCollection, self).__init__(*args, **kwds)

    def getMerged(self):
        '''
        Returns a single stanza with all the keys merged according to the
        bundle merge rules.

        Names are applied in reverse-sorted order, so for conflicting
        keys the earliest-sorted name wins (it is applied last).
        '''
        namelist = sorted(self.keys())
        namelist.reverse()
        output = Stanza()
        for name in namelist:
            output.update(self[name])
        return output
class Stanza(util.OrderedDict):
    '''
    Represents a stanza block, as defined by the bundle system. Contains a
    dictionary of key/value pairs and, when bound to a Conf via *confRef*,
    writes key changes through to the server.
    '''

    def __init__(self, confRef = None, name = '', needsPopulation=False):
        # BUG FIX: `self` was previously passed as a positional argument to
        # OrderedDict.__init__ (self-initialization), which is at best a
        # no-op and breaks when combined with other arguments.
        super(Stanza, self).__init__()
        self.confRef = confRef                    # parent Conf for write-through (or None)
        self.name = name                          # stanza name
        self.needsPopulation = needsPopulation    # lazy-load flag used by Conf.__getitem__

    def findKeys(self, match = '*'):
        '''
        Returns a dictionary of keys from the current stanza that match the
        input string. Simple wildcard is allowed at the end of the match
        string.
        '''
        if match == '*' or not match:
            return dict(self)
        elif match.endswith('*'):
            o = [(x, self[x]) for x in self if x.startswith(match[0:-1])]
        else:
            o = [(x, self[x]) for x in self if x == match]
        return dict(o)

    def isDisabled(self):
        '''Return True when the "disabled" key holds the string "true".'''
        try:
            val = self["disabled"]
            return (val == "true")
        except KeyError:
            # Missing key means the stanza is enabled (narrowed from a
            # bare except: only KeyError is expected from the lookup).
            return False

    def __setitem__(self, key, value):
        # Write-through: push the change to the server via the owning Conf
        # before updating the local mapping.
        if self.confRef:
            self.confRef._setKeyValue(self.name, key, value)
        super(Stanza, self).__setitem__(key, value)

    def __delitem__(self, key):
        raise NotImplementedError('Attribute deletion is not supported. Use an empty value instead.')

    def __str__(self):
        return 'Stanza [%s] %s' % (self.name, super(Stanza, self).__str__())
# tests
if __name__ == '__main__':
    import unittest
    import time
    #logging.basicConfig(level=logging.DEBUG)

    # Integration tests: these require a live local Splunk server reachable
    # with the default admin credentials ('admin'/'changeme').
    class MainTest(unittest.TestCase):

        def setUp(self):
            # Authenticate once per test; every request reuses this key.
            self.sessionKey = auth.getSessionKey('admin', 'changeme')

        def test1_SingleWrites(self):
            # Each assignment issues an individual REST write.
            bun = getConf('web', sessionKey=self.sessionKey)
            bun['delete_me_1']['test_key1'] = 'single write 1'
            bun['delete_me_1']['test_key2'] = 'single write 2'
            verify = getConf('web', sessionKey=self.sessionKey)
            self.assertEqual(verify['delete_me_1']['test_key1'], 'single write 1')
            self.assertEqual(verify['delete_me_1']['test_key2'], 'single write 2')

        def test2_BatchWrites(self):
            # beginBatch()/commitBatch() groups all writes into one request
            # per stanza.
            bun = getConf('web', sessionKey=self.sessionKey)
            bun.beginBatch()
            bun['delete_me_1']['test_key1'] = 'batch write 1'
            bun['delete_me_1']['test_key3'] = 'batch write 2'
            bun['delete me 2']['test_key4'] = 'batch write 3'
            bun['delete me 2']['test_key5'] = 'batch write 4'
            bun.commitBatch()
            verify = getConf('web', sessionKey=self.sessionKey)
            self.assertEqual(verify['delete_me_1']['test_key1'], 'batch write 1')
            self.assertEqual(verify['delete_me_1']['test_key3'], 'batch write 2')
            self.assertEqual(verify['delete me 2']['test_key4'], 'batch write 3')
            self.assertEqual(verify['delete me 2']['test_key5'], 'batch write 4')

        def test3_StanzaCollection(self):
            '''
            test the ordered dictionary nature of StanzaCollection
            '''
            sc = StanzaCollection()
            keys = 'abcdefghijklmnopqrstuvwxyz'
            for char in keys:
                sc[char] = 'foo'
            # Iteration order must match insertion order.
            for i, k in enumerate(sc):
                self.assertEquals(k, keys[i])

        def test4_NamespaceWrite(self):
            '''
            Check write, and subsequent read of key value sent to the
            debug namespace
            '''
            # check that namespace is set
            conf = getConf('web', namespace='testing', sessionKey=self.sessionKey)
            self.assertEqual(conf.namespace, 'testing')
            # add value to 'testing' NS only
            conf['delete_me_3']['test_key6'] = 'ns_write_1'
            conf = getConf('web', namespace='testing', sessionKey=self.sessionKey)
            self.assertEqual(conf['delete_me_3']['test_key6'], 'ns_write_1')
            # verify that value is not available in different NS
            conf = getConf('web', namespace='search', sessionKey=self.sessionKey)
            self.assertRaises(KeyError, conf['delete_me_3'].__getitem__, 'test_key6')
            # verify presence using legacy non-namespace mode
            #
            # TODO: should this be valid?
            #
            #conf = getConf('web', sessionKey=self.sessionKey)
            #self.assertNotEqual(conf['settings'].get('delete_me_3'), None, 'Failed to find delete_me_3 stanza')

        def test_createConf(self):
            '''
            Check creating new conf file
            '''
            # Unique conf name per run so reruns do not collide.
            confName = 'testconf_%s' % round(time.time())
            newConf = createConf(confName, namespace="testing", sessionKey=self.sessionKey)
            self.assert_(isinstance(newConf, Conf))
            challenge = getConf(confName, namespace="testing", sessionKey=self.sessionKey)
            self.assertEquals(challenge.name, confName)

        def test_findStanzaPrefix(self):
            conf = getConf('indexes', namespace='search', sessionKey=self.sessionKey)
            stanzas = conf.findStanzas('_block*')
            self.assertEquals(len(stanzas), 1)
            self.assertEquals(list(stanzas.keys())[0], '_blocksignature')

        def test_findStanzaSuffix(self):
            conf = getConf('indexes', namespace='search', sessionKey=self.sessionKey)
            stanzas = conf.findStanzas('*bucket')
            self.assertEquals(len(stanzas), 1)
            self.assertEquals(list(stanzas.keys())[0], '_thefishbucket')

        def test_emptyValueWrite(self):
            '''
            setting a new key to an empty value will not get persisted
            '''
            # try write of empty value
            conf = getConf('web', namespace='search', sessionKey=self.sessionKey)
            stanza = conf['test']
            stanza['emptyKey'] = ''
            # confirm empty value
            conf = getConf('web', namespace='search', sessionKey=self.sessionKey)
            stanza = conf['test']
            self.assert_('emptyKey' not in stanza, '"emptyKey" key was written when it was not expected to')

        def test_remote_hostpath(self):
            # hostPath may be omitted or point at an explicit server.
            conf = getConf('web', namespace='search', sessionKey=self.sessionKey)
            self.assert_(isinstance(conf, Conf), "The optional argument hostPath works when ignored")
            conf = getConf('indexes', namespace='search', sessionKey=self.sessionKey, hostPath=splunk.getLocalServerInfo())
            self.assert_(isinstance(conf, Conf), "The optional argument hostPath works when used")
            confName = 'testconf_%s' % round(time.time())
            newConf = createConf(confName, namespace="testing", sessionKey=self.sessionKey, hostPath=splunk.getLocalServerInfo())
            self.assert_(isinstance(newConf, Conf), "The optional argument hostPath works when used in createConf")

    suite = unittest.TestLoader().loadTestsFromTestCase(MainTest)
    unittest.TextTestRunner(verbosity=2).run(suite)
|
from django.db import models
# Create your models here.
# as classes serão criadas aqui
# code first - fazer o código primeiro e depois gerar o bd em uma aplicação
# python utiliza o code first
# o models é a herança de tudo que tem de Model no django
class Pessoa(models.Model):
    """Django model for a person: name, gender, e-mail, biography and
    bookkeeping flags (creation timestamp, active)."""

    # Gender choices: the left value is stored in the database, the right
    # one is what the user sees.
    SEXOS = (
        ('M', 'Masculino'),
        ('F', 'Feminino'),
    )

    nome = models.CharField(max_length=255, verbose_name='Nome')
    sobrenome = models.CharField(max_length=255, verbose_name='Sobrenome')
    sexo = models.CharField(max_length=255, verbose_name='Sexo', choices=SEXOS)
    email = models.EmailField(max_length=255, verbose_name='E-mail', blank=False)
    biografia = models.TextField(null=True, blank=True)
    data_de_criacao = models.DateTimeField(auto_now_add=True)
    ativo = models.BooleanField(default=True)

    def __str__(self):
        # Display as "first-name last-name".
        return f'{self.nome} {self.sobrenome}'
|
def count_overlapping(text, pattern):
    """Count occurrences of *pattern* in *text*, overlapping included.

    An empty pattern counts as zero occurrences.
    """
    if not pattern:
        return 0
    # One candidate start position per character; startswith avoids slicing.
    return sum(1 for i in range(len(text) - len(pattern) + 1)
               if text.startswith(pattern, i))


if __name__ == '__main__':
    # Guarding the I/O makes the module importable (and the helper testable).
    s = input("请输入字符串")
    sub = "abba"  # e.g. sub = "bob"
    # BUG FIX: the message used to hard-code "bob" even though `sub` is
    # what is actually counted.
    print("Number of times %s occurs is:" % sub)
    print(count_overlapping(s, sub))
|
class NumberGuesser:
    """Brute-force digit-puzzle solver: find a single non-zero digit d such
    that for some a in [1, 9998] and some b built from a's non-zero digits
    (see getPossibleB), the digits of c = a - b consist exactly of the
    non-zero digits in *leftOver* plus d (zeros are ignored throughout)."""

    def guess(self, leftOver):
        """Return the missing digit as a one-character string, or None when
        no (a, b) pair is consistent with *leftOver*."""
        for a in range(1, 9999):
            bList = self.getPossibleB(a)
            for b in bList:
                if a > b:
                    c = a - b
                    c = str(c)
                    # Zeros in the difference are discarded entirely.
                    while '0' in c:
                        c = self.removeDigit(c, '0')
                    # Cancel every non-zero leftover digit against c.
                    valid = True
                    for x in leftOver:
                        if x != '0':
                            if x in c:
                                c = self.removeDigit(c, x)
                            else:
                                valid = False
                                break
                    if not valid:
                        continue
                    # Exactly one non-zero digit must remain: the answer.
                    if len(c) == 1 and c[0] > '0':
                        return c[0]

    def getPossibleB(self, a):
        """Return every value in [1, 9998] whose non-zero digits are a's
        non-zero digits in some order, with zeros optionally inserted at
        any non-leading position."""
        nonZeroDigits = []
        for x in str(a):
            if x != '0':
                nonZeroDigits.append(int(x))
        result = []
        self.recursiveFill(0, nonZeroDigits, result)
        return result

    def recursiveFill(self, curValue, leftDigits, result):
        """Depth-first enumeration: extend curValue by appending '0' (only
        when non-empty) or one still-unused digit from leftDigits (used
        digits are marked by zeroing them in place). Records curValue when
        all digits are consumed and it lies in [1, 9998]."""
        if curValue > 9998:
            return
        # Move curValue into result.
        if sum(leftDigits) == 0:
            if curValue >= 1 and curValue <= 9998:
                result.append(curValue)
        # Append 0.
        if curValue != 0:
            value = curValue * 10
            self.recursiveFill(value, leftDigits, result)
        # Append one digit.
        for i in range(len(leftDigits)):
            if leftDigits[i] != 0:
                tmp = leftDigits[i]
                value = curValue * 10 + leftDigits[i]
                leftDigits[i] = 0
                self.recursiveFill(value, leftDigits, result)
                leftDigits[i] = tmp

    def removeDigit(self, s, ch):
        """Return *s* with the first occurrence of *ch* removed.

        NOTE(review): implicitly returns None when ch is absent; callers
        in this class always check membership with `in` first.
        """
        for i in range(len(s)):
            if s[i] == ch:
                return s[:i] + s[i+1:]
|
from django.contrib import admin
from SmartSuperHero.models import Doctor, Patient, GenericQuestion, Question, Report
# Register your models here.
# Make the app's models editable in the Django admin site.
for model in (Doctor, Patient, GenericQuestion, Question, Report):
    admin.site.register(model)
# lista = []
# n = int(input())
# input_strings = input("Numerele tale:")
# input_strings = input_strings.split()
# for i in range(len(input_strings)):
# lista.append(int(input_strings[i]))
# quick sort -> algoritm divide et impera
# alegi un pivot si pui numerele mai mici decat pivotul in stanga si pe cele mai mari in dreapta
# spatiu extra nu este necesar
# functia partitie -> ia elementul pivot si il pune in pozitia corecta din lista
def partitie(listaP, st, dr):
    """Lomuto partition of listaP[st..dr] (inclusive): take the last
    element as pivot, move everything <= pivot to its left, everything
    greater to its right, and return the pivot's final index."""
    pivot = listaP[dr]
    boundary = st  # first slot of the "greater than pivot" region
    for idx in range(st, dr):
        if listaP[idx] <= pivot:
            listaP[boundary], listaP[idx] = listaP[idx], listaP[boundary]
            boundary += 1
    # Drop the pivot into the gap between the two regions.
    listaP[boundary], listaP[dr] = listaP[dr], listaP[boundary]
    return boundary


def quickSort(listaP, st, dr):
    """In-place quicksort of listaP[st..dr] (inclusive bounds)."""
    if st >= dr:
        return
    pivot_index = partitie(listaP, st, dr)
    quickSort(listaP, st, pivot_index - 1)
    quickSort(listaP, pivot_index + 1, dr)
# quickSort(lista, 0, len(lista) - 1)
# print(lista) |
# Imports
import random
import numpy as np
from sklearn.metrics import confusion_matrix, auc
def one_hot_dna(seq, exp_len):
    '''
    One-hot encodes DNA sequence data.

    Parameters
    ----------
    seq : list
        Input list of DNA sequences (str over the alphabet "ATGC").
    exp_len : int
        Expected length of output sequences; sequences of any other
        length are skipped.

    Returns
    ----------
    encode: list
        List of one-hot encoded DNA sequences, each a (4 * exp_len, 1)
        numpy array with the four channels of each position stacked.
    '''
    d = {'A': 0, 'T': 1, 'G': 2, 'C': 3}
    encode = []
    for dna in seq:
        # Check the length up front (the original only checked after
        # encoding), so no work is wasted on sequences that get skipped.
        if len(dna) != exp_len:
            continue
        one_hot_list = []
        for nuc in dna:
            m = np.zeros([4, 1])
            m[d[nuc]] = 1
            one_hot_list.append(m)
        encode.append(np.vstack(one_hot_list))
    return encode
def gen_label_array(s):
    '''
    Generate a label array of size (m, n), where each column contains
    m-1 zeros and a single one value.

    Parameters
    ----------
    s : tuple
        Tuple of label array size (m, n).

    Returns
    ----------
    lab: np.array
        Array of shape (m, n); each column is a random one-hot label.
    '''
    m, n = s[0], s[1]
    # One random class index in [0, m) per column.
    values = np.random.choice(list(range(0, m)), size=(1, n))
    # BUG FIX: the identity matrix must have exactly m rows. Sizing it as
    # max(values) + 1 returned an array with fewer than m rows whenever
    # class m-1 happened not to be drawn.
    value_array = np.eye(m)[values]
    lab = value_array[0, :, :].T
    return lab
def sample_array(array, samp_size, freq):
    '''
    Sample an array continuously along the rows.

    Parameters
    ----------
    array : np.array
        Array to be sampled from.
    samp_size : int
        Length of the range of values to be sampled continuously.
    freq : int
        Frequency of sampling (row offsets are aligned to multiples of it).

    Returns
    ----------
    sample : np.array
        Sampled array of shape (samp_size * freq, array.shape[1]); each
        column is drawn from an independent random offset.
    '''
    # BUG FIX: use floor division. True division yields a float, and
    # random.randint rejects non-integer bounds on modern Python.
    t = array.shape[0] // freq
    r = samp_size * freq
    sample_list = []
    for i in range(0, array.shape[1]):
        # Random row offset for this column, aligned to a multiple of freq.
        n = random.randint(0, t - samp_size) * freq
        sample_list.append(array[n:n + r, i:i + 1])
    sample = np.hstack(sample_list)
    return sample
def train_test_split(array, train_num):
    '''
    Split an array randomly along columns into training and testing arrays.

    Parameters
    ----------
    array : np.array
        Array of data to be split along columns.
    train_num :
        Number of columns to be in the training array.

    Returns
    ----------
    train_array : np.array
        Array of training data (train_num randomly chosen columns).
    test_array : np.array
        Array of testing data (the remaining columns).
    '''
    n_cols = array.shape[1]
    # Draw train_num distinct column indices at random.
    train_ind = np.random.choice(n_cols, train_num, replace=False)
    chosen = set(train_ind)
    test_ind = np.array([col for col in range(n_cols) if col not in chosen])
    train_array = array[:, tuple(train_ind)]
    test_array = array[:, tuple(test_ind)]
    return train_array, test_array
def split(a, n):
    '''
    Split a list or 1D array into approximately equal sized lists or 1D
    arrays.

    Parameters
    ----------
    a : np.array or list
        Array or list of data to be split.
    n : int
        Number of sub lists or arrays to output.

    Returns
    ----------
    generator
        Yields the n consecutive chunks; the first len(a) % n chunks are
        one element longer than the rest.
    '''
    size, extra = divmod(len(a), n)

    def _chunks():
        start = 0
        for i in range(n):
            # The first `extra` chunks absorb the remainder.
            stop = start + size + (1 if i < extra else 0)
            yield a[start:stop]
            start = stop

    return _chunks()
def pred_gen(scores):
    '''
    Generates list of binary predictions for all possible threshold values.

    Parameters
    ----------
    scores : np.array
        Array of predicted values.

    Returns
    ----------
    pred_list : list
        Lists of binary predictions, one inner list per threshold; the
        thresholds are the sorted scores themselves, and a prediction is
        1 when the score is >= the threshold.
    '''
    return [[1 if value >= thresh else 0 for value in scores]
            for thresh in np.sort(scores)]
def pr_calc(actual, prediction_list):
    '''
    Calculates true positive rate and false positive rate for lists of
    binary predictions.

    Parameters
    ----------
    actual : np.array
        Array of ground truth binary values.
    prediction_list : list
        Lists of arrays of binary predictions.

    Returns
    ----------
    tpr : list
        List of true positive rate values, one per prediction list.
    fpr : list
        List of false positive rate values, one per prediction list.
    '''
    tpr, fpr = [], []
    for prediction in prediction_list:
        # For binary labels, ravel() yields (tn, fp, fn, tp).
        tn, fp, fn, tp = confusion_matrix(actual, prediction).ravel()
        tpr.append(tp / (tp + fn))
        fpr.append(fp / (fp + tn))
    return tpr, fpr
|
from django.contrib import admin
from .models import user_mailcompose_tb,user_mailsave_tb,contacts_tb,user_hobby
# Register your models here.
# Expose the mail/contact models in the Django admin site.
for model in (user_mailcompose_tb, user_mailsave_tb, contacts_tb, user_hobby):
    admin.site.register(model)
import scrapy
import time
import datetime
import re
import json
from REI.scraper import get_ajax_url
from REI.scraper import get_price_history
from bs4 import BeautifulSoup
from REI.crawl import gen_urls
from random import randint
from scrapy.http.request import Request
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors import LinkExtractor
from REI.items import HouseItem
class ZillowSpiderSpider(CrawlSpider):
    """Scrapy crawler that walks Zillow search-result pages and scrapes
    each listing into a HouseItem, chaining two AJAX requests per house
    for its price history and tax history."""
    name = "zillow"
    allowed_domains = ["zillow.com"]
    visited = True           # used as the follow= flag for result-page links
    requests = 0             # count of handled responses, used for pacing
    max_interval = 10        # upper bound for the randomized pause interval
    request_interval = 10    # re-randomized after each pause
    pauseEnabled = False;
    # Default Arizona search URL; normally replaced in __init__.
    start_urls = ( 'http://www.zillow.com/homes/for_sale/AZ/fsba,fsbo,new_lt/house,condo,apartment_duplex,townhouse_type/8_rid/days_sort/33.643688,-112.216523,33.61814,-112.261584_rect/14_zm/0_mmm/',
    )
    rules = (
        # Extract links matching 'homes' (but not matching 'subsection.php')
        # and follow links from them (since no callback means follow=True by default).
        #Once a link is visited, do not follow it again
        Rule(LinkExtractor(allow=('/homes.*?_p/', ), deny=('subsection\.php', )), follow=visited, ),
        # Extract links matching 'homedetails' and parse them with the spider's method parse_house
        Rule(LinkExtractor(allow=('/homedetails/', )), callback='parse_house'),
        Rule(LinkExtractor(allow=('/community/', )), callback='parse_house'),
    )

    def __init__(self, url=None, prll=False, *args, **kwargs):
        """With prll set, crawl only the given url; otherwise expand it
        into a list of search URLs via gen_urls."""
        super(ZillowSpiderSpider, self).__init__(*args, **kwargs)
        if (prll):
            self.start_urls = [ url ]
        else:
            self.start_urls = gen_urls(url)

    def link_callback(self,response):
        """Optionally sleep a random sub-2-second interval every few
        requests to pace the crawl."""
        #somehow couldnt remove this to another function
        self.requests += 1
        # NOTE(review): `&` is bitwise AND; it works here because both
        # operands are 0/1-valued, but logical `and` was likely intended.
        if (self.pauseEnabled & (self.requests % self.request_interval == 0)):
            print("Pause")
            self.request_interval = randint(0,self.max_interval)
            pause_time = randint(0,200)/100
            time.sleep(pause_time)
            print("Paused " + str(pause_time) + "s")

    def parse_house(self,response):
        """Scrape one listing page into a HouseItem and return a chained
        Request for the price-history AJAX fragment (which in turn chains
        the tax-history request)."""
        #wait a random amount of time to disguise spider
        #time.sleep(randint(0,50)/100)
        self.requests += 1
        # NOTE(review): same bitwise-& pattern as link_callback.
        if (self.pauseEnabled & (self.requests % self.request_interval == 0)):
            print("Pause")
            self.request_interval = randint(1,self.max_interval)
            pause_time = randint(0,200)/100
            time.sleep(pause_time)
            print("Paused " + str(pause_time) + "s")
        house = HouseItem()
        house['zillow_url'] = response.url
        # Address is the text before the first comma of the <h1>; fall back
        # to the whole field when no comma is present.
        address_field = response.xpath('//h1/text()').extract()[0]
        address_test = re.search( r'^(.*?),', address_field )
        if (address_test == None):
            house['address'] = address_field
        else:
            house['address'] = address_test.group(1)
        house['city'] = re.search( r'^(.*?),', response.xpath('//h1/span/text()').extract()[0] ).group(1)
        house['state'] = re.search( r',\s(.*?)\s', response.xpath('//h1/span/text()').extract()[0] ).group(1)
        # Strip currency formatting down to digits and the decimal point.
        non_decimal = re.compile(r'[^\d.]+')
        house['price'] = non_decimal.sub('', response.xpath('//*[contains(concat(" ", normalize-space(@class), " "), " main-row ")]/span/text()').extract()[0].replace(r'$', "").replace(r',', "").replace( "[^\\d]", "" ) )
        house['sale_status'] = response.xpath('//*[contains(concat(" ", normalize-space(@class), " "), " status-icon-row ")]/text()').extract()[1].lstrip().rstrip()
        stripped_line = house['sale_status'].strip()
        # Some layouts put the status in a nested <span> instead.
        if (stripped_line == ""):
            house['sale_status'] = response.xpath('//*[contains(concat(" ", normalize-space(@class), " "), " status-icon-row ")]/span/text()').extract()[0]
        zestimate_field = response.xpath('//*[contains(concat(" ", normalize-space(@class), " "), " zest-value ")]/text()').extract()[1]
        if (zestimate_field != 'Unavailable'):
            house['rent_zestimate'] = re.search( r'^(.*?)/', zestimate_field ).group(1).replace(r',', "").replace(r'$', "")
        else:
            # Sentinel for "no rent Zestimate available".
            house['rent_zestimate'] = -1;
        # Bedrooms: leading number, or "Studio" when the field has none.
        bedroom_field = re.search( r'^(.*?)\s', response.xpath('//*[contains(concat(" ", normalize-space(@class), " "), " addr_bbs ")]/text()').extract()[0] )
        if (bedroom_field != None):
            house['bedrooms'] = bedroom_field.group(1)
        else:
            house['bedrooms'] = "Studio"
        house['bathrooms'] = re.search( r'^(.*?)\s', response.xpath('//*[contains(concat(" ", normalize-space(@class), " "), " addr_bbs ")][2]/text()').extract()[0] ).group(1)
        house['sqrft'] = re.search( r'^(.*?)\s', response.xpath('//*[contains(concat(" ", normalize-space(@class), " "), " addr_bbs ")][3]/text()').extract()[0] ).group(1).replace(r',', "")
        # Lot size: the field is either pure text (no digits at all) or
        # "<qualifier> <size>" where only the size part is kept.
        lot_field = response.xpath('//*[contains(concat(" ", normalize-space(@class), " "), " zsg-list_square ")]/li[1]/text()').extract()[0]
        lot_field_test = re.search( r'^([^0-9]*)$', lot_field)
        if (lot_field_test != None):
            house['lot_size'] = lot_field
        else:
            house['lot_size'] = re.search( r'\s(.*?)$', lot_field ).group(1).replace(r',', "")
        # Zillow property id embedded in the URL.
        house['id'] = re.search(r'/(\d*)_zpid', response.url).group(1)
        #https://docs.python.org/2/library/datetime.html
        house['timestamp'] = datetime.datetime.now().isoformat()
        #Request Histories
        soup = BeautifulSoup(response.body)
        history_url = get_ajax_url(soup, "z-hdp-price-history")
        tax_url = get_ajax_url(soup, "z-expando-table")
        history_request = Request(history_url,
            callback=self.parse_history)
        # Pass the partially filled item and the tax URL along the chain.
        history_request.meta['item'] = house
        history_request.meta['tax_url'] = tax_url
        house['tax_url'] = tax_url
        return history_request

    def parse_history(self,response):
        """Parse the price-history AJAX fragment into the item, then chain
        a request for the tax-history fragment."""
        #Parse Price History Table
        house = response.meta['item']
        tax_url = house['tax_url']
        price_history = []
        # The AJAX response wraps escaped HTML in a small JSON envelope.
        pattern = r' { "html": "(.*)" }'
        html = re.search(pattern, response.body).group(1)
        html = re.sub(r'\\"', r'"', html) # Correct escaped quotes
        html = re.sub(r'\\/', r'/', html) # Correct escaped forward
        if (html != ""):
            soup = BeautifulSoup(html)
            table = soup.find('table')
            table_body = table.find('tbody')
            rows = table_body.find_all('tr')
            for row in rows:
                cols = row.find_all('td')
                cols = [ele for ele in cols]
                cols = cols[:3]
                # Only rows whose price cell contains a <span> carry data.
                if (cols[2].find('span') != None):
                    date = cols[0].get_text()
                    event = cols[1].get_text()
                    price = cols[2].find('span').get_text()
                    price_history.append([date, event, price])
        #Store history as JSON string
        house['price_history'] = json.dumps(price_history)
        tax_request = Request(tax_url,
            callback=self.parse_taxes)
        tax_request.meta['item'] = house
        return tax_request

    def parse_taxes(self,response):
        """Parse the tax-history AJAX fragment and yield the finished
        HouseItem."""
        #Parse Tax History Table
        house = response.meta['item']
        tax_history = []
        pattern = r' { "html": "(.*)" }'
        html = re.search(pattern, response.body).group(1)
        html = re.sub(r'\\"', r'"', html) # Correct escaped quotes
        html = re.sub(r'\\/', r'/', html) # Correct escaped forward
        if (html != "") :
            soup = BeautifulSoup(html)
            table = soup.find('table')
            table_body = table.find('tbody')
            rows = table_body.find_all('tr')
            for row in rows:
                try:
                    cols = row.find_all('td')
                    cols = [ele for ele in cols]
                    date = cols[0].get_text()
                    tax = cols[1].contents[0]
                    assessment = cols[3].get_text()
                    tax_history.append([date, tax, assessment])
                except:
                    # NOTE(review): `Error` is undefined — this append
                    # raises NameError inside the except handler and would
                    # abort the row loop; probably meant a string marker.
                    tax_history.append([Error])
        house['tax_history'] = json.dumps(tax_history)
        yield house
from django.shortcuts import render,render_to_response
from django.contrib.auth import authenticate, login, logout
from django.contrib import messages
from .forms import loginUser, registerUser
from django.contrib.auth.models import User,Group
from django.contrib.auth import logout
from django.core.exceptions import ObjectDoesNotExist
from .models import Homework, Record
from django.http import JsonResponse,HttpResponseRedirect, Http404
from django.conf import settings
from django.views.decorators.csrf import csrf_exempt
from django.utils import timezone
from django.http import StreamingHttpResponse,HttpResponse, HttpResponseRedirect
from django.urls import reverse
from django import forms
import codecs
# Create your views here.
@csrf_exempt
def login_user(request):
    """
    Handle the login form.

    :return: on success redirect to the user's home page ('Student' group
        members go to the student page, everyone else to the teacher
        page); on failure flash an error message and re-render the login
        page.
    """
    if request.method == 'POST':
        form = loginUser(request.POST)
        if form.is_valid():
            username = form.cleaned_data['Username']
            password = form.cleaned_data['Password']
            user = authenticate(username=username, password=password)
            if user is not None:
                login(request, user)
                # Route by group membership.
                if user.groups.filter(name='Student').exists():
                    return HttpResponseRedirect('/upload/Account/')
                else:
                    return HttpResponseRedirect('/upload/Teacher/')
            else:
                messages.error(request, '登录失败!')
    else:
        form = loginUser()
    return render(request, 'login.html')
def register_user(request):
    """Register a new student account: the two password fields must match
    and the username must be unused; new users join the 'Student' group."""
    print(request)
    if request.method == 'POST':
        form = registerUser(request.POST)
        print(form)
        if form.is_valid():
            print("OK")
            if form.cleaned_data['Password']==form.cleaned_data['ConfirmPass']:
                username = form.cleaned_data['Username']
                # Username must not be taken already.
                if User.objects.filter(username__exact=username).count()==0:
                    password = form.cleaned_data['Password']
                    user = User.objects.create_user(username=username, password=password)
                    user.groups.add(Group.objects.get(name='Student'))
                    user.save()
                    messages.success(request, '注册成功!')
                else:
                    messages.error(request, "该用户名已经被注册")
            else:
                messages.error(request, '两次输入的密码不匹配')
    else:
        form = registerUser()
    return render(request, 'register.html')
@csrf_exempt
def Account(request):
    """
    Personal home page (student view).

    :return: the rendered account page.
    """
    return render(request,'Account.html')
@csrf_exempt
def get_homeworks(request):
    """
    Build the JSON payload for the student dashboard table: one row per
    homework with its description, deadline, submission status and (once
    graded, status == 2) the score.

    :return: JsonResponse with keys data/code/msg/count.
    """
    homeworks = Homework.objects.all()
    rows = []  # renamed from `dict`, which shadowed the builtin
    for h in homeworks:
        row = {
            'id': h.pk,
            'des': h.Description,
            'duedate': h.Deadline.strftime('%Y-%m-%d,%H:%M:%S'),
        }
        # Fetch the student's submission once instead of re-running the
        # same filtered query three times (existence, grading state, score).
        record = Record.objects.filter(Homework=h, Student=request.user).first()
        if record is not None:
            row['status'] = "已提交"
            if record.status == 2:
                row['grade'] = record.Scores
            else:
                row['grade'] = '老师尚未打分'
        else:
            row['status'] = "未提交"
        rows.append(row)
    resultdict = {
        'data': rows,
        'code': 0,
        'msg': "",
        'count': homeworks.count(),
    }
    return JsonResponse(resultdict, safe=False)
@csrf_exempt
def upload_file(request, pk):
    """
    Save an uploaded homework file under MEDIA_ROOT and create the
    submission Record for homework *pk*.

    :return: JSON {'status': 1} on success.
    """
    import os  # local import: only needed here for filename sanitizing

    file = request.FILES.get('file')
    # SECURITY FIX: keep only the base name so a crafted client-supplied
    # filename (e.g. "../../etc/x") cannot escape MEDIA_ROOT.
    safe_name = os.path.basename(file.name)
    filename = '%s/%s' % (settings.MEDIA_ROOT, safe_name)
    with open(filename, 'wb') as f:
        for chunk in file.chunks():
            f.write(chunk)
    uploaded = Homework.objects.get(pk=pk)
    Record.objects.create(Homework=uploaded, Student=request.user, Upload_time=timezone.now(), File=file).save()
    return JsonResponse({'status': 1})
@csrf_exempt
def Teacher(request):
    """Render the teacher dashboard page."""
    return render(request, 'Teacher.html')
@csrf_exempt
def get_teacher_homeworks(request):
    """Return every homework as the JSON table payload used by the
    teacher dashboard (data/code/msg/count)."""
    rows = [
        {
            'id': h.pk,
            'des': h.Description,
            'duedate': h.Deadline.strftime('%Y-%m-%d,%H:%M:%S'),
        }
        for h in Homework.objects.all()
    ]
    payload = {'data': rows, 'code': 0, 'msg': "", 'count': len(rows)}
    return JsonResponse(payload, safe=False)
@csrf_exempt
def assign(request):
    """Teacher action: create a Homework from the posted description and
    deadline, then re-render the teacher page; non-POST requests are
    redirected to the site root."""
    if request.method == 'POST':
        Homework.objects.create(Description=request.POST.get('Description'), Deadline=request.POST.get('Deadline')).save()
        ret={'status': 1}  # NOTE(review): computed but never returned
        return render(request, 'Teacher.html')
    else:
        return HttpResponseRedirect('/')
def logout_view(request):
    """Log the current user out, flash a confirmation, and show the
    logout page."""
    logout(request)
    messages.success(request, "您已退出!")
    return render(request, 'logout.html')
def batch_log(request):
    # Placeholder endpoint; not implemented yet (returns None/empty body).
    return
@csrf_exempt
def download_homework(request, pk, id):
    """Stream student *id*'s submission for homework *pk* back to the
    caller as a file download, marking the record as downloaded
    (status = 4)."""

    def file_iterator(file, chunk_size=512):
        # Try decoding the file as GBK first, fall back to UTF-8.
        # NOTE(review): reading in text mode corrupts non-text uploads;
        # binary mode ('rb') would be safer — confirm expected file types.
        try:
            with codecs.open(file, "r", "gbk") as f:
                while True:
                    c = f.read(chunk_size)
                    if c:
                        yield c
                    else:
                        break
        except:
            with codecs.open(file, "r", "utf8") as f:
                while True:
                    c = f.read(chunk_size)
                    if c:
                        yield c
                    else:
                        break

    records =Record.objects.filter(Homework_id__exact=pk).get(Student__username__exact=id)
    file = records.File
    print(file.name)
    # Mark the submission as downloaded by the teacher.
    records.status = 4
    records.save()
    filename = r'%s/%s' % (settings.MEDIA_ROOT, file.name)
    print(filename)
    response = StreamingHttpResponse(file_iterator(filename))
    response['Content-Type'] = 'application/octet-stream'
    response['Content-Disposition'] = "attachment;filename='{0}'".format(file)
    print(response['Content-Disposition'])
    return response
@csrf_exempt
def Record_List(request, pk):
    """Return one page of on-time submissions for homework *pk* as the
    JSON table payload (total/data/code/msg/count); page and limit come
    from the POST body."""
    deadline = Homework.objects.get(pk=pk).Deadline
    on_time = Record.objects.filter(Homework_id__exact=pk).filter(Upload_time__lte=deadline)
    total = on_time.count()
    page = int(request.POST.get('page'))
    limit = int(request.POST.get('limit'))
    start = (page - 1) * limit
    rows = []
    for record in on_time[start:start + limit]:
        row = {
            'id': record.Student.username,
            'homework': record.Homework.Description,
            'status': record.get_status_display(),
        }
        # Scores are only exposed once the record has been graded.
        if record.status == 2:
            row['score'] = record.Scores
        rows.append(row)
    payload = {'total': total, 'data': rows, 'code': 0, 'msg': "", 'count': total}
    return JsonResponse(payload, safe=False)
@csrf_exempt
def Specific(request, pk):
    """Render the per-homework record page for homework *pk*."""
    return render(request, 'Record.html', {'pk':pk})
@csrf_exempt
def grade(request,pk,id):
    """Teacher action: store the posted score for student *id*'s
    submission to homework *pk*, mark it graded (status = 2), then
    redirect back to the homework's record list."""
    if request.method == 'POST':
        record = Record.objects.filter(Homework_id__exact=pk).get(Student__username__exact=id)
        record.Scores = request.POST.get('grade')
        record.status = 2
        record.save()
    return HttpResponseRedirect(reverse('des', args=(pk, )))
@csrf_exempt
def late_homeworks(request,pk):
    """Return one page of submissions handed in after the deadline of
    homework *pk* as the JSON table payload (total/data/code/msg/count);
    page and limit come from the POST body."""
    deadline = Homework.objects.get(pk=pk).Deadline
    late = Record.objects.filter(Homework_id__exact=pk).filter(Upload_time__gt=deadline)
    total = late.count()
    page = int(request.POST.get('page'))
    limit = int(request.POST.get('limit'))
    start = (page - 1) * limit
    rows = []
    for record in late[start:start + limit]:
        row = {
            'id': record.Student.username,
            'homework': record.Homework.Description,
            'status': record.get_status_display(),
        }
        # Scores are only exposed once the record has been graded.
        if record.status == 2:
            row['score'] = record.Scores
        rows.append(row)
    payload = {'total': total, 'data': rows, 'code': 0, 'msg': "", 'count': total}
    return JsonResponse(payload, safe=False)
class ChangeForm(forms.Form):
    # Password-change form. The labels are user-facing Chinese strings:
    # username / current password / new password.
    username = forms.CharField(label='用户名')
    old_password = forms.CharField(label='原密码',widget=forms.PasswordInput())
    new_password = forms.CharField(label='新密码',widget=forms.PasswordInput())
@csrf_exempt
def change_pass(request):
    """Change a user's password after verifying the old one by
    re-authenticating; flashes a success or error message and re-renders
    the change form."""
    if request.method == 'POST':
        uf = ChangeForm(request.POST)
        if uf.is_valid():
            username = uf.cleaned_data['username']
            old_password = uf.cleaned_data['old_password']
            new_password = uf.cleaned_data['new_password']
            ## Verify the old credentials match before touching the account.
            user = authenticate(username=username, password=old_password)
            if user is not None:
                u=User.objects.get(username=username)
                u.set_password(new_password)
                u.save() ## update the password only when username/old password match
                messages.success(request, '修改成功!')
            else:
                messages.error(request, "请检查原密码与用户名是否输入正确!")
    else:
        uf = ChangeForm()
    return render(request, 'change.html', {'form':uf})
import logging
import json
from datetime import datetime
from moxie.core.service import Service
from moxie.core.kv import kv_store
from moxie_food.domain import Meal
logger = logging.getLogger(__name__)
KEY_MEAL = 'meals'
KEY_UPDATED = 'last_updated'
class FoodService(Service):
    """Food service: imports meal data from a single provider and caches
    it (plus a last-updated timestamp) in the key-value store."""

    def __init__(self, providers=None, service_key='food'):
        """Food service

        :param providers: mapping of provider configurations; only the
            first entry is used
        :param service_key: identifier of the service, mainly used when
            storing data
        """
        # BUG FIX: dict.items() is a view and is not indexable on
        # Python 3; take the first item through an iterator instead of
        # providers.items()[0].
        self.provider = self._import_provider(next(iter(providers.items())))
        self.service_key = service_key

    def import_meals(self):
        """Import meal data from the provider and persist it as JSON."""
        meals = self.provider.import_meals()
        data = json.dumps([meal.as_dict() for meal in meals])
        kv_store.set(self._get_key(KEY_MEAL), data)
        self._set_last_updated()

    def get_meals(self):
        """Get meal data from storage.

        :return: list of Meal domain objects (empty when nothing stored)
        """
        data = kv_store.get(self._get_key(KEY_MEAL))
        if not data:
            return []
        meals = json.loads(data)
        return [Meal.from_dict(meal) for meal in meals]

    def get_attribution(self):
        """Return the provider's attribution dictionary."""
        return self.provider.ATTRIBUTION

    def get_last_updated(self):
        """Return the stored ISO timestamp of the last import."""
        return kv_store.get(self._get_key(KEY_UPDATED))

    def _get_key(self, key):
        """Namespace *key* with the service key for use in the kv store.

        :param key: key to format
        :return: formatted key, e.g. "food_meals"
        """
        return "{app}_{key}".format(app=self.service_key, key=key)

    def _set_last_updated(self):
        """Record 'now' (ISO format) as the last-updated timestamp."""
        kv_store.set(self._get_key(KEY_UPDATED), datetime.now().isoformat())
|
from abstractcomponent import AbstractComponent
from b_text_block import BTextBlock
from ..gui_settings import *
from ...settings import *
class THeroHud(AbstractComponent):
    """HUD panel that shows a hero's stats as a vertical column of text
    blocks, each refreshed from live hero state via a lambda."""

    def __init__(self, x, y, hero):
        AbstractComponent.__init__(self, x, y, 100, 1000)
        self.hero = hero
        self._build_hud()

    def _build_hud(self):
        """Create one text block per stat line, spaced 20px apart."""
        stat_lines = [
            lambda: str(self.hero.hero_string),
            lambda: "LVL: " + str(self.hero.cur_lvl),
            lambda: "XP: " + str(self.hero.cur_xp) + " / " + str(self.hero.next_lvl_xp),
            lambda: "HP: " + str(self.hero.cur_hp) + " / " + str(self.hero.max_hp),
            lambda: "Dice: " + str(self.hero.dice_modifier),
            lambda: "Evade%: " + str(self.hero.evade_percent) + "%",
            lambda: "Crit%: " + str(self.hero.crit_percent) + "%",
            lambda: "Swords: " + str(self.hero.core_items['swords']),
            lambda: "Wands: " + str(self.hero.core_items['wands']),
            lambda: "Bows: " + str(self.hero.core_items['bows']),
            lambda: "Gold: " + str(self.hero.core_items['gold']),
            lambda: "Keys: " + str(self.hero.core_items['keys']),
        ]
        for row, text_fn in enumerate(stat_lines):
            self.add_child_component(BTextBlock(0, row * 20, text_fn))
|
import cv2
import numpy as np
import matplotlib.pylab as plt
from tkinter import *
from tkinter import filedialog
# Hidden Tk root is required before filedialog can be used.
# NOTE(review): the bare root window stays visible; root.withdraw() would hide it.
root = Tk()


def _pick_image():
    """Ask the user for an image file and load it as a BGR ndarray."""
    return cv2.imread(filedialog.askopenfilename(title='multi-select images',
                                                 initialdir='C:/Users/',
                                                 filetypes=(('jpg files', '*.jpg'),
                                                            ('all files', '*.*'))))


# The first selection is the query image; the other three are candidates.
# (Deduplicated: the original repeated the identical dialog+imread block four times.)
img1 = _pick_image()
img2 = _pick_image()
img3 = _pick_image()
img4 = _pick_image()

print(img1[:, :, ::-1])  # BGR -> RGB view of the query image
cv2.imshow('query', img1)

imgs = [img1, img2, img3, img4]
hists = []
for i, img in enumerate(imgs):
    plt.subplot(1, len(imgs), i + 1)
    plt.title('img%d' % (i + 1))
    plt.axis('off')
    plt.imshow(img[:, :, ::-1])
    # 2-D hue/saturation histogram, min-max normalized to [0, 1] so images
    # of different sizes are comparable.
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    hist = cv2.calcHist([hsv], [0, 1], None, [180, 256], [0, 180, 0, 256])
    cv2.normalize(hist, hist, 0, 1, cv2.NORM_MINMAX)
    hists.append(hist)

query = hists[0]
methods = {'CORREL': cv2.HISTCMP_CORREL,
           'CHISQR': cv2.HISTCMP_CHISQR,
           'INTER': cv2.HISTCMP_INTERSECT,
           'BHATTACHARYYA': cv2.HISTCMP_BHATTACHARYYA}
for name, flag in methods.items():
    print('%-10s' % name, end='\t')
    for i, hist in enumerate(hists):
        ret = cv2.compareHist(query, hist, flag)
        if flag == cv2.HISTCMP_INTERSECT:
            # Intersection grows with histogram mass; scale by the query's
            # total so the score lands in [0, 1].
            ret = ret / np.sum(query)
        print('img%d:%7.2f' % (i + 1, ret), end='\t')
    print()

plt.show()
cv2.waitKey()
cv2.destroyAllWindows()
import numpy as np
from sklearn import neighbors
from sklearn.preprocessing import MinMaxScaler
import pandas as pd
import os
class PredictionModel:
    """3-nearest-neighbour classifier trained on the bundled data.csv samples."""

    def __init__(self):
        # Locate data.csv next to this module regardless of the current
        # working directory.
        module_dir = os.path.dirname(os.path.abspath(__file__))
        frame = pd.read_csv(f'{module_dir}/data.csv')
        self.X = frame.iloc[1:, 1:-1].values
        # Raw labels are remapped via -(label - 1); presumably maps the two
        # stored classes onto the classifier's target values — TODO confirm.
        self.y = -(frame.iloc[1:, -1].values - 1)
        self.clf = neighbors.KNeighborsClassifier(n_neighbors=3, p=2)
        self.__preprocessing()
        self.__train()

    def __preprocessing(self):
        """Scale the training features into the unit square."""
        self.X = self.__scale(self.X)

    def __train(self):
        """Fit the KNN classifier on the scaled features."""
        self.clf.fit(self.X, self.y)

    def __scale(self, data):
        """Min-max scale with fixed sensor bounds: feature 1 in [0, 1023], feature 2 in [0, 50]."""
        scaler = MinMaxScaler()
        scaler.fit(np.array([[0, 0], [1023, 50]]))
        return scaler.transform(data)

    def predict(self, moisture, temp):
        """Classify one (moisture, temp) reading and return the predicted label."""
        sample = np.array([[moisture, temp]])
        return self.clf.predict(self.__scale(sample))[0]
from rest_framework.permissions import BasePermission
class UsersPermission(BasePermission):
    """Permission policy for the users API.

    - Listing users: admins only (hence authenticated).
    - Creating users: anyone (POST).
    - Detail / update / delete: admins may target any user; authenticated
      non-admins only their own data (enforced per-object); anonymous users
      have no access.
    """

    def has_permission(self, request, view):
        """
        Decide whether the user may run this action (GET, POST, PUT or DELETE) on `view`.
        """
        # Local import avoids the circular dependency between users/api and users/permission.
        from users.api import UserDetailAPI
        if request.method == "POST":
            return True
        if request.user.is_superuser:
            return True
        if not request.user.is_authenticated:
            return False
        if request.method == "GET":
            # Authenticated non-admins may only hit the detail view;
            # per-object checks restrict them to their own record.
            return isinstance(view, UserDetailAPI)
        return request.method in ("PUT", "DELETE")

    def has_object_permission(self, request, view, obj):
        """
        Allow the authenticated user (request.user) to act on `obj` only if it is
        themselves or they are a superuser.
        """
        is_own_record = request.user == obj
        return is_own_record or request.user.is_superuser
from django.contrib import admin
import os
import time
from images.models import Video, Album, TFModel
from images.tasks import new_model
# Register your models here.
def close_album(modeladmin, request, queryset):
    """Admin action: mark every selected album as closed (status 'c')."""
    queryset.update(status='c')
def open_album(modeladmin, request, queryset):
    """Admin action: mark every selected album as open (status 'o')."""
    queryset.update(status='o')
def create_model(modeladmin, request, queryset):
    """Admin action: queue model training for each selected album.

    Each Celery task is scheduled to start after a 5 second delay.
    """
    for selected_album in queryset:
        new_model.apply_async(args=[selected_album.id], countdown=5)
# Labels shown for these actions in the admin dropdown.
close_album.short_description = "Close album to users"
open_album.short_description = "Make album available to users"
create_model.short_description = "Train model"
class AlbumAdmin(admin.ModelAdmin):
    """Admin configuration for Album: list columns, ordering, and custom actions."""
    list_display = ['organization', 'name', 'description', 'pin', 'model_status', 'status']
    ordering = ['name']
    actions = [close_album, open_album, create_model]
class VideoAdmin(admin.ModelAdmin):
    """Admin configuration for Video: show title and owning album, sorted by title."""
    list_display = ['title', 'album']
    ordering = ['title']
class TFModelAdmin(admin.ModelAdmin):
    """Admin configuration for TFModel: show name and associated album, sorted by name."""
    list_display = ['name', 'album_model']
    ordering = ['name']
# Register the models with their custom admin configurations.
admin.site.register(Album, AlbumAdmin)
admin.site.register(Video, VideoAdmin)
admin.site.register(TFModel, TFModelAdmin)
|
from typing import Iterator, Iterable, Tuple, Sized, Union
from elasticsearch import Elasticsearch
from collections import OrderedDict
import math
import numpy as np
import gzip
import json
import csv
def read_json(data_file: str) -> Iterator:
    """Yield one parsed JSON document per line of a JSON-lines file.

    Files whose name ends in '.gz' are transparently decompressed.

    :param data_file: path to a (optionally gzipped) JSON-lines file
    :return: iterator over the parsed documents, one per line
    """
    opener = gzip.open if data_file.endswith('.gz') else open
    # 'with' guarantees the handle is closed even if the consumer stops
    # early or a line fails to parse (the original leaked the handle then).
    with opener(data_file, 'rt') as fh:
        for line in fh:
            yield json.loads(line.strip())
def read_csv(data_file: str) -> Iterator:
    """Yield one dict per data row of a tab-separated file, keyed by the header row.

    Files whose name ends in '.gz' are transparently decompressed.

    :param data_file: path to a (optionally gzipped) TSV file
    :return: iterator over {header: value} dicts, one per data row
    """
    opener = gzip.open if data_file.endswith('.gz') else open
    # 'with' guarantees the handle is closed even on early exit or a parse
    # error (the original leaked the handle in those cases).
    with opener(data_file, 'rt') as fh:
        reader = csv.reader(fh, delimiter='\t')
        headers = next(reader, None)
        if headers is None:
            # Empty file: nothing to yield. (The bare next() in the original
            # raised RuntimeError here via PEP 479.)
            return
        for row in reader:
            yield {header: row[hi] for hi, header in enumerate(headers)}
def ecdf(data: Union[np.ndarray, Sized], reverse: bool = False) -> Tuple[Iterable, Iterable]:
    """Compute the empirical CDF of a one-dimensional array of measurements.

    This function is copied from Eric Ma's tutorial on Bayes statistics at
    scipy 2019 https://github.com/ericmjl/bayesian-stats-modelling-tutorial

    :param data: one-dimensional collection of measurements
    :param reverse: when True, return the x values in descending order
    :return: (x, y) where x is the sorted data and y the cumulative fractions
    """
    num_points = len(data)
    sorted_values = np.sort(data)
    if reverse:
        sorted_values = np.flipud(sorted_values)
    cumulative_fractions = np.arange(1, num_points + 1) / num_points
    return sorted_values, cumulative_fractions
def scroll_hits(es: 'Elasticsearch', query: dict, index: str, size: int = 100) -> Iterator:
    """Yield all hits for `query` from `index`, scrolling in pages of `size`.

    :param es: Elasticsearch client instance
    :param query: query body passed to es.search
    :param index: name of the index to search
    :param size: number of hits fetched per scroll page
    :return: iterator over the raw hit dicts
    """
    response = es.search(index=index, scroll='2m', size=size, body=query)
    sid = response['_scroll_id']
    scroll_size = response['hits']['total']
    print('total hits:', scroll_size)
    # ES 7+ reports 'total' as a dict {'value': ..., 'relation': ...};
    # older versions report a plain int. (isinstance replaces the original
    # non-idiomatic `type(x) == dict` check.)
    if isinstance(scroll_size, dict):
        scroll_size = scroll_size['value']
    # Keep scrolling until a page comes back empty.
    while scroll_size > 0:
        for hit in response['hits']['hits']:
            yield hit
        response = es.scroll(scroll_id=sid, scroll='2m')
        # Update the scroll ID
        sid = response['_scroll_id']
        # Number of hits returned in the last page; 0 terminates the loop.
        scroll_size = len(response['hits']['hits'])
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Accumulates every matched value across calls (module-level state shared
# between successive output_value() calls by design of the original API).
key_value_list = []


def output_value(jsons, key):
    """Recursively search `jsons` for `key` and collect the matched values.

    Each matched (truthy) value is printed and appended to the module-level
    ``key_value_list``, which therefore grows across successive calls.

    :param jsons: parsed JSON data (dict, list, or scalar) to search
    :param key: the key to look up
    :return: the shared list of all values collected so far
    """
    if isinstance(jsons, dict):
        if key in jsons:
            # Fix: the original checked/appended once per entry in the dict,
            # duplicating each match len(jsons) times; append exactly once.
            key_value = jsons[key]
            # Truthiness instead of len(): the original raised TypeError for
            # unsized values such as numbers.
            if key_value:
                print(key_value)
                key_value_list.append(key_value)
                print(key_value_list)
        else:
            for nested in jsons.values():
                output_value(nested, key)
    elif isinstance(jsons, list):
        for element in jsons:
            output_value(element, key)
    return key_value_list
if __name__ =="__main__":
    # Demo: collect every value stored under 'words' anywhere in the structure.
    jsonsstr={"w": "猫香波","wq": "猫香波bobobobo","wor": [{"words":"旺旺"},{"words":"tom"}]}
    out = output_value(jsonsstr, 'words')
    print ("ceshi:",out)
|
# Yunlu Ma ID: 28072206
import tkinter
import get_point
import P5_logic
import set_dialogs
class Start_game:
    """Root window with a single 'Start Game' button that launches Othello."""

    def __init__(self):
        """Build the root Tk window and place the 'Start Game' button in it."""
        self._root_window = tkinter.Tk()
        start_button = tkinter.Button(
            master = self._root_window, text = 'Start Game', font = ('Helvetica', 14),
            command = self._set)
        start_button.grid(
            row = 0, column = 0, padx = 10, pady = 10,
            sticky = tkinter.S)
        self._root_window.rowconfigure(0, weight = 1)
        self._root_window.columnconfigure(0, weight = 1)

    def _set(self) -> None:
        """Show the rules dialog; when OK is clicked, close this window and start the game."""
        set_game = set_dialogs.Dialogs()
        set_game.show()
        if not set_game.was_ok_clicked():
            return
        self._root_window.destroy()
        board = Gameboard(set_game._row_number, set_game._col_number,
                          set_game._turn.get(), set_game._winning_way.get())
        board.run()

    def run(self) -> None:
        """Enter the Tk main loop for the start window."""
        self._root_window.mainloop()
class Gameboard:
    """Core Othello UI: canvas game board, status panel, and glue to P5_logic.

    The window starts in a "setup" phase where the player places black then
    white discs by clicking; after 'Start to Play!!!' the same clicks become
    game moves validated by the logic module.
    """
    def __init__(self,row_number:str,col_number:str,first_turn:str,winning_way:str):
        # Build the canvas for the gameboard on a new tkinter.Tk() and create the Othello logic object.
        # Row/col counts arrive as strings (from the settings dialog) and are converted below.
        self._point_list = []
        self._useful_list =[]
        self._count = 0
        self._root_window = tkinter.Tk()
        self._row = int(row_number)
        self._col = int(col_number)
        self._first_set = 'B'
        self._winning_way = winning_way
        self._turn = tkinter.StringVar()
        self._turn.set(first_turn)
        self._black = tkinter.StringVar()
        self._white = tkinter.StringVar()
        self._change_set_color_clicked = False
        self._start_to_play_clicked = False
        self.Othello = P5_logic.Othello(self._row,self._col,self._turn.get(),self._winning_way)
        self._canvas = tkinter.Canvas(
            master = self._root_window,
            width = 500, height = 500,
            background = 'pink')
        self._canvas.grid(
            row = 0, column = 0, padx = 5, pady = 5,
            sticky = tkinter.N + tkinter.S + tkinter.W + tkinter.E)
        self._result_frame = tkinter.Frame(master = self._root_window )
        self._result_frame.grid(
            row = 0, column = 1, rowspan = 2, padx = 10, pady = 10,
            sticky = tkinter.E + tkinter.N
        )
        self._set_text = tkinter.StringVar()
        self._set_text.set('Set Black discs first!')
        set_black_discs_lable = tkinter.Label(
            master = self._result_frame, textvariable = self._set_text,
            font = ('Helvetica', 14))
        set_black_discs_lable.grid(
            row = 0, column = 0, padx = 10, pady = 10,
            sticky = tkinter.W)
        # NOTE(review): this attribute shadows the _change_set_color METHOD
        # defined below. It only works because command= captured the bound
        # method before the assignment; consider renaming the attribute
        # (e.g. _change_set_color_button) to avoid the collision.
        self._change_set_color = tkinter.Button(
            master = self._result_frame, text = 'Set White discs',
            font = ('Helvetica', 14), command = self._change_set_color)
        self._change_set_color.grid(
            row = 1, column = 0, padx = 10, pady = 10,
            sticky = tkinter.W)
        self._canvas.bind('<Configure>', self._on_canvas_resized)
        self._canvas.bind('<Button-1>', self._on_canvas_clicked)
        self._root_window.rowconfigure(0, weight = 1)
        self._root_window.columnconfigure(0, weight = 1)
    def run(self) -> None:
        # Run the core part of the game
        self._root_window.mainloop()
    def _change_set_color(self) -> None:
        # Change the color to "white" when setting the game board
        self._change_set_color_clicked = True
        self._set_text.set('Now Set White discs!')
        self._first_set = "W"
        self._change_button()
    def _change_button(self) -> None:
        # Remove the button 'Now Set White discs!' when it was clicked
        # Add the button "Start to Play!!!" at the same position when the player is setting the white discs
        if self._change_set_color_clicked:
            self._change_set_color.grid_remove()
            self._start_to_play = tkinter.Button(
                master = self._result_frame, text = 'Start to Play!!!',
                font = ('Helvetica', 14), command = self._begin_to_play)
            self._start_to_play.grid(
                row = 1, column = 0, padx = 10, pady = 10,
                sticky = tkinter.W)
    def _begin_to_play(self) -> None:
        # Show the gameboard which was set up by the player and show it on canvas
        # Show the important information like Welcome, Winning Way, Turn and the number of discs in different colors
        self._start_to_play_clicked = True
        self._set_text.set('Welcome to Othello!')
        self._start_to_play.grid_remove()
        self.Othello.build_board()
        # Copy the manually-placed discs into the logic module's board.
        for click_point in self._point_list:
            if click_point._color == "B":
                self.Othello._board[click_point._row][click_point._col] = "B"
            else:
                self.Othello._board[click_point._row][click_point._col] = "W"
        self.Othello.count_number()
        self._black.set(str(self.Othello._black))
        self._white.set(str(self.Othello._white))
        winning_way_label = tkinter.Label(
            master = self._result_frame, text = "Winning Way: " + self._winning_way,
            font = ('Helvetica', 14))
        winning_way_label.grid(
            row = 2, column = 0, padx = 10, pady = 10,
            sticky = tkinter.W)
        Change_frame = tkinter.Frame(master = self._result_frame)
        Change_frame.grid(row = 4, column = 0, padx = 10, pady = 10,
            sticky = tkinter.W + tkinter.N)
        turn_notation_label = tkinter.Label(
            master = Change_frame, text = "TURN: ",
            font = ('Helvetica', 14))
        turn_notation_label.grid(
            row = 0, column = 0, padx = 10, pady = 10,
            sticky = tkinter.W)
        turn_label = tkinter.Label(
            master = Change_frame, textvariable = self._turn,
            font = ('Helvetica', 14))
        turn_label.grid(
            row = 0, column = 1, padx = 10, pady = 10,
            sticky = tkinter.E)
        Black_notation_label = tkinter.Label(
            master = Change_frame, text = "BLACK: ",
            font = ('Helvetica', 14))
        Black_notation_label.grid(
            row = 1, column = 0, padx = 10, pady = 10,
            sticky = tkinter.W)
        Black_number_label = tkinter.Label(
            master = Change_frame, textvariable = self._black,
            font = ('Helvetica', 14))
        Black_number_label.grid(
            row = 1, column = 1, padx = 10, pady = 10,
            sticky = tkinter.E)
        White_notation_label = tkinter.Label(
            master = Change_frame, text = "WHITE: ",
            font = ('Helvetica', 14))
        White_notation_label.grid(
            row = 2, column = 0, padx = 10, pady = 10,
            sticky = tkinter.W)
        White_number_label = tkinter.Label(
            master = Change_frame, textvariable = self._white,
            font = ('Helvetica', 14))
        White_number_label.grid(
            row = 2, column = 1, padx = 10, pady = 10,
            sticky = tkinter.E)
        if self.Othello.check_for_winner():
            self.Othello.winner_system(self._winning_way)
            # NOTE(review): this label is gridded at the same (row=2, col=0)
            # cell of _result_frame as winning_way_label above — it stacks
            # over it; confirm this overlap is intended.
            winner_label = tkinter.Label(
                master = self._result_frame, text = "WINNER: " + self.Othello._winner,
                font = ('Helvetica', 14))
            winner_label.grid(
                row = 2, column = 0, padx = 10, pady = 10,
                sticky = tkinter.W)
        else:
            self._turn.set(self.Othello._turn)
    def _check_valid_point(self,click_point):
        # Check the click point is a valid move based on the game logic
        # _count stays 0 for an invalid click, so the move below is skipped.
        for lst in self.Othello._total_list:
            if click_point._row == lst[-1][0] and click_point._col == lst[-1][1]:
                if self._count == 0:
                    self._count += 1
                    self._point_list.append(click_point)
                self._useful_list.append(lst)
                continue
        if self._count != 0:
            self.Othello.change_color(click_point._row, click_point._col,self._useful_list)
            self.change_color()
            self._redraw_all_spots()
            self.Othello.count_number()
            self._black.set(str(self.Othello._black))
            self._white.set(str(self.Othello._white))
            self.Othello.opposite_turn()
            self._turn.set(self.Othello._turn)
    def change_color(self) -> None:
        # Make the change of the color of discs on the board if the player drop the correct disc
        for lst in self._useful_list:
            for position in lst[:-1]:
                for click_point in self._point_list:
                    if click_point._row == position[0] and click_point._col == position[1]:
                        click_point._color = self._turn.get()
    def _on_canvas_resized(self, event: tkinter.Event) -> None:
        # Keep all the stuffs on the canvas when resizing
        self._draw_lines()
        self._redraw_all_spots()
    def _draw_lines(self) -> None:
        # Draw the line of the gameboard on canvas based on the row number and col number
        self._canvas.delete(tkinter.ALL)
        canvas_width = self._canvas.winfo_width()
        canvas_height = self._canvas.winfo_height()
        for i in range(1,self._row):
            self._canvas.create_line(0, canvas_height * (i/self._row), canvas_width, canvas_height * (i/self._row), fill = 'black')
        for i in range(1,self._col):
            self._canvas.create_line(canvas_width * (i/self._col), 0, canvas_width * (i/self._col), canvas_height, fill = 'black')
    def _on_canvas_clicked(self, event: tkinter.Event) -> None:
        # Handle the click on the canvas based with different methods based on the situation:
        # in play mode the click is a move; in setup mode it places a disc.
        width = self._canvas.winfo_width()
        height = self._canvas.winfo_height()
        if self._start_to_play_clicked:
            click_point = get_point.from_pixel(
                event.x, event.y, width, height,self._turn.get())
            self._get_disc_row(click_point)
            self._get_disc_col(click_point)
            self.Othello.total_game()
            self._reset()
            self._check_valid_point(click_point)
            if self.Othello.check_for_winner():
                self.Othello.winner_system(self._winning_way)
                winner_label = tkinter.Label(
                    master = self._result_frame, text = "WINNER: " + self.Othello._winner,
                    font = ('Helvetica', 14))
                winner_label.grid(
                    row = 2, column = 0, padx = 10, pady = 10,
                    sticky = tkinter.W)
            else:
                self._turn.set(self.Othello._turn)
        else:
            click_point = get_point.from_pixel(
                event.x, event.y, width, height,self._first_set)
            self._get_disc_row(click_point)
            self._get_disc_col(click_point)
            if self._count == 0:
                self._point_list.append(click_point)
                self._redraw_all_spots()
                self._count += 1
            else:
                # NOTE(review): 'l' and 'alist' are never used — leftovers.
                l=[]
                alist = [self._point_list[0]]
                # Ignore clicks on a cell that already holds a disc.
                for point in self._point_list:
                    if (click_point._row == point._row) and (click_point._col == point._col):
                        return
                    else:
                        pass
                self._point_list.append(click_point)
                self._redraw_all_spots()
    def _reset(self):
        # Reset the list and the variable before using them
        self._useful_list = []
        self._count = 0
    def _redraw_all_spots(self) -> None:
        # Draw the discs on the board with different colors
        self._canvas.delete(tkinter.ALL)
        self._draw_lines()
        canvas_width = self._canvas.winfo_width()
        canvas_height = self._canvas.winfo_height()
        for click_point in self._point_list:
            x_coords = self._check_x_coords(click_point)
            y_coords = self._check_y_coords(click_point)
            color = click_point.color()
            self._canvas.create_oval(
                x_coords[0] * canvas_width, y_coords[0] * canvas_height,
                x_coords[1] * canvas_width, y_coords[1] * canvas_height,
                fill = color, outline = "black")
    def _check_x_coords(self,click_point)-> tuple:
        # Find the x coordinate of the click point by making the translation based on the size of the canvas
        center_x = click_point.frac()[0]
        for i in range(self._col):
            if i/self._col <= center_x < (i+1)/self._col:
                x_coords = (i/self._col,(i+1)/self._col)
                return x_coords
    def _check_y_coords(self,click_point) -> tuple:
        # Find the y coordinate of the click point by making the translation based on the size of the canvas
        center_y = click_point.frac()[1]
        for i in range(self._row):
            if i/self._row <= center_y < (i+1)/self._row:
                y_coords = (i/self._row,(i+1)/self._row)
                return y_coords
    def _get_disc_col(self,click_point) -> int:
        # Find the col of the click point on the gameboard and store it on the point
        center_x = click_point.frac()[0]
        for i in range(self._col):
            if i/self._col <= center_x < (i+1)/self._col:
                col = i
                click_point.add_col(col)
    def _get_disc_row(self,click_point) -> int:
        # Find the row of the click point on the gameboard and store it on the point
        center_y = click_point.frac()[1]
        for i in range(self._row):
            if i/self._row <= center_y < (i+1)/self._row:
                row = i
                click_point.add_row(row)
if __name__ == '__main__':
    # Launch the start window; the game board opens after setup is confirmed.
    app = Start_game()
    app.run()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-13 12:23
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated schema migration (Django 1.9.6, 2016-05-13).

    Creates the blog/forum/schedule models (BlogMassage, Comment, Event,
    ForumSection, ForumTheme, Lesson, News, Subject, SubunitToSubject) and
    adjusts UserProfile/Subunit fields.

    NOTE(review): several ForeignKey fields below use unique=True; Django
    recommends OneToOneField for that (check W342) — do not hand-edit a
    generated migration, but a follow-up migration could normalize this.
    """
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('testapp', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='BlogMassage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('massage', models.TextField(null=True)),
                ('created', models.DateTimeField(auto_now=True)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('massage', models.TextField(null=True)),
                ('created', models.DateTimeField(auto_now=True)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
                ('upper_comment', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='lower', to='testapp.Comment')),
            ],
        ),
        migrations.CreateModel(
            name='Event',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('datetime', models.DateTimeField(null=True)),
                ('title', models.CharField(max_length=200)),
                ('description', models.TextField(null=True)),
            ],
        ),
        migrations.CreateModel(
            name='ForumSection',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
            ],
        ),
        migrations.CreateModel(
            name='ForumTheme',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('created', models.DateTimeField(auto_now=True)),
                ('active', models.BooleanField(default=True)),
                ('fixed', models.BooleanField(default=False)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
                ('root_comment', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='testapp.Comment', unique=True)),
                ('section', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='testapp.ForumSection')),
            ],
        ),
        migrations.CreateModel(
            name='Lesson',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('datetime', models.DateTimeField(null=True)),
                ('auditorium', models.CharField(max_length=10)),
                ('template', models.BooleanField(default=False)),
                ('lecturer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='News',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('massage', models.TextField(null=True)),
                ('created', models.DateTimeField(auto_now=True)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
                ('root_comment', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='testapp.Comment', unique=True)),
            ],
        ),
        migrations.CreateModel(
            name='Subject',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                ('description', models.TextField(null=True)),
                ('lecturer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='SubunitToSubject',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('subject', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='testapp.Subject')),
            ],
        ),
        migrations.AddField(
            model_name='userprofile',
            name='birthday',
            field=models.DateField(null=True),
        ),
        migrations.AddField(
            model_name='userprofile',
            name='description',
            field=models.TextField(null=True),
        ),
        migrations.AlterField(
            model_name='subunit',
            name='upper_subunit',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='lower', to='testapp.Subunit'),
        ),
        migrations.AlterField(
            model_name='userprofile',
            name='subunit',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='testapp.Subunit'),
        ),
        migrations.AddField(
            model_name='subunittosubject',
            name='subunit',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='testapp.Subunit'),
        ),
        migrations.AddField(
            model_name='news',
            name='subunit',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='testapp.Subunit'),
        ),
        migrations.AddField(
            model_name='lesson',
            name='subject',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='testapp.Subject'),
        ),
        migrations.AddField(
            model_name='event',
            name='subunit',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='testapp.Subunit'),
        ),
        migrations.AddField(
            model_name='blogmassage',
            name='root_comment',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='testapp.Comment', unique=True),
        ),
        migrations.AddField(
            model_name='subunit',
            name='forum',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='testapp.ForumSection', unique=True),
        ),
    ]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import netCDF4
__author__ = 'kmu'
"""
Retrieve data from netcdf files from thredds.met.no or \hdata\grid.
"""
def _nc_info(nc_data):
print('### DIMENSIONS ###')
print(nc_data.dimensions)
for k in nc_data.dimensions.keys():
print("-\t{0}".format(k))
print('### VARIABLES ###')
for k in nc_data.variables.keys():
print("-\t{0}".format(k))
def nc_load(nc_object, vars, bounding_box=None, time_period=None):
    """
    Load selected variables (and topography/land mask) from a netCDF dataset.

    Dimensions for the nc-files on thredds are y, x or time, y, x.
    :param nc_object: filename or URL of netCDF file, e.g. './Data/arome_metcoop_default2_5km_latest.nc' or 'http://thredds.met.no/thredds/dodsC/arome25/arome_metcoop_default2_5km_latest.nc'
    :param vars: list of variable names that should be retrieved (NOTE: shadows the `vars` builtin)
    :param bounding_box: list of lat lons [S, N, E, W] to define a rectangular shape to be clipped out
    :param time_period: list of start and end time, e.g. []
        NOTE(review): time_period is indexed unconditionally below, so the
        default of None raises TypeError — callers must always pass it.
    :return: tuple (dates, altitude, land_area_fraction, {var_name: data})
    """
    # Access netcdf file via OpenDAP
    nc = netCDF4.Dataset(nc_object)
    # Get content
    _nc_info(nc)
    # Get coordinates and other standard variables; each lookup falls back to
    # a printed warning, leaving the variable unbound (handled via
    # UnboundLocalError further down).
    try:
        x_var = nc.variables['x']
        y_var = nc.variables['y']
    except KeyError:
        print("Variables 'x' and 'y' are not provided.")
    try:
        latitude_var = nc.variables['latitude']
        longitude_var = nc.variables['longitude']
    except KeyError:
        try:
            latitude_var = nc.variables['lat']
            longitude_var = nc.variables['lon']
        except KeyError:
            print("Variables 'lat/latitude' and 'lon/longitude' are not provided.")
    time_var = nc.variables['time']
    try:
        altitude_var = nc.variables['altitude']
    except KeyError:
        print("Variable 'altitude' is not provided.")
    try:
        land_area_fraction_var = nc.variables['land_area_fraction']
    except KeyError:
        print("Variable 'land_area_fraction' is not provided.")
    nc_vars = {}
    # Apply bounding box if given
    if bounding_box is not None:
        # Index [1] of np.where assumes 2-D lat/lon arrays — TODO confirm
        # this matches the grids actually served.
        lat1 = np.where(latitude_var[:] >= bounding_box[0])[1][0]
        lat2 = np.where(latitude_var[:] <= bounding_box[1])[1][-1]
        lon1 = np.where(longitude_var[:] >= bounding_box[2])[1][0]
        lon2 = np.where(longitude_var[:] <= bounding_box[3])[1][-1]
        print(lon1, lon2, lat1, lat2)
        altitude = altitude_var[lon1:lon2, lat1:lat2] # retrieve model topography
        try:
            land_area_fraction = land_area_fraction_var[lon1:lon2, lat1:lat2]
        except UnboundLocalError:
            land_area_fraction = None
        for var in vars:
            nc_vars[var] = nc.variables[var][:].squeeze()[time_period[0]:time_period[1], lon1:lon2, lat1:lat2]
    else:
        try:
            altitude = altitude_var[:, :]
        except UnboundLocalError:
            altitude = None
        # NOTE(review): lon1/lon2/lat1/lat2 are never assigned in this branch,
        # so this always raises UnboundLocalError and yields None — looks
        # unintended; probably meant land_area_fraction_var[:, :].
        try:
            land_area_fraction = land_area_fraction_var[lon1:lon2, lat1:lat2]
        except UnboundLocalError:
            land_area_fraction = None
        for var in vars:
            nc_vars[var] = nc.variables[var][:].squeeze()[time_period[0]:time_period[1], :, :]
    times = time_var[time_period[0]:time_period[1]]
    # Convert raw time values to datetime objects using the file's units.
    jd = netCDF4.num2date(times[:], time_var.units)
    return jd, altitude, land_area_fraction, nc_vars
if __name__ == "__main__":
    # Example: extract the 0-degree-isotherm altitude for one time step (7:8)
    # from an archived MEPS file on the network share.
    ncfile = r"\\hdata\grid\metdata\prognosis\meps\det\archive\2019\meps_det_extracted_1km_20190404T00Z.nc"
    jd, altitude, land_area_fraction, nc_vars = nc_load(ncfile, ["altitude_of_0_degree_isotherm"], time_period=[7, 8])
    from grid_data import SeNorgeGrid
    sg = SeNorgeGrid('Freezing level')
    sg.from_ndarray(nc_vars['altitude_of_0_degree_isotherm'])
    #TODO: check correct shape; decide if third dimensions should be removed
    # NOTE(review): leftover assignment, appears unused.
    k = 'm'
|
# coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for running distributed actor/learner tests."""
import functools
from absl import logging
import numpy as np
import reverb
import tensorflow as tf
from tf_agents.agents.dqn import dqn_agent
from tf_agents.agents.ppo import ppo_clip_agent
from tf_agents.environments import suite_gym
from tf_agents.experimental.distributed import reverb_variable_container
from tf_agents.networks import actor_distribution_network
from tf_agents.networks import sequential
from tf_agents.networks import value_network
from tf_agents.policies import py_tf_eager_policy
from tf_agents.replay_buffers import reverb_replay_buffer
from tf_agents.specs import tensor_spec
from tf_agents.train import actor
from tf_agents.train.utils import replay_buffer_utils
from tf_agents.train.utils import spec_utils
from tf_agents.train.utils import train_utils
from tf_agents.trajectories import time_step as ts
from tf_agents.trajectories import trajectory
def configure_logical_cpus():
  """Configures exactly 4 logical CPUs for the first physical CPU.

  Assumes no logical configuration exists or it was configured the same way.

  **Note**: The number of logical CPUs is fixed because reconfiguring logical
  CPUs after the underlying runtime has been initialized is not supported
  (raises `RuntimeError`). Fixing the count ensures tests in the same process
  can call this function multiple times without breaking.
  """
  first_cpu = tf.config.list_physical_devices('CPU')[0]
  virtual_cpus = []
  for _ in range(4):
    virtual_cpus.append(tf.config.experimental.VirtualDeviceConfiguration())
  try:
    tf.config.experimental.set_virtual_device_configuration(
        first_cpu, logical_devices=virtual_cpus
    )
    logging.info(
        'No current virtual device configuration. Defining 4 virtual CPUs on '
        'the first physical one.'
    )
  except RuntimeError:
    # Already configured: report the existing configuration and carry on.
    existing_config = tf.config.experimental.get_virtual_device_configuration(
        first_cpu
    )
    logging.warn(
        'The following virtual device configuration already exists: %s which '
        'resulted this call to fail with `RuntimeError` since it is not '
        'possible to reconfigure it after runtime initialization. It is '
        'probably safe to ignore.',
        existing_config,
    )
def get_cartpole_env_and_specs():
  """Loads CartPole-v0 and returns (env, action_spec, time_step_spec)."""
  env = suite_gym.load('CartPole-v0')
  # get_tensor_specs returns (observation, action, time_step) specs; the
  # observation spec is not needed here.
  specs = spec_utils.get_tensor_specs(env)
  return env, specs[1], specs[2]
def build_dummy_sequential_net(fc_layer_params, action_spec):
  """Builds a dummy sequential network: ReLU hidden layers plus a logits head."""
  num_actions = action_spec.maximum - action_spec.minimum + 1

  def _hidden_layer(num_units):
    # ReLU dense layer with fan-in variance-scaling initialization.
    return tf.keras.layers.Dense(
        num_units,
        activation=tf.keras.activations.relu,
        kernel_initializer=tf.compat.v1.variance_scaling_initializer(
            scale=2.0, mode='fan_in', distribution='truncated_normal'
        ),
    )

  layers = [_hidden_layer(units) for units in fc_layer_params]
  # Final linear head produces one logit per action.
  layers.append(
      tf.keras.layers.Dense(
          num_actions,
          activation=None,
          kernel_initializer=tf.random_uniform_initializer(
              minval=-0.03, maxval=0.03
          ),
          bias_initializer=tf.constant_initializer(-0.2),
      )
  )
  return sequential.Sequential(layers)
def create_ppo_agent_and_dataset_fn(
    action_spec, time_step_spec, train_step, batch_size
):
  """Builds and returns a dummy PPO Agent, dataset and dataset function.

  Args:
    action_spec: Unused; the agent is built from fixed specs below.
    time_step_spec: Unused; the agent is built from fixed specs below.
    batch_size: Unused.
    train_step: Train step counter variable for the agent.

  Returns:
    Tuple of (agent, dataset, dataset_fn, training_data_spec).
  """
  del action_spec  # Unused.
  del time_step_spec  # Unused.
  del batch_size  # Unused.
  # No arbitrary spec supported.
  obs_spec = tensor_spec.TensorSpec([2], tf.float32)
  ts_spec = ts.time_step_spec(obs_spec)
  act_spec = tensor_spec.BoundedTensorSpec([1], tf.float32, -1, 1)
  # Small networks; sizes only need to be valid, not performant.
  actor_net = actor_distribution_network.ActorDistributionNetwork(
      obs_spec,
      act_spec,
      fc_layer_params=(100,),
      activation_fn=tf.keras.activations.tanh,
  )
  value_net = value_network.ValueNetwork(
      obs_spec, fc_layer_params=(100,), activation_fn=tf.keras.activations.tanh
  )
  agent = ppo_clip_agent.PPOClipAgent(
      ts_spec,
      act_spec,
      optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
      actor_net=actor_net,
      value_net=value_net,
      entropy_regularization=0.0,
      importance_ratio_clipping=0.2,
      normalize_observations=False,
      normalize_rewards=False,
      use_gae=False,
      use_td_lambda_return=False,
      num_epochs=1,
      debug_summaries=False,
      summarize_grads_and_vars=False,
      train_step_counter=train_step,
      compute_value_and_advantage_in_train=False,
  )
  def _create_experience(_):
    # Builds a fixed trajectory of batch 2 x 3 MID steps; the input index
    # from the dataset is ignored, so every element is identical.
    observations = tf.constant(
        [
            [[1, 2], [3, 4], [5, 6]],
            [[1, 2], [3, 4], [5, 6]],
        ],
        dtype=tf.float32,
    )
    mid_time_step_val = ts.StepType.MID.tolist()
    time_steps = ts.TimeStep(
        step_type=tf.constant([[mid_time_step_val] * 3] * 2, dtype=tf.int32),
        reward=tf.constant([[1] * 3] * 2, dtype=tf.float32),
        discount=tf.constant([[1] * 3] * 2, dtype=tf.float32),
        observation=observations,
    )
    actions = tf.constant([[[0], [1], [1]], [[0], [1], [1]]], dtype=tf.float32)
    action_distribution_parameters = {
        'loc': tf.constant([[[0.0]] * 3] * 2, dtype=tf.float32),
        'scale': tf.constant([[[1.0]] * 3] * 2, dtype=tf.float32),
    }
    value_preds = tf.constant(
        [[9.0, 15.0, 21.0], [9.0, 15.0, 21.0]], dtype=tf.float32
    )
    policy_info = {
        'dist_params': action_distribution_parameters,
    }
    policy_info['value_prediction'] = value_preds
    experience = trajectory.Trajectory(
        time_steps.step_type,
        observations,
        actions,
        policy_info,
        time_steps.step_type,
        time_steps.reward,
        time_steps.discount,
    )
    return agent._preprocess(experience)  # pylint: disable=protected-access
  dataset = tf.data.Dataset.from_tensor_slices([[i] for i in range(100)]).map(
      _create_experience
  )
  # Pair each experience with a monotonically increasing counter value.
  dataset = tf.data.Dataset.zip((dataset, tf.data.experimental.Counter()))
  dataset_fn = lambda: dataset
  return agent, dataset, dataset_fn, agent.training_data_spec
def create_dqn_agent_and_dataset_fn(
    action_spec, time_step_spec, train_step, batch_size
):
  """Builds and returns a dataset function for DQN Agent.

  Args:
    action_spec: Action spec used to size the dummy Q-network.
    time_step_spec: Time step spec for the agent.
    train_step: Train step counter variable for the agent.
    batch_size: Batch size applied by the returned dataset function.

  Returns:
    Tuple of (agent, dataset, dataset_fn, collect_data_spec).
  """
  q_net = build_dummy_sequential_net(
      fc_layer_params=(100,), action_spec=action_spec
  )
  agent = dqn_agent.DqnAgent(
      time_step_spec,
      action_spec,
      q_network=q_net,
      optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
      train_step_counter=train_step,
  )
  agent.initialize()
  def make_item(_):
    # Sample a deterministic trajectory matching the collect data spec; the
    # dataset element itself is ignored.
    traj = tensor_spec.sample_spec_nest(
        agent.collect_data_spec, seed=123, outer_dims=[2]
    )
    def scale_observation_only(item):
      # Scale float values in the sampled item by large value to avoid NaNs.
      if item.dtype == tf.float32:
        return tf.math.divide(item, 1.0e22)
      else:
        return item
    return tf.nest.map_structure(scale_observation_only, traj)
  # List comprehension replaces the manual append loop (same 100 items).
  items = [[i] for i in range(100)]
  dataset = tf.data.Dataset.zip((
      tf.data.Dataset.from_tensor_slices(items).map(make_item),
      tf.data.experimental.Counter(),
  ))
  dataset_fn = lambda: dataset.batch(batch_size)
  return agent, dataset, dataset_fn, agent.collect_data_spec
def build_actor(root_dir, env, agent, rb_observer, train_step):
  """Builds the Actor.

  Args:
    root_dir: Directory under which actor summaries are written.
    env: Environment the actor steps.
    agent: Agent whose collect policy drives the actor.
    rb_observer: Replay buffer observer receiving collected trajectories.
    train_step: Train step counter variable.

  Returns:
    A configured actor.Actor.
  """
  import os  # Local import: only needed for the summary-dir path join.
  tf_collect_policy = agent.collect_policy
  collect_policy = py_tf_eager_policy.PyTFEagerPolicy(
      tf_collect_policy, use_tf_function=True
  )
  # Join with a path separator; the original string concatenation produced a
  # sibling directory named "<root_dir>actor" instead of a subdirectory.
  temp_dir = os.path.join(root_dir, 'actor')
  test_actor = actor.Actor(
      env,
      collect_policy,
      train_step,
      steps_per_run=1,
      metrics=actor.collect_metrics(10),
      summary_dir=temp_dir,
      observers=[rb_observer],
  )
  return test_actor
def get_actor_thread(test_case, reverb_server_port, num_iterations=10):
  """Returns a thread that runs an Actor.

  Args:
    test_case: Test case instance providing `create_tempdir` and
      `checkedThread`.
    reverb_server_port: Port of the already-running Reverb server to connect
      the replay buffer observer and variable container to.
    num_iterations: Number of times the actor's run loop is executed.

  Returns:
    An unstarted checkedThread that builds and runs the actor when started.
  """
  def build_and_run_actor():
    root_dir = test_case.create_tempdir().full_path
    env, action_tensor_spec, time_step_tensor_spec = (
        get_cartpole_env_and_specs()
    )
    train_step = train_utils.create_train_step()
    q_net = build_dummy_sequential_net(
        fc_layer_params=(100,), action_spec=action_tensor_spec
    )
    agent = dqn_agent.DqnAgent(
        time_step_tensor_spec,
        action_tensor_spec,
        q_network=q_net,
        optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
        train_step_counter=train_step,
    )
    _, rb_observer = replay_buffer_utils.get_reverb_buffer_and_observer(
        agent.collect_data_spec,
        table_name=reverb_replay_buffer.DEFAULT_TABLE,
        sequence_length=2,
        reverb_server_address='localhost:{}'.format(reverb_server_port),
    )
    variable_container = reverb_variable_container.ReverbVariableContainer(
        server_address='localhost:{}'.format(reverb_server_port),
        table_names=[reverb_variable_container.DEFAULT_TABLE],
    )
    test_actor = build_actor(root_dir, env, agent, rb_observer, train_step)
    # Seed the variable container so a learner/actor pair agree on the
    # initial policy weights and train step.
    variables_dict = {
        reverb_variable_container.POLICY_KEY: agent.collect_policy.variables(),
        reverb_variable_container.TRAIN_STEP_KEY: train_step,
    }
    variable_container.update(variables_dict)
    for _ in range(num_iterations):
      test_actor.run()
  actor_thread = test_case.checkedThread(target=build_and_run_actor)
  return actor_thread
def check_variables_different(test_case, old_vars_numpy, new_vars_numpy):
  """Tests whether the two sets of variables are different.

  Useful for checking if variables were updated, i.e. a train step was run.

  Args:
    test_case: an instance of tf.test.TestCase for assertions
    old_vars_numpy: numpy representation of old variables
    new_vars_numpy: numpy representation of new variables
  """
  # Check if there is a change.
  def changed(a, b):
    return not np.equal(a, b).all()
  vars_changed = tf.nest.flatten(
      tf.nest.map_structure(changed, old_vars_numpy, new_vars_numpy)
  )
  # Assert if any of the variable changed.
  test_case.assertTrue(np.any(vars_changed))
def check_variables_same(test_case, old_vars_numpy, new_vars_numpy):
  """Tests whether the two sets of variables are the same.

  Useful for checking if variables were not updated, i.e. a loss step was run.

  Args:
    test_case: an instance of tf.test.TestCase for assertions
    old_vars_numpy: numpy representation of old variables
    new_vars_numpy: numpy representation of new variables
  """
  # Check that there is no change.
  def same(a, b):
    return np.equal(a, b).all()
  vars_same = tf.nest.flatten(
      tf.nest.map_structure(same, old_vars_numpy, new_vars_numpy)
  )
  # Assert if all of the variables are the same.
  test_case.assertTrue(np.all(vars_same))
def create_reverb_server_for_replay_buffer_and_variable_container(
    collect_policy, train_step, replay_buffer_capacity, port
):
  """Sets up one reverb server for replay buffer and variable container.

  Args:
    collect_policy: Policy whose variables and collect data spec define the
      signatures of the two tables.
    train_step: Train step variable stored alongside the policy weights.
    replay_buffer_capacity: Maximum number of items in the replay table.
    port: Port to start the Reverb server on.

  Returns:
    The started reverb.Server hosting both tables.
  """
  # Create the signature for the variable container holding the policy weights.
  variables = {
      reverb_variable_container.POLICY_KEY: collect_policy.variables(),
      reverb_variable_container.TRAIN_STEP_KEY: train_step,
  }
  variable_container_signature = tf.nest.map_structure(
      lambda variable: tf.TensorSpec(variable.shape, dtype=variable.dtype),
      variables,
  )
  # Create the signature for the replay buffer holding observed experience.
  replay_buffer_signature = tensor_spec.from_spec(
      collect_policy.collect_data_spec
  )
  replay_buffer_signature = tensor_spec.add_outer_dim(replay_buffer_signature)
  # Create and start the replay buffer and variable container server.
  server = reverb.Server(
      tables=[
          reverb.Table(  # Replay buffer storing experience.
              name=reverb_replay_buffer.DEFAULT_TABLE,
              sampler=reverb.selectors.Uniform(),
              remover=reverb.selectors.Fifo(),
              # TODO(b/159073060): Set rate limiter for SAC properly.
              rate_limiter=reverb.rate_limiters.MinSize(1),
              max_size=replay_buffer_capacity,
              max_times_sampled=0,
              signature=replay_buffer_signature,
          ),
          reverb.Table(  # Variable container storing policy parameters.
              name=reverb_variable_container.DEFAULT_TABLE,
              sampler=reverb.selectors.Uniform(),
              remover=reverb.selectors.Fifo(),
              rate_limiter=reverb.rate_limiters.MinSize(1),
              # Only the single latest set of variables is retained.
              max_size=1,
              max_times_sampled=0,
              signature=variable_container_signature,
          ),
      ],
      port=port,
  )
  return server
|
"""Models and database functions for project"""
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.orm import joinedload
import datetime
# This is the connection to the PostgreSQL database; we're getting this through
# the Flask-SQLAlchemy helper library. On this, we can find the `session`
# object, where we do most of our interactions (like committing, etc.)
db = SQLAlchemy()
def connect_to_db(app):
    """Connect the database to our Flask app."""
    # Configure to use our PostgreSQL database
    app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql:///jobtracker'
    # app.config['SQLALCHEMY_ECHO'] = True
    # Attach the app to the shared SQLAlchemy object and initialize it.
    db.app = app
    db.init_app(app)
if __name__ == "__main__":
    # As a convenience, if we run this module interactively, it will leave
    # you in a state of being able to work with the database directly.
    from server import app
    connect_to_db(app)
    # Python 3 print function; the original Python 2 print statement was a
    # SyntaxError under Python 3 (which the rest of this file targets).
    print("Connected to DB.")
import torch.nn as tn
import torch.nn.functional as tnf
import torch.utils.data as tud
import torch.utils.data.dataloader as tuddl
import torch.utils.data.dataset as tudds
import torch.autograd.variable as tav
import torchvision
import torchvision.transforms as tvt
class SiameseNetwork(tn.Module):
    """Siamese CNN: both inputs are embedded by one shared branch.

    The branch is an AlexNet-style convolutional stack followed by a fully
    connected head that maps the flattened features to a 2-vector.
    """
    def __init__(self):
        super(SiameseNetwork, self).__init__()
        # Convolutional feature extractor (modules listed in the exact order
        # of the reference architecture).
        conv_stack = [
            tn.Conv2d(1, 96, kernel_size=11, stride=1),
            tn.ReLU(inplace=True),
            tn.LocalResponseNorm(5, alpha=0.0001, beta=0.75, k=2),
            tn.MaxPool2d(3, stride=2),
            tn.Conv2d(96, 256, kernel_size=5, stride=1, padding=2),
            tn.ReLU(inplace=True),
            tn.LocalResponseNorm(5, alpha=0.0001, beta=0.75, k=2),
            tn.MaxPool2d(3, stride=2),
            tn.Dropout(p=0.3),
            tn.Conv2d(256, 384, kernel_size=3, stride=1, padding=1),
            tn.ReLU(inplace=True),
            tn.Conv2d(384, 256, kernel_size=3, stride=1, padding=1),
            tn.ReLU(inplace=True),
            tn.MaxPool2d(3, stride=2),
            tn.Dropout(p=0.3),
        ]
        self.cnn = tn.Sequential(*conv_stack)
        # Fully connected head; 30976 = flattened conv feature size
        # (presumably for 105x105 single-channel inputs -- confirm).
        head = [
            tn.Linear(30976, 1024),
            tn.ReLU(inplace=True),
            tn.Dropout(p=0.5),
            tn.Linear(1024, 128),
            tn.ReLU(inplace=True),
            tn.Linear(128, 2),
        ]
        self.fc = tn.Sequential(*head)
    def forward_one(self, x):
        """Embed a single batch of images through the shared branch."""
        features = self.cnn(x)
        flat = features.view(features.size(0), -1)
        return self.fc(flat)
    def forward(self, input1, input2):
        """Return the embeddings of both inputs, computed with shared weights."""
        return self.forward_one(input1), self.forward_one(input2)
|
# Decide whether the top two dolls in the basket should be removed.
def determinator(answer, basket):
    """Pop the two newest dolls if they match; return the updated score.

    Mutates `basket` in place when the top two entries are equal and adds 2
    to `answer`; otherwise leaves both unchanged.
    """
    if basket[-1] != basket[-2]:
        return answer
    del basket[-2:]
    return answer + 2
# Find the doll sitting highest in the given column.
def find_top(board, m):
    """Remove and return the topmost (first nonzero) doll in column `m`.

    Returns None when the column is empty. Mutates `board` in place by
    zeroing the cell the doll was taken from.
    """
    for row in board:
        if row[m] != 0:
            doll = row[m]
            row[m] = 0
            return doll
    return None
def solution(board, moves):
    """Simulate the claw machine; return the count of dolls removed in pairs."""
    removed = 0
    basket = []  # dolls picked up by the crane, in pickup order
    for move in moves:
        # Moves are 1-indexed columns.
        doll = find_top(board, move - 1)
        if doll is None:
            continue
        basket.append(doll)
        if len(basket) >= 2:
            removed = determinator(removed, basket)
    return removed
|
import linecache
def client_id():
    """Return the client id stored on line 1 of id.txt, stripped."""
    # linecache reads (and caches) the file itself; the original also opened
    # an unused file handle, which has been removed.
    return linecache.getline('id.txt', 1).strip()
def secret_id():
    """Return the secret id stored on line 2 of id.txt, stripped."""
    # linecache reads (and caches) the file itself; the original also opened
    # an unused file handle, which has been removed.
    return linecache.getline('id.txt', 2).strip()
def discord_token():
    """Return the Discord token stored on line 3 of id.txt, stripped."""
    # linecache reads (and caches) the file itself; the original also opened
    # an unused file handle, which has been removed.
    return linecache.getline('id.txt', 3).strip()
from hed.schema.hed_schema_constants import HedKey
import copy
class HedTag:
    """ A single HED tag.

    Notes:
        - HedTag is a smart class in that it keeps track of its original value and positioning
          as well as pointers to the relevant HED schema information, if relevant.
    """
    def __init__(self, hed_string, hed_schema, span=None, def_dict=None):
        """ Creates a HedTag.

        Parameters:
            hed_string (str): Source hed string for this tag.
            hed_schema (HedSchema): A parameter for calculating canonical forms on creation.
            span (int, int): The start and end indexes of the tag in the hed_string.
            def_dict(DefinitionDict or None): The def dict to use to identify def/def expand tags.
        """
        self._hed_string = hed_string
        if span is None:
            span = (0, len(hed_string))
        # This is the span into the original hed string for this tag
        self.span = span
        # If this is present, use this as the org tag for most purposes.
        # This is not generally used anymore, but you can use it to replace a tag in place.
        self._tag = None
        # Namespace is derived from org_tag, so it must be computed after span is set.
        self._namespace = self._get_schema_namespace(self.org_tag)
        # This is the schema this tag was converted to.
        self._schema = None
        self._schema_entry = None
        self._extension_value = ""
        self._parent = None
        self._expandable = None
        self._expanded = False
        self._calculate_to_canonical_forms(hed_schema)
        if def_dict:
            def_dict.construct_def_tag(self)
    def copy(self):
        """ Return a deep copy of this tag.

        Returns:
            HedTag: The copied group.
        """
        # Temporarily detach the parent so the deepcopy does not pull in the
        # whole enclosing group structure.
        save_parent = self._parent
        self._parent = None
        return_copy = copy.deepcopy(self)
        self._parent = save_parent
        return return_copy
    @property
    def schema_namespace(self):
        """ Library namespace for this tag if one exists.

        Returns:
            namespace (str): The library namespace, including the colon.
        """
        return self._namespace
    @property
    def short_tag(self):
        """ Short form including value or extension.

        Returns:
            short_tag (str): The short form of the tag, including value or extension.
        """
        if self._schema_entry:
            return f"{self._namespace}{self._schema_entry.short_tag_name}{self._extension_value}"
        return str(self)
    @property
    def base_tag(self):
        """ Long form without value or extension.

        Returns:
            base_tag (str): The long form of the tag, without value or extension.
        """
        if self._schema_entry:
            return self._schema_entry.long_tag_name
        return str(self)
    @property
    def short_base_tag(self):
        """ Short form without value or extension.

        Returns:
            base_tag (str): The short non-extension portion of a tag.

        Notes:
            - ParentNodes/Def/DefName would return just "Def".
        """
        if self._schema_entry:
            return self._schema_entry.short_tag_name
        return str(self)
    @short_base_tag.setter
    def short_base_tag(self, new_tag_val):
        """ Change base tag, leaving extension or value.

        Parameters:
            new_tag_val (str): The new short_base_tag for this tag.

        :raises ValueError:
            - If the tag wasn't already identified.

        Note:
            - Generally this is used to swap def to def-expand.
        """
        if self._schema_entry:
            tag_entry = None
            if self._schema:
                # Takes-value tags are stored in the schema with a "/#" suffix.
                if self.is_takes_value_tag():
                    new_tag_val = new_tag_val + "/#"
                tag_entry = self._schema.get_tag_entry(new_tag_val, schema_namespace=self.schema_namespace)
            self._schema_entry = tag_entry
        else:
            raise ValueError("Cannot set unidentified tags")
    @property
    def org_base_tag(self):
        """ Original form without value or extension.

        Returns:
            base_tag (str): The original form of the tag, without value or extension.

        Notes:
            - Warning: This could be empty if the original tag had a name_prefix prepended.
              e.g. a column where "Label/" is prepended, thus the column value has zero base portion.
        """
        if self._schema_entry:
            extension_len = len(self._extension_value)
            if not extension_len:
                return self.tag
            org_len = len(self.tag)
            if org_len == extension_len:
                return ""
            return self.tag[:org_len - extension_len]
        return str(self)
    def tag_modified(self):
        """ Return true if tag has been modified from original.

        Returns:
            bool: Return True if the tag is modified.

        Notes:
            - Modifications can include adding a column name_prefix.
        """
        return bool(self._tag)
    @property
    def tag(self):
        """ Returns the tag.

        Returns the original tag if no user form set.

        Returns:
            tag (str): The custom set user form of the tag.
        """
        if self._tag:
            return self._tag
        return self.org_tag
    @tag.setter
    def tag(self, new_tag_val):
        """ Allow you to overwrite the tag output text.

        Parameters:
            new_tag_val (str): New (implicitly long form) of tag to set.

        Notes:
            - You probably don't actually want to call this.
        """
        self._tag = new_tag_val
        # The schema entry is recomputed against the new text.
        self._schema_entry = None
        self._calculate_to_canonical_forms(self._schema)
    @property
    def extension(self):
        """ Get the extension or value of tag.

        Generally this is just the portion after the last slash.
        Returns an empty string if no extension or value.

        Returns:
            str: The tag name.

        Notes:
            - This tag must have been computed first.
        """
        if self._extension_value:
            # Stored internally with a leading slash; strip it for callers.
            return self._extension_value[1:]
        return ""
    @extension.setter
    def extension(self, x):
        # Internal storage keeps the leading slash separator.
        self._extension_value = f"/{x}"
    @property
    def long_tag(self):
        """ Long form including value or extension.

        Returns:
            str: The long form of this tag.
        """
        if self._schema_entry:
            return f"{self._namespace}{self._schema_entry.long_tag_name}{self._extension_value}"
        return str(self)
    @property
    def org_tag(self):
        """ Return the original unmodified tag.

        Returns:
            str: The original unmodified tag.
        """
        return self._hed_string[self.span[0]:self.span[1]]
    @property
    def tag_terms(self):
        """ Return a tuple of all the terms in this tag Lowercase.

        Returns:
            tag_terms (str): Tuple of terms or empty tuple for unidentified tag.

        Notes:
            - Does not include any extension.
        """
        if self._schema_entry:
            return self._schema_entry.tag_terms
        return tuple()
    @property
    def expanded(self):
        """Returns if this is currently expanded or not.

        Will always be false unless expandable is set. This is primarily used for Def/Def-expand tags at present.

        Returns:
            bool: Returns true if this is currently expanded.
        """
        return self._expanded
    @property
    def expandable(self):
        """Returns if this is expandable.

        This is primarily used for Def/Def-expand tags at present.

        Returns:
            HedGroup or HedTag or None: Returns the expanded form of this tag.
        """
        return self._expandable
    def is_column_ref(self):
        """ Returns if this tag is a column reference from a sidecar.

        You should only see these if you are directly accessing sidecar strings, tools should remove them otherwise.

        Returns:
            bool: Returns True if this is a column ref.
        """
        return self.org_tag.startswith('{') and self.org_tag.endswith('}')
    def __str__(self):
        """ Convert this HedTag to a string.

        Returns:
            str: The original tag if we haven't set a new tag.(e.g. short to long).
        """
        if self._schema_entry:
            return self.short_tag
        if self._tag:
            return self._tag
        return self._hed_string[self.span[0]:self.span[1]]
    def lower(self):
        """ Convenience function, equivalent to str(self).lower(). """
        return str(self).lower()
    def _calculate_to_canonical_forms(self, hed_schema):
        """ Update internal state based on schema.

        Parameters:
            hed_schema (HedSchema or HedSchemaGroup): The schema to use to validate this tag.

        Returns:
            list: A list of issues found during conversion. Each element is a dictionary.
        """
        tag_entry, remainder, tag_issues = hed_schema.find_tag_entry(self, self.schema_namespace)
        self._schema_entry = tag_entry
        self._schema = hed_schema
        if self._schema_entry:
            if remainder:
                # Remainder is the unmatched trailing portion: the value/extension.
                self._extension_value = remainder
        return tag_issues
    def get_stripped_unit_value(self):
        """ Return the extension portion without units.

        Returns:
            stripped_unit_value (str): The extension portion with the units removed.
            unit (str or None): None if no valid unit found.

        Examples:
            'Duration/3 ms' will return '3'
        """
        tag_unit_classes = self.unit_classes
        stripped_value, unit = self._get_tag_units_portion(tag_unit_classes)
        if stripped_value:
            return stripped_value, unit
        # No valid unit found: return the raw extension unchanged.
        return self.extension, None
    @property
    def unit_classes(self):
        """ Return a dict of all the unit classes this tag accepts.

        Returns:
            unit_classes (dict): A dict of unit classes this tag accepts.

        Notes:
            - Returns empty dict if this is not a unit class tag.
            - The dictionary has unit name as the key and HedSchemaEntry as value.
        """
        if self._schema_entry:
            return self._schema_entry.unit_classes
        return {}
    @property
    def value_classes(self):
        """ Return a dict of all the value classes this tag accepts.

        Returns:
            dict: A dictionary of HedSchemaEntry value classes this tag accepts.

        Notes:
            - Returns empty dict if this is not a value class.
            - The dictionary has unit name as the key and HedSchemaEntry as value.
        """
        if self._schema_entry:
            return self._schema_entry.value_classes
        return {}
    @property
    def attributes(self):
        """ Return a dict of all the attributes this tag has.

        Returns empty dict if this is not a value tag.

        Returns:
            dict: A dict of attributes this tag has.

        Notes:
            - Returns empty dict if this is not a unit class tag.
            - The dictionary has unit name as the key and HedSchemaEntry as value.
        """
        if self._schema_entry:
            return self._schema_entry.attributes
        return {}
    def tag_exists_in_schema(self):
        """ Get the schema entry for this tag.

        Returns:
            bool: True if this tag exists.

        Notes:
            - This does NOT assure this is a valid tag.
        """
        return bool(self._schema_entry)
    def is_takes_value_tag(self):
        """ Return true if this is a takes value tag.

        Returns:
            bool: True if this is a takes value tag.
        """
        if self._schema_entry:
            return self._schema_entry.has_attribute(HedKey.TakesValue)
        return False
    def is_unit_class_tag(self):
        """ Return true if this is a unit class tag.

        Returns:
            bool: True if this is a unit class tag.
        """
        if self._schema_entry:
            return bool(self._schema_entry.unit_classes)
        return False
    def is_value_class_tag(self):
        """ Return true if this is a value class tag.

        Returns:
            bool: True if this is a tag with a value class.
        """
        if self._schema_entry:
            return bool(self._schema_entry.value_classes)
        return False
    def is_basic_tag(self):
        """ Return True if a known tag with no extension or value.

        Returns:
            bool: True if this is a known tag without extension or value.
        """
        return bool(self._schema_entry and not self.extension)
    def has_attribute(self, attribute):
        """ Return true if this is an attribute this tag has.

        Parameters:
            attribute (str): Name of the attribute.

        Returns:
            bool: True if this tag has the attribute.
        """
        if self._schema_entry:
            return self._schema_entry.has_attribute(attribute)
        return False
    def is_extension_allowed_tag(self):
        """ Check if tag has 'extensionAllowed' attribute.

        Recursively checks parent tag entries for the attribute as well.

        Returns:
            bool: True if the tag has the 'extensionAllowed' attribute. False, if otherwise.
        """
        # Takes-value tags hold a value, not an extension.
        if self.is_takes_value_tag():
            return False
        if self._schema_entry:
            return self._schema_entry.any_parent_has_attribute(HedKey.ExtensionAllowed)
        return False
    def get_tag_unit_class_units(self):
        """ Get the unit class units associated with a particular tag.

        Returns:
            list: A list containing the unit class units associated with a particular tag or an empty list.
        """
        units = []
        unit_classes = self.unit_classes
        for unit_class_entry in unit_classes.values():
            units += unit_class_entry.units.keys()
        return units
    def get_unit_class_default_unit(self):
        """ Get the default unit class unit for this tag.

        Returns:
            str: The default unit class unit associated with the specific tag or an empty string.
        """
        default_unit = ''
        unit_classes = self.unit_classes.values()
        if unit_classes:
            # Only the first unit class is consulted for the default.
            first_unit_class_entry = list(unit_classes)[0]
            default_unit = first_unit_class_entry.has_attribute(HedKey.DefaultUnits, return_value=True)
        return default_unit
    def base_tag_has_attribute(self, tag_attribute):
        """ Check to see if the tag has a specific attribute.

        Parameters:
            tag_attribute (str): A tag attribute.

        Returns:
            bool: True if the tag has the specified attribute. False, if otherwise.
        """
        if not self._schema_entry:
            return False
        return self._schema_entry.base_tag_has_attribute(tag_attribute)
    def any_parent_has_attribute(self, attribute):
        """ Check if the tag or any of its parents has the attribute.

        Parameters:
            attribute (str): The name of the attribute to check for.

        Returns:
            bool: True if the tag has the given attribute. False, if otherwise.

        Notes:
            - Implicitly returns None (falsy) when the tag has no schema entry.
        """
        if self._schema_entry:
            return self._schema_entry.any_parent_has_attribute(attribute=attribute)
    @staticmethod
    def _get_schema_namespace(org_tag):
        """ Finds the library namespace for the tag.

        Parameters:
            org_tag (str): A string representing a tag.

        Returns:
            str: Library namespace string or empty.
        """
        first_slash = org_tag.find("/")
        first_colon = org_tag.find(":")
        if first_colon != -1:
            # A colon after the first slash is not a namespace separator.
            if first_slash != -1 and first_colon > first_slash:
                return ""
            return org_tag[:first_colon + 1]
        return ""
    def _get_tag_units_portion(self, tag_unit_classes):
        """ Check that this string has valid units and remove them.

        Parameters:
            tag_unit_classes (dict): Dictionary of valid UnitClassEntry objects for this tag.

        Returns:
            stripped_value (str): The value with the units removed, or None if no valid unit.
        """
        # Units are expected after the last space; e.g. "3 ms" -> ("3", "ms").
        value, _, units = self.extension.rpartition(" ")
        if not units:
            return None, None
        for unit_class_entry in tag_unit_classes.values():
            all_valid_unit_permutations = unit_class_entry.derivative_units
            possible_match = self._find_modifier_unit_entry(units, all_valid_unit_permutations)
            if possible_match and not possible_match.has_attribute(HedKey.UnitPrefix):
                return value, units
            # Repeat the above, but as a prefix
            possible_match = self._find_modifier_unit_entry(value, all_valid_unit_permutations)
            if possible_match and possible_match.has_attribute(HedKey.UnitPrefix):
                return units, value
        return None, None
    @staticmethod
    def _find_modifier_unit_entry(units, all_valid_unit_permutations):
        """ Look up a unit string, honoring case-sensitivity of unit symbols. """
        possible_match = all_valid_unit_permutations.get(units)
        # If we have a match that's a unit symbol, we're done, return it.
        if possible_match and possible_match.has_attribute(HedKey.UnitSymbol):
            return possible_match
        possible_match = all_valid_unit_permutations.get(units.lower())
        # Unit symbols must match including case, a match of a unit symbol now is something like M becoming m.
        if possible_match and possible_match.has_attribute(HedKey.UnitSymbol):
            possible_match = None
        return possible_match
    def is_placeholder(self):
        """ Return True if this tag contains a '#' placeholder character. """
        if "#" in self.org_tag or "#" in self._extension_value:
            return True
        return False
    def replace_placeholder(self, placeholder_value):
        """ If tag has a placeholder character(#), replace with value.

        Parameters:
            placeholder_value (str): Value to replace placeholder with.
        """
        if self.is_placeholder():
            if self._schema_entry:
                self._extension_value = self._extension_value.replace("#", placeholder_value)
            else:
                self._tag = self.tag.replace("#", placeholder_value)
    def __hash__(self):
        # Hash is case-insensitive, mirroring __eq__'s lowercase comparisons.
        if self._schema_entry:
            return hash(
                self._namespace + self._schema_entry.short_tag_name.lower() + self._extension_value.lower())
        else:
            return hash(self.lower())
    def __eq__(self, other):
        # Tags compare equal to matching strings, or to other HedTags whose
        # short or original forms match case-insensitively.
        if self is other:
            return True
        if isinstance(other, str):
            return self.lower() == other
        if not isinstance(other, HedTag):
            return False
        if self.short_tag.lower() == other.short_tag.lower():
            return True
        if self.org_tag.lower() == other.org_tag.lower():
            return True
        return False
    def __deepcopy__(self, memo):
        # check if the object has already been copied
        if id(self) in memo:
            return memo[id(self)]
        # create a new instance of HedTag class
        new_tag = self.__class__.__new__(self.__class__)
        new_tag.__dict__.update(self.__dict__)
        # add the new object to the memo dictionary
        memo[id(self)] = new_tag
        # Deep copy the attributes that need it(most notably, we don't copy schema/schema entry)
        new_tag._parent = copy.deepcopy(self._parent, memo)
        new_tag._expandable = copy.deepcopy(self._expandable, memo)
        new_tag._expanded = copy.deepcopy(self._expanded, memo)
        return new_tag
|
import turtle
def circle():
    """Trace an approximate circle: step 1 unit, turn 1 degree, until the
    heading reaches 359, then make one final 1-degree turn.

    NOTE(review): assumes the turtle starts at a heading below 359 (e.g. 0);
    otherwise the loop body never runs.
    """
    while turtle.heading() < 359:
        turtle.forward(1)
        turtle.left(1)
    turtle.left(1)
def poly(r, teta):
    """Draw a polygon with side length `r`, turning `teta` degrees per side.

    The number of sides is 360 / teta iterations (rounded up via the float
    countdown when teta does not divide 360 evenly).
    """
    sides_left = 360 / teta
    while sides_left > 0:
        turtle.forward(r)
        turtle.left(teta)
        sides_left -= 1
# Draw ten 30-degree polygons, moving 40 units forward between them.
n = 10
while n > 0:
    n = n - 1
    poly(10, 30)
    turtle.forward(40)
turtle.done()  # keep the window open until the user closes it
|
import requests
from bs4 import BeautifulSoup
import matplotlib.pyplot as plt
import time
URL = 'https://www.gismeteo.ru/'
# Interactive mode: the figure stays responsive while the loop runs.
plt.ion()
fig, ax = plt.subplots()
temp_data = []  # sampled temperatures, in order
time_data = []  # epoch timestamps of each sample
start_time = time.time()
first_time = True
while True:
    # Poll the site once per minute (and immediately on the first pass).
    if time.time() - start_time >= 60 or first_time:
        first_time = False
        start_time = time.time()
        page = requests.get(URL, headers = {'User-agent': 'Mozilla/5.0 (Windows NT \
10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.150 \
Safari/537.36'})
        soup = BeautifulSoup(page.content, 'html.parser')
        target = soup.find_all('div', class_='js_meas_container temperature')
        try:
            current_temp = float(target[0]['data-value'])
        except IndexError:
            # Element missing (layout changed or request blocked): skip sample.
            continue
        temp_data.append(current_temp)
        time_data.append(start_time)
        # Plot only the newly sampled point. The original re-scattered the
        # whole history every minute, piling up artists quadratically.
        ax.scatter(start_time, current_temp, c='r')
        fig.canvas.draw()
        fig.canvas.flush_events()
|
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Post(models.Model):
    """A user's post: optional text, an image, and creation/update dates."""
    user = models.ForeignKey(User, on_delete=models.PROTECT)
    text = models.CharField(max_length=400, blank=True, null=False)
    image = models.ImageField(upload_to='images/')
    # Fixed swapped options: auto_now_add sets the field once at creation;
    # auto_now refreshes it on every save(). The original had them reversed.
    created_at = models.DateField(auto_now_add=True)
    updated_at = models.DateField(auto_now=True)
    def __str__(self):
        return self.text
|
#Guessing Game
import random
game_over = False
SECRET = random.randint(1, 100)
# Allowed guesses per difficulty mode.
modes = {"easy": 10, "hard": 5}
# (Removed the debug print(SECRET) that revealed the answer to the player.)
print('Welcome to the Guessing Game')
MODE_CHOICE = input("'easy' or 'hard'? :").lower()
# Fall back to easy mode on unrecognized input instead of raising KeyError.
remaining_guesses = modes.get(MODE_CHOICE, modes["easy"])
print('Guess the right number between 1-100 to win')
def guess():
    """Run the guessing loop until the player wins or runs out of guesses.

    Reads SECRET and updates the module globals remaining_guesses and
    game_over. The original looped on `guesses > 0 OR not game_over`, which
    spun forever once the guesses ran out, and its while/else clause printed
    'Game Over!' even after a correct guess.
    """
    global remaining_guesses
    global game_over
    while remaining_guesses > 0 and not game_over:
        user_guess = int(input('Pick a number: '))
        if user_guess > SECRET:
            remaining_guesses -= 1
            print(f'too high, {remaining_guesses} guesses remain')
        elif user_guess < SECRET:
            remaining_guesses -= 1
            print(f'too low, {remaining_guesses} guesses remain')
        else:
            print('Correct!')
            game_over = True
    if not game_over:
        # Loop ended because the guesses ran out, not because of a win.
        print('Game Over!')
guess()
from django.apps import AppConfig
class IntraTypeDataConfig(AppConfig):
    """Django application configuration for the intra_type_data app."""
    # Dotted path label Django uses to identify this application.
    name = 'intra_type_data'
import importlib
import os
import pickle
from pytracking.evaluation.environment import env_settings
class Tracker:
"""Wraps the tracker for evaluation and running purposes.
args:
name: Name of tracking method.
parameter_name: Name of parameter file.
run_id: The run id.
"""
def __init__(self, name: str, parameter_name: str, run_id: int = None):
self.name = name
self.parameter_name = parameter_name
self.run_id = run_id
env = env_settings()
if self.run_id is None:
self.results_dir = '{}/{}/{}'.format(env.results_path, self.name, self.parameter_name)
else:
self.results_dir = '{}/{}/{}_{:03d}'.format(env.results_path, self.name, self.parameter_name, self.run_id)
if not os.path.exists(self.results_dir):
os.makedirs(self.results_dir)
tracker_module = importlib.import_module('pytracking.tracker.{}'.format(self.name))
self.parameters = self.get_parameters()
self.tracker_class = tracker_module.get_tracker_class()
self.default_visualization = getattr(self.parameters, 'visualization', False)
self.default_debug = getattr(self.parameters, 'debug', 0)
def run(self, seq, visualization=None, debug=None):
"""Run tracker on sequence.
args:
seq: Sequence to run the tracker on.
visualization: Set visualization flag (None means default value specified in the parameters).
debug: Set debug level (None means default value specified in the parameters).
"""
visualization_ = visualization
debug_ = debug
if debug is None:
debug_ = self.default_debug
if visualization is None:
if debug is None:
visualization_ = self.default_visualization
else:
visualization_ = True if debug else False
self.parameters.visualization = visualization_
self.parameters.debug = debug_
tracker = self.tracker_class(self.parameters)
output_bb, execution_times = tracker.track_sequence(seq)
self.parameters.free_memory()
return output_bb, execution_times
def run_vot(self, imgtype, debug=None):
"""Run the tracker with the webcam.
args:
debug: Debug level.
"""
debug_ = debug
if debug is None:
debug_ = self.default_debug
self.parameters.debug = debug_
self.parameters.tracker_name = self.name
self.parameters.param_name = self.parameter_name
tracker = self.tracker_class(self.parameters)
tracker.track_vot(imgtype)
def run_vot2(self, imgtype, debug=None):
"""Run the tracker with the webcam.
args:
debug: Debug level.
"""
debug_ = debug
if debug is None:
debug_ = self.default_debug
self.parameters.debug = debug_
self.parameters.tracker_name = self.name
self.parameters.param_name = self.parameter_name
tracker = self.tracker_class(self.parameters)
tracker.track_vot2(imgtype)
def get_parameters(self):
    """Get parameters.

    Loads a previously pickled parameter set from the results directory
    when present; otherwise imports the parameter module, and for
    identified runs (run_id is not None) caches the parameters to disk.
    """
    parameter_file = '{}/parameters.pkl'.format(self.results_dir)
    if os.path.isfile(parameter_file):
        # Use context managers so file handles are closed deterministically
        # (the original passed open() straight to pickle and leaked handles).
        with open(parameter_file, 'rb') as f:
            return pickle.load(f)
    param_module = importlib.import_module('pytracking.parameter.{}.{}'.format(self.name, self.parameter_name))
    params = param_module.parameters()
    if self.run_id is not None:
        with open(parameter_file, 'wb') as f:
            pickle.dump(params, f)
    return params
|
#
# Comparison between different number of grid points in mesh
#
import pybamm
from tec_reduced_model.set_parameters import set_thermal_parameters

pybamm.set_logging_level("INFO")

# Define TDFN with a lumped thermal model
model = pybamm.lithium_ion.DFN(
    options={
        "thermal": "lumped",
        "dimensionality": 0,
        "cell geometry": "arbitrary",
    },
    name="TDFN",
)

# Change simulation parameters here
temperature = 25  # in degC
Crate = 1

# Define parameter set Chen 2020 (see PyBaMM documentation for details)
# This is the reference parameter set, which is later adjusted for the temperature
param = pybamm.ParameterValues(chemistry=pybamm.parameter_sets.Chen2020)
param = set_thermal_parameters(param, 20, 2.85e6, temperature)

# Solve the model at successively finer meshes so the solutions can be
# compared visually for convergence.
mesh_factors = [1, 2, 4, 8]
solutions = []
var = pybamm.standard_spatial_vars
for factor in mesh_factors:
    var_pts = {
        var.x_n: 20 * factor,
        var.x_s: 20 * factor,
        var.x_p: 20 * factor,
        var.r_n: 30 * factor,
        var.r_p: 30 * factor,
        var.y: 10,
        var.z: 10,
    }
    sim = pybamm.Simulation(
        model,
        parameter_values=param,
        var_pts=var_pts,
        C_rate=Crate,
    )
    # (removed a stray no-op statement `sim.model.name` that was here)
    sim.solve([0, 3600])
    # Tag each solution with its mesh factor so the plot legend is readable.
    sim.solution.model.name += " x{} mesh".format(factor)
    solutions.append(sim.solution)

pybamm.dynamic_plot(solutions)
|
import datetime
from pychesscom.clients.base_client import BaseClient
from pychesscom.utils.response import Response
from pychesscom.utils.route import Route
class Player:
    """
    Class for handling endpoints of player information.

    Args:
        client(BaseClient): HTTP client for API requests
    """

    def __init__(self, client: BaseClient):
        self._client = client

    async def _request(self, path: str) -> Response:
        """
        Build a Route for *path* and perform the API request.

        Shared helper so the route-building/request boilerplate, previously
        duplicated in every endpoint method, lives in one place.

        Args:
            path(str): Endpoint path relative to the API root
        Returns:
            Response: Response of API request
        """
        route = Route(path)
        response = await self._client.request(route)
        return response

    async def get_details(self, username: str) -> Response:
        """
        Get profile details of a player.
        Chess.com API: https://www.chess.com/news/view/published-data-api#pubapi-endpoint-player
        Args:
            username(str): The username of a player on chess.com
        Returns:
            Response: Response of API request
        """
        return await self._request(f'player/{username}')

    async def get_stats(self, username: str) -> Response:
        """
        Get stats of a player.
        Chess.com API: https://www.chess.com/news/view/published-data-api#pubapi-endpoint-player-stats
        Args:
            username(str): The username of a player on chess.com
        Returns:
            Response: Response of API request
        """
        return await self._request(f'player/{username}/stats')

    async def get_online_status(self, username: str) -> Response:
        """
        Get online status of a player.
        Chess.com API: https://www.chess.com/news/view/published-data-api#pubapi-endpoint-player-is-online
        Args:
            username(str): The username of a player on chess.com
        Returns:
            Response: Response of API request
        """
        return await self._request(f'player/{username}/is-online')

    async def get_clubs(self, username: str) -> Response:
        """
        Get clubs of a player.
        Chess.com API: https://www.chess.com/news/view/published-data-api#pubapi-endpoint-player-clubs
        Args:
            username(str): The username of a player on chess.com
        Returns:
            Response: Response of API request
        """
        return await self._request(f'player/{username}/clubs')

    async def get_matches(self, username: str) -> Response:
        """
        Get team matches of a player.
        Chess.com API: https://www.chess.com/news/view/published-data-api#pubapi-endpoint-player-matches
        Args:
            username(str): The username of a player on chess.com
        Returns:
            Response: Response of API request
        """
        return await self._request(f'player/{username}/matches')

    async def get_tournaments(self, username: str) -> Response:
        """
        Get tournaments of a player.
        Chess.com API: https://www.chess.com/news/view/published-data-api#pubapi-endpoint-player-tournaments
        Args:
            username(str): The username of a player on chess.com
        Returns:
            Response: Response of API request
        """
        return await self._request(f'player/{username}/tournaments')

    async def get_current_games(self, username: str) -> Response:
        """
        Get current games of a player.
        Chess.com API: https://www.chess.com/news/view/published-data-api#pubapi-endpoint-games-current
        Args:
            username(str): The username of a player on chess.com
        Returns:
            Response: Response of API request
        """
        return await self._request(f'player/{username}/games')

    async def get_current_games_to_move(self, username: str) -> Response:
        """
        Get current games of a player where it is the player's turn to move.
        Chess.com API: https://www.chess.com/news/view/published-data-api#pubapi-endpoint-games-tomove
        Args:
            username(str): The username of a player on chess.com
        Returns:
            Response: Response of API request
        """
        return await self._request(f'player/{username}/games/to-move')

    async def get_monthly_archive(self, username: str) -> Response:
        """
        Get monthly archives of a player.
        Chess.com API: https://www.chess.com/news/view/published-data-api#pubapi-endpoint-games-archive-list
        Args:
            username(str): The username of a player on chess.com
        Returns:
            Response: Response of API request
        """
        return await self._request(f'player/{username}/games/archives')

    async def get_games(self, username: str, year: int, month: int) -> Response:
        """
        Get games of a player in a particular month.
        Chess.com API: https://www.chess.com/news/view/published-data-api#pubapi-endpoint-games-archive
        Args:
            username(str): The username of a player on chess.com
            year(int): Year of archive
            month(int): Month of archive
        Returns:
            Response: Response of API request
        Example:
            .. code-block:: python
                from pychesscom import ChessComClient
                client = ChessComClient()
                response = await client.player.get_games('erik', 2009, 10)
                print(response)
        """
        # Chess.com expects zero-padded, two-digit month numbers (e.g. '03').
        return await self._request(f'player/{username}/games/{year}/{month:02d}')

    async def get_titled_players(self, title: str) -> Response:
        """
        Get titled players.
        Chess.com API: https://www.chess.com/news/view/published-data-api#pubapi-endpoint-titled
        Args:
            title(str): The title abbreviation
        Returns:
            Response: Response of API request
        """
        return await self._request(f'titled/{title}')
|
#coding:utf-8
from struct import pack,unpack
import numpy as np
class MecanumBase():
    """Command encoder and simple motion planner for a mecanum-wheel base.

    Each command method returns a 14-byte binary frame (header
    85,170,30,253,8, an opcode, the payload bytes and a checksum) ready to
    be written to the robot's control port.

    NOTE(review): ``''.join`` over values produced by ``pack`` only works
    on Python 2, where ``pack`` returns ``str``; on Python 3 it raises
    TypeError. Confirm the target interpreter before reuse.
    """
    def __init__(self):
        self.dir=[]  # planned heading (degrees) for each control period
        self.v=0     # planned translational speed
        self.av=0    # planned angular speed
        self.t1=0    # time required by the translation part of the last plan
        self.t2=0    # time required by the rotation part of the last plan
    def __encode__(self,vel,angle,angle_v,angle_vd):
        """Pack a velocity command into the 14-byte wire frame.

        vel: translation speed; angle: heading in degrees;
        angle_v: angular speed in 0.1 deg/s units;
        angle_vd: rotation direction flag (0 counter clockwise, 1 clockwise).
        """
        if vel<0: vel = 0
        vel = int(vel)%3000
        angle_v = abs(int(angle_v))
        angle = abs(angle)%360 # safety clamp on the parameters
        Lv = vel%256 #low byte of vel
        Hv = (vel>>8)%256 #high byte of vel
        La = angle%256 #low byte of angle
        Ha = angle>>8 #high byte of angle
        av = angle_v%256 #angle vel. unit 0.1degree/s
        avd = abs(angle_vd)%2 #direction of angle vel. 0 counter clockwise, 1 clockwise
        check = 255-(161+Lv+Hv+La+Ha+av+avd)%256 #check byte
        cmd = [85,170,30,253,8,161,Lv,Hv,La,Ha,av,avd,0,check]
        buf = map(lambda i:pack('B',i),cmd)
        buffer = ''.join(buf)  # NOTE(review): Python 2 only, see class docstring
        return buffer
    def stop(self):
        """Return the all-stop command frame."""
        return self.__encode__(0,0,0,0)
    def translateV(self,v,d): #mm/s
        '''Translate immediately with velocity v (mm/s) along heading d.
        d: 0 forward, 90 left, 180 back, 270 right
        '''
        if v<0:v=0
        v = 0.815*v #calibrate the vel (empirical factor)
        return self.__encode__(v,d,0,0)
    def __encode__A(self,v): #v in deg/s, resolution 0.1 deg/s
        '''Encode a pure rotation. v: angular velocity in deg/s; the sign
        selects the direction flag (v<0 -> 0, v>0 -> 1); v == 0 stops.'''
        if v<0: d = 0
        elif v>0: d = 1
        else: return self.__encode__(0,0,0,0)
        v = 0.8*v  # empirical calibration factor
        return self.__encode__(0,0,abs(v)*10,d)
    def rotateV(self,v): #deg/s
        '''Rotate in place with angular velocity v (deg/s), without planning.
        NOTE(review): the original doc claimed positive v is counterclockwise,
        but the direction-flag comment in __encode__ implies the opposite —
        confirm on hardware.'''
        return self.__encode__A(v)
    #turn
    def _dir(self,v):
        # Map signed speed to a heading: forward (0) for v>=0, backward (180).
        if v>=0: return 0
        elif v<0: return 180
    def __encode__T(self,v,r):
        ''' v: velocity r: the radius of the turn '''
        self.c_tv = v
        V = abs(v)
        R = abs(r)
        av = 180*V/(3.14*R)  # angular speed (deg/s) needed to follow the arc
        VV = int(V+0.5*av)
        # Direction flag depends on both travel direction and turn side.
        if v>0:
            if r<=0: d = 0
            else: d = 1
        else:
            if r<=0: d = 1
            else: d = 0
        return self.__encode__(VV,self._dir(v),av*10,d)
    def turn(self,v,r):
        '''turn with car style. v-vel: +forward;-backward; r-radius of turn: -left turn; + right turn.'''
        return self.__encode__T(v,r)
    #bychan
    def __encode__M(self,v,d,av):
        # Combined motion: translate at v along heading d while rotating at av.
        if v<0:v=0
        v = 0.815*v #calibrate the vel
        if av<0: ad = 0
        elif av>=0: ad = 1
        #else: return self.__encode__(0,0,0,0)
        av = 0.8*av  # empirical calibration factor
        return self.__encode__(v,d,abs(av)*10,ad)
    def mulM(self,v,d,av):
        """Combined move: translate at v along heading d while rotating at av."""
        return self.__encode__M(v,d,av)
    # --- planning section ---
    def Dir_planM(self,d,av,t,prd):
        """Precompute one heading per control period prd over total time t,
        starting at d and drifting by av*prd each step (wrapped into 0-360)."""
        n=int(t/prd)
        self.dir=[]  # clear the planned-heading list before the next use
        # print 'd',d
        for i in range(n):
            temp=d+av*prd*i
            if temp<0:
                self.dir.append(360+temp)
            elif temp>360:
                self.dir.append(temp-360)
            else:self.dir.append(temp)
    def cal_tabD_V(self,l,a,v,d,av,prd,tag):
        """Plan a combined translate-and-turn move.

        l: distance; a: turn angle; v: translational speed; d: initial
        heading; av: angular speed; prd: control period; tag: when truthy,
        stretch both motions to finish together (use the longer duration).
        Returns (t1, t2): translation time and rotation time.
        """
        #a:-:clockwise +:counter clockwise
        # print 'a1',d
        # print 'a2',a
        self.t1=l/v
        self.t2=abs(a/av)
        t=min(self.t1,self.t2)
        if tag:
            # print t1,t2
            t=max(self.t1,self.t2)
        #print t
        v=l/(t+1e-6)  # epsilon avoids division by zero
        av=-(abs(a)/(t+1e-6)*np.sign(a))
        # print v,av
        self.Dir_planM(d,av,t,prd)
        self.v=v
        self.av=av
        # print self.dir
        return self.t1,self.t2
    def get_dirM(self):
        """Return the next planned (v, av, heading) step, popping it from
        the plan; when only one entry is left, finish with translation-only
        or rotation-only depending on which part takes longer."""
        if len(self.dir)-1:  # keep one entry in reserve
            return self.v,self.av,self.dir.pop(0)
        elif self.t1>self.t2:
            return self.v,0,self.dir[0]
        else:
            return 0,self.av,0
    def setPort(self,port='wireless'):
        # Select the control port ('wire' or 'wireless'). Either port can
        # take over control at any time by sending this command.
        if port == 'wire':
            cmd = [85,170,30,253,8,188,0,0,0,0,0,0,0,67]
            return ''.join([pack('B',i) for i in cmd])
        elif port == 'wireless':
            cmd = [85,170,30,253,8,187,0,0,0,0,0,0,0,68]
            return ''.join([pack('B',i) for i in cmd])
#Addison, due to limitations in my knowledge, we have to settle with this display class. All this does is give a specific entry from the nested "allTime" list.
#The class requires three variables: the huge nested list from the calInputClass, which will be unpacked.
#The week number, starting from 0, and the weekday number (0-6).
#The class outputs two things: eCal (the recorded number of calories eaten for that specific day), and rCal (the required amount of calories for that day)
#The GUI for the display will have to be different. The user will have to input a specific week and day, and the program will return the required calories and recorded calories for that day.
import time, pickle
class Load:
def __init__ (self, allTime, week, wday):
self.allTime = allTime
self.week = week
self.wday = wday
self.unpack()
def unpack (self):
eCal = self.allTime[self.week][self.wday][0]
rCal = self.allTime[self.week][self.wday][1]
print self.allTime
print eCal, rCal |
from django.db import models
from apps.users.models import *
from django.shortcuts import reverse
from apps.users.models import Student
class Status(models.Model):
    """Availability/status label attached to books (e.g. on shelf, lost)."""
    # Removed a stray trailing semicolon after the field definition.
    title = models.CharField(max_length=100)
    slug = models.SlugField(max_length=255)

    def __str__(self):
        return self.title
#relation containg all genre of books
class Genre(models.Model):
    """Book genre lookup table (e.g. Science Fiction, French Poetry)."""
    name = models.CharField(max_length=200, help_text="Enter a book genre (e.g. Science Fiction, French Poetry etc.)")
    slug=models.SlugField(max_length=255)
    def __str__(self):
        # Human-readable representation used by the admin and shell.
        return self.name
## __str__ method is used to override the default string returned by an object
##relation containing language of books
class Language(models.Model):
    """Natural language a book is written in (e.g. English, French)."""
    name = models.CharField(max_length=200,
                            help_text="Enter the book's natural language (e.g. English, French, Japanese etc.)")
    def __str__(self):
        # Human-readable representation used by the admin and shell.
        return self.name
#book relation that has 2 foreign key author language
#book relation can contain multiple genre so we have used manytomanyfield
class Book(models.Model):
    """A library book, including stock counts and borrowing metadata."""
    title = models.CharField(max_length=200)
    author = models.CharField(max_length=100)
    category=models.CharField(max_length=100,default="Featured",blank=True,null=True,help_text="Featured,MostWished,Education,BestSeller")
    summary = models.TextField(max_length=1000, help_text="Enter a brief description of the book")
    isbn = models.CharField('ISBN', max_length=13,
                            help_text='13 Character <a href="https://www.isbn-international.org/content/what-isbn">ISBN number</a>')
    # A book may belong to several genres (and a genre to several books).
    genre = models.ManyToManyField(Genre, help_text="Select a genre for this book",related_name='books')
    language = models.ForeignKey('Language', on_delete=models.SET_NULL, null=True)
    total_copies = models.IntegerField()
    available_copies = models.IntegerField()
    borrowing_duration=models.ForeignKey('Borrowing_duration',on_delete=models.CASCADE,default=7,blank=True,null=True,)
    status=models.ForeignKey(Status,blank=True,null=True,on_delete=models.CASCADE,related_name='books')
    pic=models.ImageField(blank=True, null=True, upload_to='uploads/book_image/%Y%m%d/',default='uploads/books/default.png')

    def getImageURL(self):
        """Return the cover image URL, or a default path when unavailable.

        Bug fix: the original evaluated ``self.pic.url`` BEFORE the
        ``hasattr`` guard, so the guard never protected the access and an
        empty file field raised instead of falling back to the default.
        """
        if self.pic and hasattr(self.pic, 'url'):
            return self.pic.url
        else:
            return 'uploads/users/default.jpg'

    #return canonical url for an object
    def get_absolute_url(self):
        """Canonical URL of this book's detail page."""
        return reverse('book-detail', args=[str(self.id)])

    def __str__(self):
        return self.title
#relation containing info about borrowed books
#it has foreign keys book and student for referencing the book and the student
#roll_no is used for identifying students
#if a book is returned then the corresponding tuple is deleted from the database
class Borrower(models.Model):
    """Record of one student currently borrowing one book.

    The tuple is deleted when the book is returned (see file comments).
    """
    student = models.ForeignKey(Student, on_delete=models.CASCADE,related_name='borrowers')
    book = models.ForeignKey(Book, on_delete=models.CASCADE,related_name='borrowers')
    issue_date = models.DateField(null=True,blank=True)
    return_date = models.DateField(null=True,blank=True)
    def __str__(self):
        return self.student.fname+" borrowed "+self.book.title
class Borrowing_duration(models.Model):
    """Configurable number of days a book may be borrowed for."""
    duration_allowed=models.IntegerField(help_text="Enter duration in terms of days")
    def __str__(self):
        return str(self.duration_allowed)
    class Meta:
        # Admin display name.
        verbose_name_plural="Set book borrowing duration"
class Reviews(models.Model):
    """A user's text review and star rating of a book."""
    review=models.CharField(max_length=100,default="none")
    book=models.ForeignKey(Book,on_delete=models.CASCADE,related_name='reviews')
    user = models.ForeignKey(User, on_delete=models.CASCADE,related_name='reviews')
    # Allowed ratings in half-star steps; stored as strings (max 3 chars).
    CHOICES = (
        ('0', '0'),
        ('.5', '.5'),
        ('1', '1'),
        ('1.5', '1.5'),
        ('2', '2'),
        ('2.5', '2.5'),
        ('3', '3'),
        ('3.5', '3.5'),
        ('4', '4'),
        ('4.5', '4.5'),
        ('5', '5'),
    )
    rating=models.CharField(max_length=3, choices=CHOICES, default='1')
    def __str__(self):
        return self.book.title
class Late_return_charge(models.Model):
    """Fee charged to a borrower for returning a book late."""
    borrower=models.ForeignKey(Borrower,related_name='charges',on_delete=models.CASCADE)
    # Fix: max_length is not a valid IntegerField option (it only applies to
    # text fields) and newer Django versions reject it — removed.
    late_days=models.IntegerField(default=0)
    charge=models.DecimalField(max_digits=6,decimal_places=2)
    def __str__(self):
        return f"Charge({self.charge}) for late({self.late_days} days late) return of \"{self.borrower.book.title}\" by {self.borrower.student.fname}"
'''
q1
with语句适用于对资源进行访问的场合,确保不管使用过程中是否发生异常都会执行必要的"清理"工作
主要用于释放资源
比如说:文件适用后的自动关闭;线程中锁的自动获取和释放
'''
# --- Variant 1: plain open/read/close, no error handling ---
f = open('files/readme.txt','r')
data = f.read()
print(data)
f.close()
'''
这么写存在两个问题:
1、没有关闭文件
2、即使关闭了文件,但在关闭之前如果抛出异常,仍然会无法关闭文件
'''
# (The note above: if read() raises before close(), the file never closes.)
# --- Variant 2: try/finally guarantees close() even on exceptions ---
f = open('files/readme.txt','r')
try:
    data = f.read()
except:
    print('抛出异常')
# The finally clause fixes the exception case; with no try/finally at all,
# close() might never run.
finally:
    f.close()
# finally guarantees the file is closed.
# --- Variant 3: with-statement calls close() automatically on exit ---
with open('files/readme.txt','r') as f:
    data = f.read()
    print(data)
'''
q2:将with语句用于自定义的类
魔法函数
__enter__(函数调用之前调用)
__exit__(类里函数调用之后调用)
'''
class MyClass:
    """Demo of the context-manager protocol (__enter__/__exit__)."""

    def __enter__(self):
        # Runs when the with-block is entered; the returned object is
        # bound by "as".
        print('__enter__ is call!')
        return self

    def __exit__(self, exc_type, exc_val, traceback):
        # Runs when the with-block exits; the exc_* arguments are all None
        # when no exception occurred inside the block.
        print('__exit__ is call')
        print(f'type:{exc_type}')
        print(f'value:{exc_val}')
        print(f'trace:{traceback}')

    def process1(self):
        print('process1')

    def process2(self):
        # Deliberately raises ZeroDivisionError so __exit__ receives it;
        # the print below is therefore never reached.
        _ = 1 / 0
        print('process2')
# Entering the block calls MyClass.__enter__; leaving it calls __exit__.
with MyClass() as my:
    my.process1()
    # process2 raises ZeroDivisionError; __exit__ still runs, but since it
    # returns None (falsy) the exception propagates after the block.
    my.process2()
|
from aws_lambda_typing.events import SNSEvent
def test_sns_event() -> None:
    """Type-check fixture: a representative SNS event literal must satisfy
    the SNSEvent TypedDict. There are no runtime assertions; the check is
    performed by the static type checker on the annotated assignment."""
    event: SNSEvent = {
        "Records": [
            {
                "EventVersion": "1.0",
                "EventSubscriptionArn": "arn:aws:sns:us-east-2:123456789012:sns-lambda:21be56ed-a058-49f5-8c98-aedd2564c486",  # noqa: E501
                "EventSource": "aws:sns",
                "Sns": {
                    "SignatureVersion": "1",
                    "Timestamp": "2019-01-02T12:45:07.000Z",
                    "Signature": "tcc6faL2yUC6dgZdmrwh1Y4cGa/ebXEkAi6RibDsvpi+tE/1+82j...65r==",  # noqa: E501
                    "SigningCertUrl": "https://sns.us-east-2.amazonaws.com/SimpleNotificationService-ac565b8b1a6c5d002d285f9598aa1d9b.pem",  # noqa: E501
                    "MessageId": "95df01b4-ee98-5cb9-9903-4c221d41eb5e",
                    "Message": "Hello from SNS!",
                    "MessageAttributes": {
                        "Test": {"Type": "String", "Value": "TestString"},
                        "TestBinary": {"Type": "Binary", "Value": "TestBinary"},
                    },
                    "Type": "Notification",
                    "UnsubscribeUrl": "https://sns.us-east-2.amazonaws.com/?Action=Unsubscribe&SubscriptionArn=arn:aws:sns:us-east-2:123456789012:test-lambda:21be56ed-a058-49f5-8c98-aedd2564c486",  # noqa: E501
                    "TopicArn": "arn:aws:sns:us-east-2:123456789012:sns-lambda",
                    "Subject": "TestInvoke",
                },
            }
        ]
    }
|
import numpy as np
import pandas as pd
from collections import deque
import matplotlib
# matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
def len_reg(x, y):
    """Fit a least-squares line y = a + b*x, print and plot the result.

    Solves the 2x2 normal equations directly, scatters the data, overlays
    the fitted line and shows the figure.

    Args:
        x, y: 1-D array-likes of equal length (lists now accepted too,
            since inputs are converted with np.asarray).
    Returns:
        sol: 2x1 ndarray [[intercept], [slope]].
    """
    # Accept plain lists as well as ndarrays (the original required arrays
    # because of the elementwise x ** 2 / x * y expressions).
    x = np.asarray(x, dtype=float)
    y = np.asarray(y, dtype=float)
    n = len(x)
    sigma_x = np.sum(x)
    sigma_xsq = np.sum(x ** 2)
    sigma_y = np.sum(y)
    sigma_xy = np.sum(x * y)
    # Normal equations: [[n, Σx], [Σx, Σx²]] @ [a, b]^T = [Σy, Σxy]^T
    A = np.array([[n, sigma_x], [sigma_x, sigma_xsq]])
    B = np.array([[sigma_y], [sigma_xy]])
    sol = np.linalg.solve(A, B)
    print(sol)
    # Vectorised fitted line (replaces the per-point deque/map construction).
    fitted = sol[0] + sol[1] * x
    plt.plot(x, fitted)
    plt.scatter(x, y)
    plt.xlabel("% of economic class")
    plt.ylabel("Vote gained")
    plt.show()
    return sol
|
from GameObject import GameObject
import pygame
class Hole(GameObject):
    """A mouse-hole tile, drawn from a shared pre-loaded sprite."""

    def init():
        # Load the shared sprite once; requires an initialised display.
        Hole.image = pygame.image.load('images/mousehole.png').convert_alpha()

    def __init__(self, x, y, rows):
        self.rows = rows
        self.width = 500
        block = int(self.width / rows)
        sprite_px = int((2 / 5) * block)
        self.blockWidth = block
        self.playerWidth = sprite_px
        self.scaled = pygame.transform.scale(Hole.image, (sprite_px, sprite_px))
        super(Hole, self).__init__(x, y, self.scaled, sprite_px / 2)

    def update(self, w, h):
        # Position is static; just delegate to the base-class update.
        super(Hole, self).update()
class Trap(GameObject):
    """A mouse-trap tile; `hit` records whether the player touched it."""

    def init():
        # Load the shared sprite once; requires an initialised display.
        Trap.image = pygame.image.load('images/trap.png').convert_alpha()

    def __init__(self, x, y, rows):
        self.hit = False
        self.rows = rows
        self.width = 500
        block = int(self.width / rows)
        sprite_px = int((2 / 5) * block)
        self.blockWidth = block
        self.playerWidth = sprite_px
        self.scaled = pygame.transform.scale(Trap.image, (sprite_px, sprite_px))
        super(Trap, self).__init__(x, y, self.scaled, sprite_px / 2)

    def update(self, w, h):
        # Position is static; just delegate to the base-class update.
        super(Trap, self).update()
class Enemy(GameObject):
    """An enemy that crosses the 500px board in a straight line."""
    def init():
        # Load the shared sprite once; requires an initialised display.
        Enemy.image = pygame.image.load('images/enemy.png').convert_alpha()
    def __init__(self, d1, d2, i, r):
        # d1: axis ('V' vertical or 'H' horizontal); d2: travel direction
        # ('U'/'D' for vertical, 'L'/'R' for horizontal); i: row/column
        # index to travel along; r: number of board rows.
        self.width = 500
        self.rows = r
        self.blockWidth = int(self.width / self.rows)
        self.d1 = d1
        self.d2 = d2
        self.i = i
        self.x = 0
        self.y = 0
        self.vx = 0
        self.vy = 0
        w = self.blockWidth / 2  # half a block: tile-centre offset
        #Finding the initial x and y, as well as velocities, of the enemy
        #Based off of the inputs from spawnEnemy fct in Maze class
        if d1 == 'V':
            # Vertical mover: fixed x at the centre of column i, spawns
            # just off the bottom ('U', moving up) or top ('D', moving down).
            self.x = w+(w*i*2)
            if d2 == 'U':
                self.y = self.width + w
                self.vy = -5
            if d2 == 'D':
                self.y = -w
                self.vy = 5
        if d1 == 'H':
            # Horizontal mover: fixed y at the centre of row i, spawns just
            # off the right ('L', moving left) or left ('R', moving right).
            self.y = w+(w*i*2)
            if d2 == 'L':
                self.x = self.width + w
                self.vx = -5
            if d2 == 'R':
                self.x = -w
                self.vx = 5
        self.scaled = pygame.transform.scale(Enemy.image, (self.blockWidth, self.blockWidth))
        super(Enemy, self).__init__(self.x, self.y, self.scaled, w)
    def update(self, x, y, w, h):
        # Caller supplies the new position; w/h are unused here.
        self.x = x
        self.y = y
        super(Enemy, self).update()
class MazeBlock(GameObject):
    """One maze tile; draws its path openings and records them in a shared
    class-level board used later for legal-move checks."""
    def init():
        #Loading a blank white image to be drawn onto
        MazeBlock.image = pygame.image.load('images/grass.png').convert()
        #Creating a board that will use boolean values to determine legal
        #player moves later
        MazeBlock.board = []
    def __init__(self, x, y, dirs, rows):
        # dirs: 4-element sequence of truthy flags for open paths in
        # (north, east, south, west) order.
        #Scaling the image
        self.rows = rows
        self.width = 500
        self.blockWidth =int(self.width / self.rows)
        #Implemented following 3 lines to deal with scaling visual glitch
        w = self.blockWidth
        self.tile = pygame.transform.scale(MazeBlock.image, (w, w))
        self.n, self.e, self.s, self.w = dirs[0], dirs[1], dirs[2], dirs[3]
        # Record this tile's openings in the shared board (append order is
        # assumed to match tile creation order).
        MazeBlock.board.append([self.n, self.e, self.s, self.w])
        #Adding blank once all other pieces are there
        if len(MazeBlock.board) == self.rows**2-1:
            MazeBlock.board.append(0)
        super(MazeBlock, self).__init__(x, y, self.tile, 0)
        #Drawing on the piece based off of inputs
        self.drawPiece()
    def drawPiece(self):
        """Paint the tile: black outline plus a brown corridor for each
        open direction (fractions of the block width)."""
        #Setting up RGB values
        black = (0, 0, 0)
        white = (255, 255, 255)
        brown = (139,69,19)
        #Creating an outline
        self.outline = (0, 0, self.blockWidth, self.blockWidth)
        pygame.draw.rect(self.tile, black, self.outline, 1)
        #Drawing path based off of inputted direction values from initiation
        L1 = (1/5)*self.blockWidth
        L2 = (2/5)*self.blockWidth
        L3 = (3/5)*self.blockWidth
        if self.n:
            r = (L2, 0, L1, L3)
            pygame.draw.rect(self.tile, brown, r)
        if self.e:
            r = (L2, L2, L3, L1)
            pygame.draw.rect(self.tile, brown, r)
        if self.s:
            r = (L2, L2, L1, L3)
            pygame.draw.rect(self.tile, brown, r)
        if self.w:
            r = (0, L2, L3, L1)
            pygame.draw.rect(self.tile, brown, r)
    #Standard update fct
    def update(self, x, y, w, h):
        # Caller supplies the new position; w/h are unused here.
        self.x = x
        self.y = y
        super(MazeBlock, self).update()
class Blank(GameObject):
    """An empty tile (no maze corridor), movable like other tiles."""

    def init():
        # Load the shared blank tile image once.
        Blank.image = pygame.image.load('images/blank.png').convert()

    def __init__(self, x, y, rows):
        self.rows = rows
        self.width = 500
        # +1 pixel over the nominal tile size avoids seam artifacts.
        tile_px = int(self.width / self.rows) + 1
        self.blockWidth = tile_px
        self.scaled = pygame.transform.scale(Blank.image, (tile_px, tile_px))
        super(Blank, self).__init__(x, y, self.scaled, 0)

    def update(self, x, y, w, h):
        # Caller supplies the new position; w/h are unused here.
        self.x = x
        self.y = y
        super(Blank, self).update()
class Player(GameObject):
    """The player sprite; the image is rotated to face its direction."""

    def init():
        # Load the shared sprite once; requires an initialised display.
        Player.image = pygame.image.load('images/player.png').convert_alpha()

    def __init__(self, x, y, rows, dir):
        self.rows = rows
        self.width = 500
        block = int(self.width / rows)
        sprite_px = int((2 / 5) * block)
        self.blockWidth = block
        self.playerWidth = sprite_px
        self.scaled = pygame.transform.scale(Player.image, (sprite_px, sprite_px))
        # Rotation (degrees CCW) per facing direction; any other value
        # (e.g. 'S') keeps the unrotated sprite, as in the original.
        angles = {'N': 180, 'E': 90, 'W': 270}
        if dir in angles:
            self.rotated = pygame.transform.rotate(self.scaled, angles[dir])
        else:
            self.rotated = self.scaled
        super(Player, self).__init__(x, y, self.rotated, 0)

    def update(self, x, y, w, h):
        # Caller supplies the new position; w/h are unused here.
        self.x = x
        self.y = y
        super(Player, self).update()
class Point(GameObject):
    """A collectible cheese piece; `hit` records whether it was collected."""

    def init():
        # Load the shared sprite once; requires an initialised display.
        Point.yellow = pygame.image.load('images/cheese.png').convert_alpha()

    def __init__(self, x, y, rows):
        self.hit = False
        self.rows = rows
        self.width = 500
        block = int(self.width / rows)
        sprite_px = int((2 / 5) * block)
        self.blockWidth = block
        self.playerWidth = sprite_px
        self.scaled = pygame.transform.scale(Point.yellow, (sprite_px, sprite_px))
        super(Point, self).__init__(x, y, self.scaled, sprite_px)

    def update(self, w, h):
        # Position is static; just delegate to the base-class update.
        super(Point, self).update()
import numpy as np
import cv2
from train import train
from sklearn.neighbors import NearestNeighbors
COLORS = np.random.random_integers(0, high=255, size=(100, 3))
def foot(rect):
    """Return the (x, y) "foot point" of a detection rectangle.

    The point is horizontally centred on the box and lifted 5% of the
    height above the bottom edge (HOG boxes are slightly oversized).

    Args:
        rect: (x, y, w, h) tuple or sequence.
    Returns:
        (x + w/2, y + h - int(0.05*h)) tuple.
    """
    x, y, w, h = rect
    pad_h = int(0.05 * h)  # dropped the unused pad_w from the original
    return (x + w / 2, y + h - pad_h)
def draw_map(img, circles):
    """Draw the five most recent point-sets from *circles* onto *img*.

    More recent frames are drawn with a smaller radius; each tracked index
    keeps a stable colour from the module-level COLORS table.
    """
    radius = 10
    for frame_pts in circles[-5:]:
        radius -= 2  # shrink by 2px per (newer) frame
        for idx, (px, py) in enumerate(frame_pts):
            cv2.circle(img, (int(px), int(py)), radius, COLORS[idx].tolist(), -1)
def draw_detections(img, rects, thickness = 1, weight = None):
    """Draw the most recent set of detection boxes with foot markers.

    rects: list of per-frame detection lists; only the last frame is drawn.
    NOTE(review): the `weight` parameter and the `sample` crop below are
    never used; `x+w/2` produces floats on Python 3, which cv2 drawing
    functions reject — this looks like Python 2 heritage, confirm the
    target interpreter.
    """
    for rect in rects[-1:]:
        for (i, (x, y, w, h)) in enumerate(rect):
            # the HOG detector returns slightly larger rectangles than the real objects.
            # so we slightly shrink the rectangles to get a nicer output.
            #import pdb; pdb.set_trace()
            pad_w, pad_h = int(0.15*w), int(0.05*h)
            sample = img[y:y+h, x:x+w].copy()  # NOTE(review): unused
            # Ellipse + dot mark the foot point; rectangle marks the body.
            cv2.ellipse(img, (x+w/2,y+h-pad_h), (w/3, w/5), 0, 0, 360, (250,0,0), 2)
            cv2.circle(img, (x+w/2,y+h-pad_h), 3, (250,0,0), -1)
            cv2.rectangle(img, (x+pad_w, y+pad_h), (x+w-pad_w, y+h-pad_h), COLORS[i].tolist(), thickness)
            #cv2.putText(img,'(%s,%s)'%(x,y+h),(x, y+h), cv2.FONT_HERSHEY_SIMPLEX, 1,(255,255,255),1,cv2.LINE_AA)
# Load per-frame court keypoint coordinates from CSV and stack them into an
# (N, 4, 2) float32 array: four reference points per video frame.
key1 = np.loadtxt(open('data1.csv', 'rb'), delimiter=',')
key2 = np.loadtxt(open('data2.csv', 'rb'), delimiter=',')
key3 = np.loadtxt(open('data3.csv', 'rb'), delimiter=',')
key4 = np.loadtxt(open('data4.csv', 'rb'), delimiter=',')
keys = np.hstack((key1, key2, key3, key4)).reshape(-1, 4, 2).astype(np.float32)
# Fixed template coordinates that the four court keypoints map onto.
pts = np.float32([[228,228],[228,372], [0,36], [0,396]])
tpl = train()  # court template image (grayscale) produced by the train module
Found1 = np.loadtxt(open('player1.csv', 'rb'), delimiter=',')  # NOTE(review): unused below
fourcc = cv2.VideoWriter_fourcc(*'XVID')
#out = cv2.VideoWriter('CourtMapping.avi',fourcc, 20.0, (1920,720))
cv2.namedWindow('frame')
cap = cv2.VideoCapture('nba4_clip.avi')
num = 1
ret, frame = cap.read()
tpl_h,tpl_w = tpl.shape
frame_h, frame_w, _ = frame.shape
# Homographies between the first frame's keypoints and the template (both directions).
M = cv2.getPerspectiveTransform(keys[0], pts)
N = cv2.getPerspectiveTransform( pts, keys[0])
#roi = cv2.perspectiveTransform(found_map_1.reshape(-1,1,2), M).reshape(-1,2)
Found = []
Found_map = []
while(cap.isOpened()):
    num += 1
    ret, frame = cap.read()
    # Recompute the frame<->template homographies for the current frame.
    M = cv2.getPerspectiveTransform(keys[num-1], pts)
    N = cv2.getPerspectiveTransform( pts, keys[num-1])
    blank = np.zeros_like(frame)
    tplC = cv2.cvtColor(tpl, cv2.COLOR_GRAY2BGR)
    #foundFoot = map(foot, found)
    #found_map_1 = np.float32(foundFoot)[:, :2]
    #found_map = cv2.perspectiveTransform(found_map_1.reshape(-1,1,2), M).reshape(-1,2)
    #nbrs = NearestNeighbors(n_neighbors=1).fit(found_map)
    #distances, indices = nbrs.kneighbors(roi)
    #roi = found_map[indices].reshape(-1,2)
    #Found.append(np.array(found)[indices].reshape(-1,4))
    #Found_map.append(roi)
    #draw_map(tplC, Found_map)
    draw_detections(frame, Found)
    # Warp the court template into the camera view and blend it in faintly.
    tplRot = cv2.warpPerspective(tpl, N, (frame_w, frame_h))
    tplRot2 = cv2.cvtColor(tplRot, cv2.COLOR_GRAY2BGR)
    frame = cv2.addWeighted(tplRot2, 0.2, frame, 0.8, 0)
    #blank_map = cv2.warpPerspective(blank, M, (tpl_w,tpl_h))
    #dst = cv2.addWeighted(blank_map, 0.5, tplC, 0.5, 0)
    # Show the camera frame and the padded template side by side.
    wrap = cv2.copyMakeBorder(tplC, 60, 60, 20, 20, cv2.BORDER_CONSTANT, value=0)
    mix = np.hstack((frame, wrap))
    cv2.imshow('frame', mix)
    #out.write(mix)
    if cv2.waitKey(10000) & 0xFF == 27:
        break
    import pdb; pdb.set_trace()  # NOTE(review): drops into the debugger every frame — likely leftover
cap.release()
#out.release()
cv2.destroyAllWindows()
# Author Emily Wang
#!/usr/bin/env python
# coding: utf-8
#import anal_util from ajustador/FrontNeuroinf
import sys
import os
import numpy as np
import pandas as pd
import glob
import scipy
import sklearn as sc
#import the random forest classifier method
from sklearn.ensemble import RandomForestClassifier
from sklearn import model_selection,metrics,tree
import anal_util as au
from matplotlib import pyplot as plt
import operator
from matplotlib.colors import ListedColormap
def plotPredictions(max_feat, train_test, predict_dict, neurtypes, feature_order,epoch):
    """Scatter-plot classifier predictions for the train and test sets,
    one figure per pair of top-ranked features.

    max_feat: number of top features; train_test: {'train'/'test': (df, labels)};
    predict_dict: predicted labels per set; neurtypes: label names used to
    map predictions to colours; feature_order: (name, importance) pairs
    sorted by importance; epoch: used in the figure title.

    NOTE(review): reads the module-level global ``alldf`` (not a parameter),
    and the resulting ``inputdf`` is never used afterwards — confirm intent.
    """
    ########## Graph the output using contour graph
    #inputdf contains the value of a subset of features used for classifier, i.e., two different columns from df
    feature_cols = [feat[0] for feat in feature_order]
    inputdf = alldf[feature_cols[0:max_feat]]  # NOTE(review): global alldf; unused result
    plt.ion()
    # Train points get black edges, test points none — distinguishes the sets.
    edgecolors=['k','none']
    feature_axes=[(i,i+1) for i in range(0,max_feat,2)]
    for cols in feature_axes:
        plt.figure()
        plt.title('Epoch '+str(epoch))
        for key,col in zip(train_test.keys(),edgecolors):
            predict=predict_dict[key]
            df=train_test[key][0]
            # Map predicted labels to integer colour indices (red/blue map).
            plot_predict=[neurtypes.index(p) for p in predict]
            plt.scatter(df[feature_cols[cols[0]]], df[feature_cols[cols[1]]], c=plot_predict,cmap=ListedColormap(['r', 'b']), edgecolor=col, s=20,label=key)
        plt.xlabel(feature_cols[cols[0]])
        plt.ylabel(feature_cols[cols[1]])
        plt.legend()
def plot_features(list_features, epochs, ylabel):
    """Bar-chart the (name, weight) pairs in *list_features*.

    epochs: string used in the title; ylabel: y-axis label and title prefix.
    """
    plt.ion()
    names = [name for name, _ in list_features]
    weights = [weight for _, weight in list_features]
    positions = np.arange(len(list_features))
    plt.figure(figsize=(6, 4))
    plt.bar(positions, weights, align='center', alpha=0.5)
    plt.xticks(positions, names)
    plt.xticks(rotation=90)  # vertical labels so long names stay readable
    plt.ylabel(ylabel)
    plt.xlabel('Feature')
    plt.title(ylabel+' over '+epochs+' epochs')
def runClusterAnalysis(param_values, labels, num_features, alldf,epoch,MAXPLOTS):
    """Train a random forest on a random train/test split and return the
    top-ranked features by importance.

    Returns:
        (top_features, max_feat): list of (name, importance) pairs truncated
        to max_feat entries, and max_feat itself.

    NOTE(review): the plotPredictions call below passes ``neurtypes``, which
    is not defined in this function — it must exist as a module-level global
    or this raises NameError whenever epoch <= MAXPLOTS.
    """
    ############ data is ready for the cluster analysis ##################
    #select a random subset of data for training, and use the other part for testing
    #sklearn.model_selection.train_test_split(*arrays, **options)
    #returns the top max_feat number of features and their weights
    df_values_train, df_values_test, df_labels_train, df_labels_test = model_selection.train_test_split(param_values, labels, test_size=0.33)
    train_test = {'train':(df_values_train,df_labels_train), 'test':(df_values_test, df_labels_test)}
    #number of estimators (n_estim) is number of trees in the forest
    #This is NOT the number of clusters to be found
    #max_feat is the number of features to use for classification
    #Empirical good default value is max_features=sqrt(num_features) for classification tasks
    max_feat=int(np.ceil(np.sqrt(num_features)))
    n_estim=10
    rtc = RandomForestClassifier(n_estimators=n_estim, max_features=max_feat)
    #This line actually builds the random forest (does the training)
    rtc.fit(df_values_train, df_labels_train)
    ###### EVALUATE THE RESULT
    #calculate a score, show the confusion matrix
    predict_dict = {}
    for nm,(df,labl) in train_test.items():
        predict = rtc.predict(df)
        predict_dict[nm] = predict
    #evaluate the importance of each feature in the classifier
    #The relative rank (i.e. depth) of a feature used as a decision node in a tree can be used to assess the relative importance of that feature with respect to the predictability of the target variable.
    feature_order = sorted({feature : importance for feature, importance in zip(list(df_values_train.columns), list(rtc.feature_importances_))}.items(), key=operator.itemgetter(1), reverse=True)
    ###### 3d, plot amd print the predictions of the actual data -- you can do this if # of epochs is low
    if epoch<=MAXPLOTS:
        plotPredictions(max_feat, train_test, predict_dict, neurtypes, feature_order,epoch)
    #print('epoch {} best features {}'.format(epoch,feature_order[0:max_feat]))
    return feature_order[0:max_feat], max_feat
# # Setting Up Data Files for Cluster Analysis
def set_up_df(neurtypes,path_root, tile=0.005, num_fits=None): #take pattern: ex. "/path/fileroot"
    """Read parameter-optimization .npz files and assemble one dataframe for classification.

    Parameters:
        neurtypes: list of neuron-type names; each name must occur in its files' names.
        path_root: directory plus root file name; files are found via glob(path_root + '*.npz').
        tile: fraction of best fits to keep from each optimization (passed to au.combined_df).
        num_fits: if truthy, cap the number of fits taken per cell and rebuild alldf
            from that smaller per-cell sample.

    Returns:
        (param_values, labels, num_features, alldf): predictor columns, the
        'neuron' class labels, the count of non-fitness columns, and the full frame.
    """
    #set of data files from parameter optimization
    pattern = path_root+'*.npz'
    #if small=True, use num_fits from each optimization, else, use %tile
    small = True  # NOTE(review): never read below -- num_fits alone controls behavior; confirm before removing
    #retrieve data files -- sort the files by which neurtype
    fnames = glob.glob(pattern)
    # Partition files by which neurtype name appears in the filename.
    group_names = {key:[f for f in fnames if key in f] for key in neurtypes}
    if len(fnames)==0:
        print('no files found by searching for', pattern)
    ##### process all examples of each type, combine into dict of data frames and then one dataframe
    df_list = {}
    df_list_of_lists = {}
    for neur in neurtypes:
        df_list[neur], df_list_of_lists[neur] = au.combined_df(group_names[neur], tile, neur)
        #df_list[neur] is a DATAFRAME
        #df_list_of_lists[neur] is a LIST OF DATAFRAMES (1 dataframe per npz file)
    #list containing fit values for every fit for every neuron
    alldf = pd.concat([df for df in df_list.values()])
    print('all files read. Neuron_types: ', pd.unique(alldf['neuron']), 'df shape', alldf.shape,'columns',alldf.columns,'files',pd.unique(alldf['cell']),'\n')
    ####create smaller df using just small and same number of good fits from each neuron
    # Smallest per-file sample size, so every cell contributes equally.
    min_samples = np.min([n.shape[0] for vals in df_list_of_lists.values() for n in vals])
    if num_fits:
        num_samples=min(min_samples, num_fits)
    else:
        num_samples=min_samples
    smalldf_list = {neur:[] for neur in neurtypes}
    for neur in neurtypes:
        for i in range(len(df_list_of_lists[neur])):
            # Take the LAST num_samples rows of each per-file frame
            # (presumably the best fits are at the end -- TODO confirm ordering).
            smalldf_list[neur].append(df_list_of_lists[neur][i][-num_samples:])
    print('*********** number of cells in smalldf_list: ', [len(smalldf_list[n]) for n in neurtypes])
    if num_fits:
        alldf=pd.concat([df for dfset in smalldf_list.values() for df in dfset])
        print('SMALLER SET OF SAMPLES: Neuron_types: ', pd.unique(alldf['neuron']), 'df shape', alldf.shape,'files',pd.unique(alldf['cell']))
    # NOTE(review): axis=1 drops entire COLUMNS containing NaN, not rows as the
    # original comment claimed ("exclude entire row (observation) if Nan is found");
    # confirm which was intended.
    alldf = alldf.dropna(axis=1)
    #identify fitness columns and number of features (parameter values)
    fitnesses = [col for col in alldf.columns if 'fitness' in col]
    chan_params = [col for col in alldf.columns if 'Chan' in col]
    num_features = len(alldf.columns)-len(fitnesses)
    print('new shape', alldf.shape,'fitnesses:', len(fitnesses), 'params',num_features)
    #create dataframe with the 'predictor' parameters - conductance and channel kinetics
    #exclude columns that containing neuron identifier or fitness values, include the total fitness
    exclude_columns = fitnesses + ['neuron','neurtype','junction_potential', "model", "cell", 'total'] #total? ['neuron','neurtype','junction_potential']
    param_columns = [column for column in list(alldf.columns) if column not in exclude_columns]
    param_values = alldf[param_columns]
    #labels contains the target values (class labels) of the training data
    labels = alldf['neuron']
    return (param_values, labels, num_features, alldf)
############ MAIN #############
#### parameters to control analysis.
epochs = 10  # 100 or 1000 for real runs, 10 for testing
neurtypes = ['Npas', 'proto']  # which neurtypes you are identifying between
path_root = 'opt_output/temeles_gpopt_output/'  # directory and root file name of set of files
tile = 0.005  # what percentage of best fit neurons do you want to use
num_fits = 10  # how many of each fit for classification of just a few of best fit neurons
# Set to zero to suppress plotting graphs
MAXPLOTS = 3
#### end of parameters

### read in all npz files, select top tile% of model fits, put into pandas dataframe
param_values, labels, num_features, alldf = set_up_df(neurtypes, path_root, tile, num_fits)

### Do Cluster Analysis
# Top max_feat features & their weights in each epoch are cumulatively summed in
#   collectionBestFeatures = {feature: total weight over all epochs}
# The single top feature of each epoch is counted in
#   collectionTopFeatures = {feature: times ranked first over all epochs}
collectionBestFeatures = {}
collectionTopFeatures = {}
for epoch in range(epochs):
    features, max_feat = runClusterAnalysis(param_values, labels, num_features, alldf, epoch, MAXPLOTS)
    print()
    print('##### BEST FEATURES for EPOCH ' + str(epoch) + ' #######')
    for i, (feat, weight) in enumerate(features):
        print(i, feat, weight)  # monitor progress
        # Accumulate each feature's importance weight across epochs.
        collectionBestFeatures[feat] = collectionBestFeatures.get(feat, 0) + weight
    # Count how often each feature comes out on top.
    f, w = features[0]
    collectionTopFeatures[f] = collectionTopFeatures.get(f, 0) + 1

#### Plotting BestFeatures (Weights) and TopFeatures (Frequency)
# To run in the background: create rc.bat containing the single line
#   python3 randomclassifer.py
# then from the unix command line type:  at -f rc.bat NOW
listBestFeatures = sorted(collectionBestFeatures.items(), key=operator.itemgetter(1), reverse=True)
listTopFeatures = sorted(collectionTopFeatures.items(), key=operator.itemgetter(1), reverse=True)
if MAXPLOTS:
    plot_features(listBestFeatures, str(epochs), 'Total Weight')
    # BUG FIX: the top-feature plot shows an epoch count, not a weight,
    # so label its axis 'Frequency' (the original reused 'Total Weight').
    plot_features(listTopFeatures, str(epochs), 'Frequency')

########### Save results for later #############
np.savez('Feature', best_features=listBestFeatures, top_features=listTopFeatures)
###### NOTES
########################### need to do cluster analysis when labels are not know and best features are not known ##########
### e.g. using the hierarchical clustering in SAS, but need a method better than disciminant analysis to select features ###
# Explains different methods for evaluating clusters:
#https://joernhees.de/blog/2015/08/26/scipy-hierarchical-clustering-and-dendrogram-tutorial/
# TODO
# How to further simplify tree to comment on the entire forest behaviour.
# What is the meaning of tree.dot
# each optimization gives different results in terms of important features - how to resolve
# label neurons in scatter plot based on neuron type('proto', 'arky'), and add legend
# use neuron number and random seed to label the different clusters.
#https://scikit-learn.org/stable/auto_examples/ensemble/plot_forest_iris.html#sphx-glr-auto-examples-ensemble-plot-forest-iris-py
#
#What about using random forest to select parameters, and then hierarchical using those parameters?
#MAY NEED to evaluate how results vary with max_feat and n_etim
#https://scikit-learn.org/stable/modules/ensemble.html#random-forests
|
# project/api/views.py
from flask import Blueprint, jsonify, request
from project.api.models import User, Kanji, Entry, Reading, ReadingInfo, Meaning
from project import db
from sqlalchemy import exc
users_blueprint = Blueprint('users', __name__)
@users_blueprint.route('/ping', methods=['GET'])
def ping_pong():
    """Health-check endpoint: always reports success."""
    payload = {'status': 'success', 'message': 'pong!'}
    return jsonify(payload)
@users_blueprint.route('/users', methods=['POST'])
def add_user():
    """Create a user from a JSON payload unless the email is already registered."""
    post_data = request.get_json()
    if not post_data:
        return jsonify({'status': 'fail', 'message': 'Invalid payload.'}), 400
    username = post_data.get('username')
    email = post_data.get('email')
    try:
        existing = User.query.filter_by(email=email).first()
        if existing:
            return jsonify({
                'status': 'fail',
                'message': 'Sorry. That email already exists.'
            }), 400
        db.session.add(User(username=username, email=email))
        db.session.commit()
        return jsonify({
            'status': 'success',
            'message': f'{email} was added!'
        }), 201
    except exc.IntegrityError:
        # A concurrent insert can still trip the unique constraint.
        db.session.rollback()
        return jsonify({'status': 'fail', 'message': 'Invalid payload.'}), 400
@users_blueprint.route('/users/<user_id>', methods=['GET'])
def get_single_user(user_id):
    """Get single user details.

    Returns 404 with a generic message when the id is non-numeric or unknown.
    """
    response_object = {
        'status': 'fail',
        'message': 'User does not exist'
    }
    try:
        user = User.query.filter_by(id=int(user_id)).first()
        if not user:
            return jsonify(response_object), 404
        # BUG FIX: the original re-queried the same user a second time here.
        response_object = {
            'status': 'success',
            'data': {
                'username': user.username,
                'email': user.email,
                'created_at': user.created_at
            }
        }
        return jsonify(response_object), 200
    except ValueError:
        # int(user_id) failed: a malformed id behaves like a missing user.
        return jsonify(response_object), 404
@users_blueprint.route('/users', methods=['GET'])
def get_all_users():
    """Get all users as a JSON list."""
    users_list = [
        {
            'id': user.id,
            'username': user.username,
            'email': user.email,
            'created_at': user.created_at
        }
        for user in User.query.all()
    ]
    return jsonify({'status': 'success', 'data': {'users': users_list}}), 200
def get_single_kanji(kanji_hexa):
    """Get Kanji details from a hexadecimal character code.

    Looks up the Entry whose seq equals the decimal value of kanji_hexa, then
    gathers its readings (split into on'yomi/kun'yomi) and meanings.
    Returns a 404 JSON response when the code is unknown or not valid hex.
    """
    response_object = {
        'status': 'fail',
        'message': 'Kanji does not exist'
    }
    try:
        entry = Entry.query.filter_by(seq=int(kanji_hexa,16)).first() #int(kanji_hexa, 16) converts hexadecimal to decimal
        response_object2 = {
            'status': 'fail',
            'message': 'Kanji does not exist',
            'codigo': int(kanji_hexa,16)
        }
        if not entry:
            # Unknown code: echo the decoded value back for debugging.
            return jsonify(response_object2), 404
        else:
            #kanji = Kanji.query.filter_by(entr=kanji_hexa).first()
            #db.session.query(kanji.txt, entry.).\
            #    join(Account, Account.organization == User.organization).\
            #    filter(Account.name == 'some name')
            kanji = Kanji.query.filter_by(entr=int(entry.id)).first()
            # Both queries order by rdng; the loop below assumes readings and
            # readingsinfo align index-by-index and are equally long -- TODO confirm.
            readings = Reading.query.order_by(Reading.rdng.asc()).filter_by(entr=int(kanji.entr)).all()
            readingsinfo = ReadingInfo.query.order_by(ReadingInfo.rdng.asc()).filter_by(entr=int(kanji.entr)).all()
            # NOTE(review): `joint` is built but never used below.
            joint = db.session.query(ReadingInfo.kw, Reading.txt).\
                join(Reading, Reading.entr == ReadingInfo.entr).\
                filter(Reading.entr == kanji.entr).distinct()
            all_readings = []  # NOTE(review): never populated or used
            readings_on = []
            readings_kun = []
            for i in range(len(readings)):
                # kw 128 presumably marks on'yomi and 106 kun'yomi -- verify
                # against the keyword table before relying on these constants.
                if readingsinfo[i].kw == 128:
                    readings_on.append(readings[i].txt)
                if readingsinfo[i].kw == 106:
                    readings_kun.append(readings[i].txt)
            meanings = Meaning.query.order_by(Meaning.gloss.asc()).filter_by(entr=int(kanji.entr)).all()
            meanings_list = []
            for meaning in meanings:
                meanings_list.append(meaning.txt)
            response_object = {
                'status': 'success',
                'data': {
                    'id': entry.id,
                    'kanji': kanji.txt,
                    'decimal': entry.seq,
                    'hexadecimal': kanji_hexa,
                    'readings': {
                        'onyomi': readings_on,
                        'kunyomi': readings_kun
                    },
                    'meanings': meanings_list
                }
            }
            return jsonify(response_object), 200
    except ValueError:
        # int(kanji_hexa, 16) failed: treat a malformed code like a missing kanji.
        return jsonify(response_object), 404
@users_blueprint.route('/kanji/<kanji_char>', methods=['GET'])
def get_single_kanji_char(kanji_char):
    """Get Kanji details from a Kanji character (converted to its hex code)."""
    # NOTE(review): the loop overwrites hexa_code each iteration, so for a
    # multi-character path only the LAST character is looked up -- confirm
    # whether single-character input is guaranteed by the route's callers.
    for _c in kanji_char:
        hexa_code = ('%04x' % ord(_c))
    return get_single_kanji(hexa_code.upper())
@users_blueprint.route('/hexa/<kanji_hexa>', methods=['GET'])
def get_single_kanji_hext(kanji_hexa):
    """Get Kanji details from a hexadecimal code (case-insensitive)."""
    normalized = kanji_hexa.upper()
    return get_single_kanji(normalized)
|
#!/usr/bin/env python3
#
# A mobility class for Levy walk.
# Copyright (c) 2011-2015, Hiroyuki Ohsaki.
# All rights reserved.
#
# Id: LevyWalk.pm,v 1.11 2015/12/09 14:45:23 ohsaki Exp $
#
import random
import math
from dtnsim.mobility.rwp import RandomWaypoint
from vector import Vector as V
def pareto(scale, shape):
    """Generate a random variable following the Pareto distribution with
    parameters SCALE and SHAPE.  Note that the mean of the Pareto distribution
    is given by SHAPE * SCALE / (SHAPE - 1) (for SHAPE > 1).

    BUG FIX: the previous body returned scale / uniform(0, 1/shape), which is
    not Pareto(scale, shape)-distributed and does not have the documented
    mean.  random.paretovariate(shape) draws a Pareto variate with minimum 1
    via the inverse-CDF method; scaling by SCALE gives the intended
    distribution (mean SHAPE * SCALE / (SHAPE - 1)).
    """
    return scale * random.paretovariate(shape)
class LevyWalk(RandomWaypoint):
    """Levy-walk mobility: waypoint distances follow a Pareto distribution."""

    def __init__(self, scale=100, shape=1.5, *kargs, **kwargs):
        # NOTE: must be assigned before calling __init__
        # (the parent constructor presumably calls goal_coordinate(), which
        # reads self.scale and self.shape -- TODO confirm)
        self.scale = scale
        self.shape = shape
        super().__init__(*kargs, **kwargs)

    def goal_coordinate(self):
        """Randomly choose the goal in the field so that the distance from the
        current coordinate follows Pareto distribution."""
        length = pareto(self.scale, self.shape)  # Pareto-distributed step length
        theta = random.uniform(0, 2 * math.pi)   # isotropic direction
        goal = self.current + length * V(math.cos(theta), math.sin(theta))
        # FIXME: the goal coordinate is simply limited by the field boundaries.
        # A node should *bounce back* with the boundaries.
        # self.width/self.height presumably come from RandomWaypoint -- verify.
        x = max(0, min(goal[0], self.width))
        y = max(0, min(goal[1], self.height))
        return V(x, y)
|
from bottle import route, run, template
import requests
import os
from subprocess import Popen
# startup react-markup-server
# NOTE(review): '>&' is csh-style redirection; with shell=True this command
# runs under /bin/sh, where '>&' parses differently -- confirm the log file
# actually receives stdout+stderr as intended.
Popen('npm start >& react-markup-service.log', shell=True,
      stdin=None,
      stdout=None,
      stderr=None,
      close_fds=True)
markup_api_url = 'http://localhost:8181/render'
# PORT may come from the environment; defaults to 8080.
port = os.getenv('PORT', 8080)
def get_markup(component='', payload=None):
    """POST component + payload to the react-markup service; return the rendered
    markup bytes, or '' on any non-200 response.

    FIXES: payload defaulted to a shared mutable dict (anti-pattern, even though
    it was only copied); the Python-2-only print statement is replaced with the
    function-call form, which behaves identically under Python 2 for a single
    argument and also parses under Python 3.
    """
    post_data = dict(payload or {})
    post_data.update({'component': component})
    resp = requests.post(markup_api_url, json=post_data)
    print("%s response from react-markup-service: %s" % (resp.status_code, resp.text))
    if resp.status_code == 200:
        return resp.content
    return ''
@route('/')
def index():
    """Render the Greeting component for the default name."""
    return get_markup('./Greeting', {'name': 'world'})
@route('/<name>')
def index(name):
    """Render the Greeting component for the requested name."""
    return get_markup('./Greeting', {'name': name})
run(host='localhost', port=8080)
|
# from code_challenges.linkedList.linked_list import *
class Node:
    """A singly linked list node: a value plus a reference to its successor."""

    def __init__(self, value):
        self.value = value  # payload stored at this node
        self.next = None    # successor node; None marks the tail
class LinkedList:
    """Singly linked list supporting append, positional inserts, and
    kth-from-end lookup."""

    def __init__(self, head=None):
        self.head = head

    def append(self, value):
        """Add a new node holding `value` at the end of the list."""
        # FIX: the local cursor was misspelled 'currnet'; renamed for clarity.
        current = self.head
        prev = None
        while current:
            prev = current
            current = current.next
        if prev:
            prev.next = Node(value)
        else:
            # Empty list: the new node becomes the head.
            self.head = Node(value)

    def insert_before(self, value, newValue):
        """Insert a node holding `newValue` directly before the first node whose
        value equals `value`.

        Raises Exception('Value not found') when no node matches.
        """
        newNode = Node(newValue)
        # IMPROVEMENT: the original relied on a bare `except:` to handle an
        # empty list; test the condition explicitly instead.
        if self.head is None:
            raise Exception('Value not found')
        if self.head.value == value:
            # Matching head: the new node becomes the head.
            newNode.next = self.head
            self.head = newNode
            return
        prev = self.head
        current = self.head.next
        while current:
            if current.value == value:
                prev.next = newNode
                newNode.next = current
                return
            prev = current
            current = current.next
        raise Exception('Value not found')

    def insert_after(self, value, newValue):
        """Insert a node holding `newValue` directly after the first node whose
        value equals `value`.

        Raises Exception('Value not found') when no node matches.
        """
        newNode = Node(newValue)
        current = self.head
        while current:
            if current.value == value:
                newNode.next = current.next
                current.next = newNode
                return
            current = current.next
        raise Exception('Value not found')

    def kth_from_end(self, k):
        """Return the value k nodes from the end (k == 0 is the last node).

        Raises for negative k, or when k is not less than the list length.
        """
        current = self.head
        prev = self.head
        n = 0
        if k < 0:
            raise Exception('K is a negative value')
        # Two-pointer walk: once `current` has advanced k+1 nodes, move `prev`
        # in lockstep so it finishes k nodes from the tail.
        while current:
            current = current.next
            if n == k + 1:
                prev = prev.next
            else:
                n += 1
        if n == k + 1:
            return prev.value
        raise Exception('k is greater than the length of the linked list')
def zip_List(LinkedList1, LinkedList2):
    """Interleave two linked lists into a new list: l1[0], l2[0], l1[1], l2[1], ...

    BUG FIX: the original advanced its second cursor with `current2 =
    current.next` (a node of the NEW list) instead of `current2 =
    current2.next`, so values from LinkedList2 were never walked and the
    placeholders were filled with nodes of the result list itself.
    Placeholders with no matching LinkedList2 value remain 0.
    """
    newList = LinkedList()
    # First pass: copy list 1, leaving a 0 placeholder after each value.
    current = LinkedList1.head
    while current:
        newList.append(current.value)
        newList.append(0)
        current = current.next
    # Second pass: overwrite the placeholders (odd positions) with list-2 values.
    current = newList.head
    current2 = LinkedList2.head
    counter = 0
    while current and current2:
        if counter % 2 == 1:
            current.value = current2.value
            current2 = current2.next
        current = current.next
        counter += 1
    return newList
def test_add_to_end():
    """append() links a new tail node after the existing last node."""
    first, second, third = Node(1), Node(2), Node(4)
    first.next = second
    second.next = third
    ll = LinkedList(first)
    ll.append(5)
    assert third.next.value == 5
def test_add_multiple_to_end():
    """Two append() calls chain both new nodes onto the tail, in order."""
    first, second, third = Node(1), Node(2), Node(4)
    first.next = second
    second.next = third
    ll = LinkedList(first)
    ll.append(5)
    ll.append(7)
    assert third.next.value == 5
    assert third.next.next.value == 7
def test_insert_before():
    """insert_before() places the new node directly ahead of the match."""
    first, second, third = Node(1), Node(2), Node(4)
    first.next = second
    second.next = third
    ll = LinkedList(first)
    ll.insert_before(2, 3)
    assert first.next.value == 3
def test_insert_before_first_node():
    """Inserting before the head value makes the new node the head."""
    ll = LinkedList(Node(1))
    ll.insert_before(1, 3)
    assert ll.head.value == 3
def test_insert_after():
    """insert_after() places the new node directly after the match."""
    first, second, third = Node(1), Node(2), Node(4)
    first.next = second
    second.next = third
    ll = LinkedList(first)
    ll.insert_after(2, 3)
    assert second.next.value == 3
def test_insert_after_last_node():
    """insert_after() on the tail value extends the list."""
    first, second, third = Node(1), Node(2), Node(4)
    first.next = second
    second.next = third
    ll = LinkedList(first)
    ll.insert_after(4, 3)
    assert third.next.value == 3
|
from source.attribute_grouper import AttributeGrouper
from source.dataframe_splitter import DataframeSplitter
from source.dataframe_monthwise_splitter import DataframeMonthwiseSplitter
from source.month_attribute_grouper import MonthAttributeGrouper
def generate_attribute_grouper_data():
    """Export (and optionally plot) frequency statistics for flight attributes.

    Most steps below are toggled by (un)commenting; only the final export
    (airline counts per origin airport) is currently active.
    """
    attribute_grouper = AttributeGrouper("data/flights.csv")
    #attribute_grouper.plot(["DESTINATION_AIRPORT"], 15, "plots/Busiest_Destination.png")
    #attribute_grouper.plot(["ORIGIN_AIRPORT"], 15, "plots/Busiest_Origin.png")
    #attribute_grouper.plot(["ORIGIN_AIRPORT", "DESTINATION_AIRPORT"], 10, "plots/Busiest_Origin_Destination_Pairs.png")
    #attribute_grouper.export(["ORIGIN_AIRPORT"], "outputFiles/origin_frequency.csv", ['ORIGIN AIRPORT', 'COUNT'])
    #attribute_grouper.export(["DESTINATION_AIRPORT"], "outputFiles/destination_frequency.csv", ['DESTINATION AIRPORT', 'COUNT'])
    #attribute_grouper.carrierPath(["AIRLINE", "ORIGIN_AIRPORT", "DESTINATION_AIRPORT"],
    #                              "outputFiles/carrier_statistics.csv",
    #                              ['AIRLINE CARRIER', 'ORIGIN AIRPORT', 'DESTINATION AIRPORT', 'COUNT'])
    #attribute_grouper.plot(["AIRLINE", "ORIGIN_AIRPORT", "DESTINATION_AIRPORT"], 20, "plots/Airline_Counts_Path.png")
    #attribute_grouper.export(["AIRLINE"], "outputFiles/airline_frequency.csv", ['AIRLINE', 'COUNT'])
    #attribute_grouper.plot(["AIRLINE"], 15, "plots/Airline_Frequency.png")
    #attribute_grouper.export(["ORIGIN_AIRPORT","MONTH"],"outputFiles/monthwise_origin_frequency.csv",
    #                         ['ORIGIN AIRPORT', 'MONTH', 'COUNT'])
    #attribute_grouper.export(["MONTH", "AIRLINE"], "outputFiles/flight_count_month_airline.csv", ['MONTH','AIRLINE', 'COUNT'])
    attribute_grouper.export(["ORIGIN_AIRPORT", "AIRLINE"], "outputFiles/airline_count_origin_airport.csv",
                             ['ORIGIN AIRPORT', 'AIRLINE', 'COUNT'])
# Class for splitting data
def generate_split_data(is_arrival):
    """Split flights into early/late/on-time CSVs and tally per-airline counts.

    Parameters:
        is_arrival: True selects the ARRIVAL_* columns; False the DEPARTURE_*.

    Most steps are toggled by (un)commenting; currently only the early-event
    split runs, and the two AttributeGrouper instances for the late/early files
    are constructed without being used further.
    """
    dataframe_splitter = DataframeSplitter("data/flights.csv")
    # Defaults cover the departure case; overridden below for arrivals.
    field_name = "DEPARTURE_DELAY"
    file_prefix = "departure"
    scheduled = "SCHEDULED_DEPARTURE"
    if is_arrival:
        field_name = "ARRIVAL_DELAY"
        file_prefix = "arrival"
        scheduled = "SCHEDULED_ARRIVAL"
    dataframe_splitter.early_event(["AIRLINE", scheduled, "ORIGIN_AIRPORT", field_name],
                                   field_name, "generatedCsv/early_{}.csv".format(file_prefix))
    attribute_grouper_early_event = AttributeGrouper("generatedCsv/early_{}.csv".format(file_prefix))
    #attribute_grouper_early_event.export(["AIRLINE"], "outputFiles/airline_early_{}_count.csv".format(file_prefix),
    #                                     ['AIRLINE', 'COUNT'])
    #attribute_grouper_early_event.plot(["AIRLINE"], 30, "plots/Early_{}.png".format(file_prefix))
    #dataframe_splitter.late_event(["AIRLINE", scheduled, "ORIGIN_AIRPORT", field_name],
    #                              field_name, "generatedCsv/late_{}.csv".format(file_prefix))
    attribute_grouper_late_event = AttributeGrouper("generatedCsv/late_{}.csv".format(file_prefix))
    #attribute_grouper_late_event.export(["AIRLINE"], "outputFiles/airline_late_{}_count.csv".format(file_prefix),
    #                                    ['AIRLINE', 'COUNT'])
    #attribute_grouper_late_event.export(["AIRLINE"], "outputFiles/airline_late_{}_count.csv".format(file_prefix),
    #                                    ['AIRLINE', 'COUNT'])
    #attribute_grouper_late_event.plot(["AIRLINE"], 30, "plots/Late_{}.png".format(file_prefix))
    #dataframe_splitter.on_time_event(["AIRLINE", scheduled, "ORIGIN_AIRPORT", field_name],
    #                                 field_name, "generatedCsv/on_time_{}.csv".format(file_prefix))
    #attribute_grouper_on_time_event = AttributeGrouper("generatedCsv/on_time_{}.csv".format(file_prefix))
    #attribute_grouper_on_time_event.export(["AIRLINE"], "outputFiles/airline_on_time_{}_count.csv".format(file_prefix),
    #                                       ['AIRLINE', 'COUNT'])
    #attribute_grouper_on_time_event.export(["AIRLINE"], "outputFiles/airline_on_time_{}_count.csv".format(file_prefix),
    #                                       ['AIRLINE', 'COUNT'])
    #attribute_grouper_on_time_event.plot(["AIRLINE"], 30, "plots/On_Time_{}.png".format(file_prefix))
def generate_split_monthwise():
    """Write one CSV per calendar month from the full flights file."""
    splitter = DataframeMonthwiseSplitter("data/flights.csv")
    month_names = ["January", "February", "March", "April", "May", "June", "July",
                   "August", "September", "October", "November", "December"]
    # MONTH values in the data are 1-based, hence start=1.
    for number, name in enumerate(month_names, start=1):
        splitter.split_by_month('MONTH', number,
                                "generatedCsv/monthwiseFiles/{}.csv".format(name))
def generate_analysis_monthwise():
    """Build a MonthAttributeGrouper per month and export airline frequencies."""
    month_names = ["January", "February", "March", "April", "May", "June", "July",
                   "August", "September", "October", "November", "December"]
    month_attribute_groupers = []
    for month in month_names:
        grouper = MonthAttributeGrouper(month)
        month_attribute_groupers.append(grouper)
        #grouper.month_attribute_grouper(["ORIGIN_AIRPORT"], ["ORIGIN", "Count"],
        #                                "originMaxFrequency", 20)
        grouper.month_attribute_grouper(['AIRLINE'], ["AIRLINE", "Count"], "airlineMaxFrequency", 20)
# Write the monthly frequency into frequency
if __name__ == "__main__":
    # Toggle analysis steps by (un)commenting; only the arrival-delay split runs now.
    #generate_attribute_grouper_data()
    generate_split_data(is_arrival=True)
    #generate_split_monthwise()
    #generate_analysis_monthwise()
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
def upload_handler(instance, filename):
    """Build an upload path of the form 'upload_license<user id>/<filename>'.

    NOTE(review): nothing in this file uses this handler (the model below
    passes a plain string to upload_to), and UploadLicense declares no `user`
    field -- confirm which model this was written for.
    """
    directory = 'upload_license{}'.format(instance.user.id)
    return '{}/{}'.format(directory, filename)
class UploadLicense(models.Model):
    """Stores an uploaded license image under MEDIA_ROOT/upload_license/."""
    # NOTE(review): uses the static 'upload_license/' prefix rather than the
    # upload_handler defined above -- confirm which is intended.
    file = models.ImageField(upload_to='upload_license/')
|
# Ghiro - Copyright (C) 2013-2016 Ghiro Developers.
# This file is part of Ghiro.
# See the file 'docs/LICENSE.txt' for license terms.
from django.db.models import Q
from hashes.models import List
from lib.analyzer.base import BaseProcessingModule
try:
import hashlib
IS_HASH = True
except ImportError:
IS_HASH = False
class HashComparerProcessing(BaseProcessingModule):
    """Compares hashes with hashes lists."""

    name = "Hash List Comparer"
    description = "This plugins searches for a match between the image hash and hash lists."
    # Execution order among processing modules (lower presumably runs earlier -- TODO confirm).
    order = 20

    def check_deps(self):
        # Module is enabled only when hashlib imported successfully (see guard above).
        return IS_HASH

    def run(self, task):
        """Attach `task` to every accessible hash list that contains one of its hashes."""
        # NOTE(review): dict.iteritems() is Python-2-only; this breaks on
        # Python 3 unless changed to .items().
        for key, value in self.data["hash"].iteritems():
            # Get all lists matching hash type.
            # Visible lists: owned by the task's owner, or public.
            hash_lists = List.objects.filter(cipher=key).filter(Q(owner=task.owner) | Q(public=True))
            # Check hashes.
            for hash_list in hash_lists:
                if List.objects.filter(pk=hash_list.pk).filter(hash__value=value).exists():
                    hash_list.matches.add(task)
        return self.results
|
import atexit
import json
import logging
import os
from datetime import datetime
from typing import Any, Dict, List, Optional
from apscheduler.schedulers.background import BackgroundScheduler
from flask import Flask, request
from flask_login import LoginManager, current_user, login_required
from webargs import fields
from webargs.flaskparser import use_args, use_kwargs
import src.auth
from src.database import get_database, setup_database
from src.enums.job_status import JobStatus
from src.gpu import GPU
from src.job import Job
from src.mocked_gpu import MockedGPU
from src.param_parsing import parametric_cli
from src.user import User
app = Flask(__name__)
app.config['JSON_SORT_KEYS'] = False
app.secret_key = '0785f0f7-43fd-4148-917f-62f915d94e38' # a random uuid4
app.register_blueprint(src.auth.bp)
logger = logging.getLogger(__name__)
login_manager = LoginManager()
login_manager.init_app(app)
HAS_GPU = ((os.environ.get("gpu") or '').lower() in ('true', '1', 't'))
GPU_DCT: Dict[str, GPU] = {}
running_jobs: List[Job] = []
def check_running_jobs():
    """Finalize any finished jobs and drop them from the running list."""
    finished = []
    for job in running_jobs:
        logger.warning(f"checking job: {job}")
        if job.is_finished():
            finished.append(job)
            # A non-zero exit code marks the job as failed.
            job.complete_job(datetime.now(), success=(job.process.returncode == 0))
    # Remove after iterating so the list is not mutated mid-loop.
    for job in finished:
        running_jobs.remove(job)
def run_new_jobs():
    """Start the first queued job on every idle GPU."""
    # idx (the dict key) is unused; iteration only needs the GPU objects.
    for idx, gpu in GPU_DCT.items():
        logger.warning(f"Checking gpu {gpu.get_name()}")
        logger.warning(gpu)
        if gpu.is_idle():
            logger.warning(f"GPU idle {gpu.get_name()}")
            queue = gpu.fetch_queue()
            logger.warning(queue)
            if len(queue) > 0:
                logger.warning("gpus_list0: " + str(queue[0]))
                # gpus_list is stored as a JSON string; rebuild GPU objects from it.
                queue[0]["gpus_list"] = list(map(
                    lambda x: GPU.load(x),
                    json.loads(queue[0].get("gpus_list"))
                ))
                logger.warning("gpus_list: " + str(queue[0]))
                job = Job.from_dict(queue[0])
                logger.warning("queue0" + str(job))
                job.run_job()
                running_jobs.append(job)
                # Persist the queue with the started job removed.
                gpu.set_queue(queue[1:])
def check_job_status_and_run_new():
    """Scheduler tick: finalize finished jobs, then start queued ones on idle GPUs."""
    check_running_jobs()
    run_new_jobs()
# Poll every 10 seconds for finished jobs and free GPUs.
scheduler = BackgroundScheduler()
scheduler.add_job(func=check_job_status_and_run_new,
                  trigger="interval", seconds=10)
scheduler.start()
@login_manager.user_loader
def load_user(username) -> Optional[User]:
    """Flask-Login hook: resolve a session's username to a User (or None)."""
    return User.load(username)
@app.before_first_request
def get_gpus():
    """Populate GPU_DCT (mocked when no real GPU is configured) and persist each GPU."""
    if not HAS_GPU:
        mock_available_gpus()
    else:
        # Real-GPU discovery is not implemented in this module.
        pass
    # register every gpu in database
    for gpu in GPU_DCT.values():
        gpu.commit()
        logger.warning("GPUS: " + str(gpu))
@app.before_first_request
def setup_redis():
    """Initialize the backing database before the first request.

    NOTE(review): named 'redis' but delegates to setup_database(); confirm
    which backend src.database actually uses.
    """
    setup_database()
@app.route("/hello")
def hello_world():
    """Trivial liveness endpoint."""
    greeting = "<p>Hello, World!</p>"
    return greeting
@app.route("/available_gpus")
def get_available_gpu_names() -> Dict[str, Any]:
    """List the names of all registered GPUs."""
    names = list(GPU_DCT.keys())
    return {'gpus': names}
@app.route("/gpu_stats")
def get_gpu_stats() -> Dict[str, Dict[str, Any]]:
    """Map each registered GPU's name to its current stats."""
    return {name: gpu.get_stats() for name, gpu in GPU_DCT.items()}
@app.route("/jobs")
@login_required
@use_args({
    'statuses[]': fields.List(fields.Str(), required=False, default=[], missing=[]),
    'gpu': fields.Str(required=False, default="", missing=""),
    'count': fields.Int(required=False, default=10, missing=5),
    'sortBy': fields.Str(required=False, default="newest", missing="newest"),
    'project': fields.Str(required=False, default="", missing=""),
    'public': fields.Bool(required=False, default=False, missing=False),
}, location="query")
def get_jobs(args: Dict[str, Any]) -> Dict[str, List[Dict[str, Any]]]:
    """Return up to `count` jobs, filtered and sorted per the query arguments.

    Filters: status names, project, GPU uuid; public=True disables the
    owner-only filter.  Sort orders: newest (default), oldest, duration.
    """
    raw_statuses: List[str] = args["statuses[]"]
    gpu: str = args["gpu"]
    count: int = args["count"]
    sortBy: str = args["sortBy"]
    project: str = args['project']
    public: bool = args['public']
    # IMPROVEMENT: table-driven status parsing replaces the if/elif chain;
    # unknown status strings are ignored, exactly as before.
    status_by_name = {
        'queued': JobStatus.QUEUED,
        'running': JobStatus.RUNNING,
        'failed': JobStatus.FAILED,
        'cancelled': JobStatus.CANCELLED,
        'completed': JobStatus.COMPLETED,
    }
    statuses = [status_by_name[s] for s in raw_statuses if s in status_by_name]
    # fetch jobs (dropping any records that failed to load)
    job_dicts: List[Dict[str, Any]] = get_database().fetch_jobs()
    job_dicts = filter(lambda j: j is not None, job_dicts)
    jobs = map(lambda j: Job.load(j), job_dicts)
    # filter by user unless a public listing was requested
    if not public:
        jobs = filter(lambda j: j.user == current_user, jobs)
    # filter by project
    if project != "":
        jobs = filter(lambda j: j.project == project, jobs)
    # filter by status
    if statuses != []:
        jobs = filter(lambda j: j.status in statuses, jobs)
    # filter by gpu uuid
    if gpu != "":
        logger.warning(gpu)
        jobs = [j for j in jobs if gpu in (g.uuid for g in j.gpus_list)]
    # sort jobs
    if sortBy == "newest":
        jobs = sorted(jobs, key=lambda j: j.scheduled_time, reverse=True)
    elif sortBy == "oldest":
        jobs = sorted(jobs, key=lambda j: j.scheduled_time, reverse=False)
    elif sortBy == "duration":
        # FIX: `is not None` instead of `!= None`; running jobs fall back to
        # elapsed time so finished and running jobs compare on the same scale.
        jobs = sorted(
            jobs,
            key=lambda j: (j.finish_time - j.start_time
                           if j.finish_time is not None
                           else datetime.now() - j.start_time),
            reverse=True,
        )
    job_dicts = map(lambda j: j.dump(), jobs)
    return {
        "jobs": list(job_dicts)[:count]
    }
@app.route("/add_job", methods=['POST'])
@login_required
@use_args({
    'project': fields.Str(required=True),
    'experiment_name': fields.Str(required=True),
    'script_path': fields.Str(required=True),
    'cli_args': fields.Str(required=False, default="", missing="", allow_none=True),
    'gpus': fields.List(fields.Str, required=True),
    'yaml': fields.Str(required=False, default="", missing=""),
}, location="json")
def add_new_job(arg: Dict[str, Any]) -> Dict[str, Any]:
    """Queue one job (or, given a yaml parameter sweep, many) on the named GPUs."""
    yaml = arg['yaml']
    project = arg['project']
    name = arg['experiment_name']
    script_path = arg['script_path']
    cli_args = arg['cli_args']
    # NOTE(review): unknown GPU names map to None here, and `assert gpus` only
    # rejects an empty list -- a None entry would reach Job; confirm handling.
    gpus = list(map(lambda x: GPU_DCT.get(x, None), arg['gpus']))
    assert gpus
    assert script_path
    assert name
    assert project
    def add_job(_script_path: str, _cli_args: Dict[str, str]):
        # Create the job, enqueue it on each requested GPU, and persist it.
        job = Job(
            project=project,
            name=name,
            script_path=_script_path,
            cli_args=_cli_args,
            gpus_list=gpus,
            user=current_user,
        )
        logger.warning("job: " + str(job))
        for gpu in gpus:
            job.add_to_queue(gpu)
        # job.run_job()
        job.commit()
    if yaml:
        # A yaml spec expands into one job per generated command/argument set.
        args: List[Dict[str, Any]] = parametric_cli(
            cli=script_path,
            yaml_str=yaml,
        )
        for arg_dict in args:
            command: str = arg_dict['command']
            arguments: Dict[str, str] = arg_dict['argument']
            add_job(command, arguments)
    else:
        # No sweep: a single job with the literal script path and JSON cli args.
        add_job(script_path, json.loads(cli_args or "{}"))
    return {"status": "success"}
@app.route("/cancel_job", methods=['GET', 'POST'])
@login_required
@use_kwargs({
    'uuid': fields.Str(required=True),
}, location='json')
def cancel_job(uuid: str) -> Dict[str, Any]:
    """Cancel the job with the given uuid, provided it belongs to the caller."""
    job: Optional[Job] = Job.load(uuid)
    user: User = current_user
    print(f"Received cancelling request {job}.", flush=True)
    if job is None:
        return {
            "status": "failed",
            "code": 404,
            "error": "Job not found.",
        }
    # NOTE(review): 501 is "Not Implemented"; 403 is the conventional code for
    # an unauthorised caller.  Left unchanged since clients may depend on it.
    if job.user != user:
        return {
            "status": "failed",
            "code": 501,
            "error": "Unauthorised.",
        }
    # TODO: cancel job.
    print(f"Cancelled {job}.", flush=True)
    job.cancel_job()
    return {"status": "success"}
@app.route("/job_details")
@login_required
def get_job_details() -> Dict[str, Any]:
    """Look up a single job by the `uuid` query parameter."""
    uuid = request.args.get("uuid")
    if uuid is None:
        return {
            "status": "failed",
            "code": 400,
            "error": "UUID not supplied.",
        }
    job: Optional[Job] = Job.load(uuid)
    if job is not None:
        return job.dump(use_gpu_name=True)
    return {
        "status": "failed",
        "code": 404,
        "error": "Job not found.",
    }
@app.route("/curr_dir", methods=['GET'])
@login_required
def get_curr_dir() -> Dict[str, Any]:
    """Report the server process's current working directory."""
    current_directory = os.getcwd()
    return {"status": "success", "currDir": current_directory}
@app.route("/projects", methods=['GET'])
@login_required
def get_projects() -> Dict[str, Any]:
    """Return the caller's project names, sorted, with "General" pinned first."""
    jobs = get_database().fetch_all_matching("user", current_user.username)
    projects = sorted({j['project'] for j in jobs})
    if "General" in projects:
        projects.remove("General")
        projects.insert(0, "General")
    return {
        "projects": projects
    }
def mock_available_gpus():
    """Register four fake GPUs so the server can run without real hardware."""
    mocks = [
        ("0", 12000, "214175be-8c20-4f6d-8e25-bdc9c438a898"),
        ("1", 10000, "3c7a2a0e-1d5d-4df8-a85e-3dbe79de801c"),
        ("2", 8000, "ee415e66-c0bf-45ba-a944-0c5fb2cd7fa3"),
        ("3", 16000, "af20175a-f19c-4962-8f2f-983d3038a87b"),
    ]
    for name, memory_mib, uuid in mocks:
        GPU_DCT[name] = MockedGPU(name=name, model="mockedGPU",
                                  total_memory_mib=memory_mib, uuid=uuid)
atexit.register(lambda: scheduler.shutdown())
|
import re
import discord
import asyncio
import tokens
from phonetic import phonetic
client = discord.Client()
@client.event
async def on_ready():
    """Log identifying info once the bot has connected."""
    for line in ('Logged in as', client.user.name, client.user.id, '------'):
        print(line)
@client.event
async def on_message(message):
    """Handle the !callme / !callme_remove chat commands.

    NOTE(review): uses the pre-1.0 discord.py API (client.send_message,
    message.server); this will not run against modern discord.py.
    """
    if message.content.startswith('!callme'):
        # Require the exact '!callme: <text>' form before storing the phonetic name.
        r = re.compile('!callme: .*')
        if r.match(message.content) is not None:
            sp = message.content.split('!callme: ', 1)[1]
            phon = phonetic.Phonetic()
            usr_p = phon.add_phon(str(message.author), sp)
            await client.send_message(message.server, usr_p)
        else:
            await client.send_message(message.server, 'Command does not match format. Format is !callme: YOUR TEXT HERE')
    # NOTE(review): '!callme_remove' also startswith('!callme'), so the branch
    # above runs first and posts a format-error message before the removal
    # below -- confirm whether that double response is intended.
    if message.content.startswith('!callme_remove'):
        phon = phonetic.Phonetic()
        stats = phon.del_phon(str(message.author))
        await client.send_message(message.server, stats)
@client.event
async def on_voice_state_update(before, after):
    """Announce voice-channel joins/leaves with a transient TTS message."""
    if after.voice.voice_channel and before.voice.voice_channel is None:
        # Member joined a voice channel from no channel.
        server = after.server
        phon = phonetic.Phonetic()
        usr_phon = phon.find_name(after)
        msg = str(usr_phon) + ' joined ' + after.voice.voice_channel.name
        # Send as TTS, then delete so the text channel stays clean.
        tmp = await client.send_message(server, msg, tts=True)
        await client.delete_message(tmp)
    elif after.voice.voice_channel is None:
        # Member left voice entirely.  Moves between channels match neither
        # branch (both before and after are non-None), so they go unannounced.
        server = before.server
        phon = phonetic.Phonetic()
        usr_phon = phon.find_name(before)
        msg = str(usr_phon) + ' left the server'
        tmp = await client.send_message(server, msg, tts=True)
        await client.delete_message(tmp)
client.run(tokens.dt)
|
matrix = []

def is_valid(r, c, matrix):
    """Return True when (r, c) lies inside the square board `matrix`."""
    size = len(matrix)
    return 0 <= r < size and 0 <= c < size
# Read the 8x8 board, one whitespace-separated row per input line.
for _ in range(8):
    line = [x for x in input().split()]
    matrix.append(line)

# (row, col) offsets for the eight directions a queen can travel.
directions = {
    'up': [-1, 0],
    'down': [1, 0],
    'right': [0, 1],
    'left': [0, -1],
    'upleft': [-1, -1],
    'upright': [-1, 1],
    'downleft': [1, -1],
    'downright': [1, 1]
}

# Collect the coordinates of every queen ('Q') on the board.
queen_positions = []
for r in range(8):
    for c in range(8):
        if matrix[r][c] == 'Q':
            queen_positions.append([r, c])
# For every queen, walk each of the eight direction rays until the ray leaves
# the board or hits a non-empty square; a queen whose ray reaches the king
# ('K') before being blocked prints its own position.
#
# Fixes vs. the original: the king cell was compared against ' K' (leading
# space — never true), and any cell matching neither '.' nor 'Q' nor ' K'
# hit `continue` without advancing, looping the `while` forever.
for queen in queen_positions:
    queen_row, queen_col = queen
    for change_row, change_col in directions.values():
        row = queen_row + change_row
        col = queen_col + change_col
        # Advance through empty squares only; stop at the first piece or edge.
        while is_valid(row, col, matrix) and matrix[row][col] == '.':
            row += change_row
            col += change_col
        if is_valid(row, col, matrix) and matrix[row][col] == 'K':
            print(queen)
            break  # report each attacking queen once
# Item names with their matching prices (parallel lists).
fish_name = ['selmon roe', 'red bream',
             'egg roll', 'shimp', 'kimbab', 'tuna']
fish_price = [1000, 3000, 1000, 2000, 1000, 5000]

# Total computed via index-based access.
price = 0
for idx in range(len(fish_name)):
    price = price + fish_price[idx]
print("Total price is", price)

# Same total computed by iterating over the prices directly.
fp = 0
price = 0
for fp in fish_price:
    price = price + fp
print("Total price is", price)
# Add-on metadata consumed by Blender's add-on manager.
bl_info = {
    "name": "Retopology",
    "author": "Nikhil Sridhar",
    "version": (2, 5, 2),
    "blender": (2, 80, 0),
    "location": "View3D > Sideshelf > Retopology",
    "description": "Remesh/Retopologize",
    "warning": "",
    "wiki_url": "",
    "category": "AFXLAB"}
import bpy
import bmesh
from mathutils import Vector
wm = bpy.context.window_manager
# progress from [0 - 1000]

def symmetry_remesh(self):
    """Mirror-and-bisect the active object along the axis chosen in the
    object's ``s_axis`` enum, then apply the Mirror modifier.

    Replaces the original's three copy-pasted if/else-pass blocks with a
    single axis-index lookup; behavior is unchanged.
    """
    ob = bpy.context.active_object
    bpy.ops.object.modifier_add(type='MIRROR')
    # Clear the modifier's default X mirroring before applying the choice.
    bpy.context.object.modifiers["Mirror"].use_axis[0] = False
    bpy.context.object.modifiers["Mirror"].merge_threshold = bpy.context.scene.s_merge
    # Map the enum value to the modifier's axis index (X→0, Y→1, Z→2).
    axis = {'X': 0, 'Y': 1, 'Z': 2}.get(bpy.context.object.s_axis)
    if axis is not None:
        ob.modifiers["Mirror"].use_axis[axis] = True
        ob.modifiers["Mirror"].use_bisect_axis[axis] = True
    bpy.ops.object.modifier_apply(apply_as='DATA', modifier="Mirror")
class RM_OT_relaxmethod(bpy.types.Operator):
    """Relax remesh object."""
    bl_idname = "object.remesh_relax"
    bl_label = "Remesh Relax"
    bl_options = {'REGISTER', 'UNDO'}

    def execute(self, context):
        # Work on a throwaway bmesh copy of the active object's mesh data.
        bm = bmesh.new()
        bm.from_mesh(bpy.context.active_object.data)
        strength = bpy.context.scene.relax_strength
        tot = 50
        wm = bpy.context.window_manager
        # NOTE(review): each unit of `strength` runs `tot` (50) smoothing
        # passes, and progress_begin() is re-entered on every outer
        # iteration — confirm this nesting is intentional.
        for i in range(strength):
            wm.progress_begin(0, tot)
            for i in range(tot):
                wm.progress_update(i)
                for vert in bm.verts:
                    # Move each vertex toward the average of its neighbours,
                    # then remove the normal component so the smoothing is
                    # tangential (preserves overall volume better).
                    avg = Vector()
                    for edge in vert.link_edges:
                        other = edge.other_vert(vert)
                        avg += other.co
                    avg /= len(vert.link_edges)
                    avg -= vert.co
                    avg -= avg.dot(vert.normal) * vert.normal
                    vert.co += avg
                bm.normal_update()
        wm.progress_end()
        # Write the relaxed geometry back and refresh the viewport.
        bm.to_mesh(bpy.context.active_object.data)
        bpy.context.active_object.data.update()
        bpy.context.view_layer.update()
        return {'FINISHED'}
def remesh_ff(self, context):
    """Add a smooth-mode Remesh modifier (depth from scene.remesh_depth)
    to the active object and apply it immediately."""
    bpy.ops.object.modifier_add(type='REMESH')
    bpy.context.object.modifiers["Remesh"].mode = 'SMOOTH'
    bpy.context.object.modifiers["Remesh"].use_remove_disconnected = False
    bpy.context.object.modifiers["Remesh"].scale = 1
    bpy.context.object.modifiers["Remesh"].octree_depth = bpy.context.scene.remesh_depth
    bpy.ops.object.modifier_apply(apply_as='DATA', modifier="Remesh")
def density_ff(self, context):
    """Rebuild the active mesh at uniform density via dyntopo flood-fill.

    Enters Sculpt mode, enables dynamic topology with constant detail taken
    from scene.floodfill, and runs detail_flood_fill over the whole mesh.
    """
    bpy.ops.object.mode_set(mode='SCULPT')
    bpy.ops.sculpt.dynamic_topology_toggle()
    bpy.context.scene.tool_settings.sculpt.detail_refine_method = 'SUBDIVIDE'
    bpy.context.scene.tool_settings.sculpt.detail_type_method = 'CONSTANT'
    bpy.context.scene.tool_settings.sculpt.constant_detail_resolution = bpy.context.scene.floodfill
    #bpy.ops.sculpt.optimize()
    bpy.context.view_layer.update()
    bpy.ops.sculpt.detail_flood_fill()
def dynamic_remesh(self, context):
    """Duplicate the active mesh, rebuild it with dyntopo flood-fill,
    decimate, then shrinkwrap the result back onto the original surface.

    The remeshed copy is renamed '<name>_Remesh'; any previously generated
    '*Remesh' objects among the selection are deleted first.
    """
    #progress bar
    ob = bpy.context.active_object
    bpy.ops.object.origin_set(type='ORIGIN_GEOMETRY', center='MEDIAN')
    og_scale = bpy.context.object.scale
    dims = ob.dimensions
    # Remember the original dimensions so the result can be scaled back.
    x, y, z = bpy.context.active_object.dimensions
    if bpy.context.scene.keep_sculpt == True:
        bpy.ops.object.mode_set(mode='OBJECT')
    if bpy.context.object.mode == 'WEIGHT_PAINT':
        bpy.ops.object.mode_set(mode='OBJECT')
    else:
        pass
    ob = bpy.context.active_object
    original = bpy.data.objects[ob.name]
    scene = bpy.context.scene
    # Remove leftovers from previous runs of this operator.
    for ob in bpy.context.selected_objects:
        if ob.type == 'MESH' and ob.name.endswith("Remesh"):
            ob.select_set(True)
            bpy.ops.object.delete(use_global=False)
        else:
            pass
    ob = bpy.context.active_object
    ob.select_set(True)
    #bpy.ops.object.duplicate(linked=False)
    #bpy.context.object.scale = [15,15,15]
    # Normalize to a fixed working size so dyntopo detail is predictable.
    ob.dimensions = 25.0, 25.0, 25.0
    bpy.ops.object.duplicate_move()
    bpy.ops.object.origin_set(type='ORIGIN_GEOMETRY', center='MEDIAN')
    # dims = bpy.context.object.dimensions
    # bpy.context.object.dimensions = 25.0, 25.0, 25.0
    #remesh_ff(self,context)
    #----------------------------
    density_ff(self, context)
    #----------------------------
    bpy.ops.object.mode_set(mode='EDIT')
    bpy.ops.mesh.select_all(action='SELECT')
    bpy.ops.mesh.vertices_smooth(factor=1)
    #bpy.ops.mesh.tris_convert_to_quads(face_threshold=3.14159, shape_threshold=3.14159)
    bpy.ops.object.mode_set(mode='OBJECT')
    target = original
    #DECIMATE MOD METHOD
    # Reduce density; the 'vRemesh' vertex group (inverted) protects painted
    # areas from full decimation.
    bpy.ops.object.modifier_add(type='DECIMATE')
    bpy.context.object.modifiers["Decimate"].ratio = bpy.context.scene.decimate
    bpy.context.object.modifiers["Decimate"].vertex_group = "vRemesh"
    bpy.context.object.modifiers["Decimate"].invert_vertex_group = True
    bpy.context.object.modifiers["Decimate"].vertex_group_factor = bpy.context.scene.d_factor
    bpy.context.object.modifiers["Decimate"].use_symmetry = True
    bpy.ops.object.modifier_apply(apply_as='DATA', modifier="Decimate")
    bpy.ops.object.mode_set(mode='EDIT')
    bpy.ops.mesh.select_all(action='SELECT')
    #bpy.ops.mesh.remove_doubles(threshold=1)
    #bpy.ops.mesh.vertices_smooth(factor=1)
    # Convert the triangle soup into quads where possible (thresholds ~ pi).
    bpy.ops.mesh.tris_convert_to_quads(face_threshold=3.14159, shape_threshold=3.14159)
    bpy.ops.object.mode_set(mode='OBJECT')
    #bpy.ops.object.modifier_add(type='DISPLACE')
    #bpy.context.object.modifiers["Displace"].strength = bpy.context.scene.displace
    bpy.ops.object.modifier_add(type='SUBSURF')
    bpy.ops.object.modifier_add(type='SHRINKWRAP')
    #
    ###############
    #bpy.ops.object.mode_set(mode='OBJECT')
    bpy.context.object.modifiers["Subdivision"].levels = bpy.context.scene.ccsubd
    # Project the remeshed copy back onto the original surface.
    bpy.context.object.modifiers["Shrinkwrap"].target = target
    bpy.context.object.modifiers["Shrinkwrap"].show_in_editmode = True
    bpy.context.object.modifiers["Shrinkwrap"].wrap_method = 'PROJECT'
    bpy.context.object.modifiers["Shrinkwrap"].use_negative_direction = True
    bpy.ops.object.modifier_add(type='SMOOTH')
    bpy.context.object.modifiers["Smooth"].factor = bpy.context.scene.smooth_factor
    bpy.context.object.modifiers["Smooth"].iterations = 1
    #bpy.ops.object.modifier_apply(apply_as='DATA', modifier="Subdivision")
    #bpy.ops.object.modifier_apply(apply_as='DATA', modifier="Smooth")
    #bpy.ops.object.modifier_apply(apply_as='DATA', modifier="Shrinkwrap")
    # Apply the helper modifiers, or drop them when their settings are no-ops.
    if bpy.context.object.modifiers["Subdivision"].levels == 0:
        bpy.ops.object.modifier_remove(modifier="Subdivision")
    else:
        bpy.ops.object.modifier_apply(apply_as='DATA', modifier="Subdivision")
    bpy.ops.object.modifier_apply(apply_as='DATA', modifier="Shrinkwrap")
    if bpy.context.object.modifiers["Smooth"].factor == 0:
        bpy.ops.object.modifier_remove(modifier="Smooth")
    else:
        bpy.ops.object.modifier_apply(apply_as='DATA', modifier="Smooth")
    #bpy.ops.object.convert(target='MESH')
    bpy.context.object.name = bpy.context.object.name + "_Remesh"
    #bpy.ops.object.parent_clear(type='CLEAR')
    if bpy.context.scene.xray_mesh == True:
        # Display helpers so the retopo result reads through the original.
        bpy.context.object.show_in_front = True
        bpy.context.object.show_wire = True
        bpy.context.object.show_all_edges = True
    else:
        #bpy.context.object.display_type = 'WIRE'
        pass
    if bpy.context.scene.enable_sym == True:
        #bpy.ops.object.origin_set(type='ORIGIN_GEOMETRY')
        symmetry_remesh(self)
    else:
        pass
    # Restore the pre-normalization dimensions on both objects.
    bpy.context.object.dimensions = x, y, z
    bpy.context.view_layer.objects.active = ob
    bpy.context.object.dimensions = x, y, z
    #ob.select_set(True)
    bpy.context.object.location = ob.location
    #bpy.ops.object.mode_set(mode='SCULPT')
    #bpy.ops.object.select_all(action='DESELECT')
    if bpy.context.scene.keep_sculpt == True:
        bpy.ops.object.mode_set(mode='SCULPT')
    else:
        pass
class QR_OT_remesh(bpy.types.Operator):
    """Quad-Remesh Dyntopo Model"""
    bl_idname = 'mesh.quadremesh'
    bl_label = "Remeshe Dyntopo Model with high number of tris."
    bl_options = {'REGISTER', 'UNDO'}

    def execute(self, context):
        # The whole pipeline lives in dynamic_remesh(); this operator is a
        # thin UI entry point for it.
        #progress
        # wm = bpy.context.window_manager
        # tot =1000
        # wm.progress_begin(0, tot)
        #
        # for i in range(tot):
        #     wm.progress_update(i)
        dynamic_remesh(self, context)
        #wm.progress_end()
        return {'FINISHED'}
def update_decimate(self, context):
    """Property-update callback: re-run the remesh when auto-update is on."""
    if bpy.context.scene.auto_update:
        dynamic_remesh(self, context)
def update_presetsbar(self, context):
    """Apply the scene values matching the chosen ``presets_bar`` entry.

    Assignment order per preset is kept exactly as before, since setting
    these properties can itself trigger update callbacks.
    """
    presets = {
        '0.005': (('floodfill', 2.5), ('decimate', 0.009), ('ccsubd', 2)),
        '0.05': (('floodfill', 0.6), ('ccsubd', 2), ('decimate', 0.01)),
        '0.1': (('floodfill', 0.3), ('decimate', 0.1), ('ccsubd', 2)),
    }
    for prop_name, value in presets.get(bpy.context.object.presets_bar, ()):
        setattr(bpy.context.scene, prop_name, value)
def update_subd(self, context):
    """Property-update callback: re-run the remesh when auto-update is on."""
    if bpy.context.scene.auto_update:
        dynamic_remesh(self, context)
def weightp(self, context):
    """Ensure the active object has a 'vRemesh' vertex group, then switch
    into Weight Paint mode so the user can paint protection weights."""
    ob = bpy.context.active_object
    if not ob.vertex_groups:
        bpy.ops.object.vertex_group_add()
    # Rename any auto-named ("Group...") vertex group to the name the
    # Decimate modifier looks up.
    for group in ob.vertex_groups:
        if group.name.startswith("Group"):
            group.name = "vRemesh"
    bpy.ops.object.mode_set(mode='WEIGHT_PAINT')
class WP_OT_weightpaint(bpy.types.Operator):
    """Weight Paint Mode."""
    bl_idname = "object.wp_mode"
    bl_label = "WP_MODE"
    bl_options = {'REGISTER', 'UNDO'}

    def execute(self, context):
        # Create/rename the 'vRemesh' group and enter Weight Paint mode.
        weightp(self, context)
        return {'FINISHED'}
def oops(self, context):
    # Popup-menu draw callback. NOTE(review): appears unused — the popup in
    # ROP_OT_recommendop passes density_check instead; confirm before removal.
    self.layout.label(text="Woah! Pretty dense model, try adding a Decimate Modifier, Lower Ratio, & Apply")
def recommend_op(self, context):
    """Estimate the active object's triangle count and set its
    ``preset_indicator`` enum accordingly.

    Improvements over the original: the duplicated counting loop is a single
    ``sum()``, the ``x in range(a, b)`` membership tests are plain
    comparisons, and unused locals are gone. Thresholds are unchanged:
    counts below 500 or at/above 1,000,000 leave the indicator untouched.
    """
    obj = bpy.context.view_layer.objects.active
    # An n-gon with v vertices triangulates into v - 2 triangles.
    total_triangles = sum(len(face.vertices) - 2 for face in obj.data.polygons)
    print(total_triangles)
    if 500 <= total_triangles < 5000:
        bpy.context.object.preset_indicator = 'L'
    elif 5000 <= total_triangles < 10000:
        bpy.context.object.preset_indicator = 'M'
    elif 10000 <= total_triangles < 1000000:
        bpy.context.object.preset_indicator = 'H'
def density_check(self, context):
    # Draw callback passed to window_manager.popup_menu() by ROP_OT_recommendop.
    self.layout.label(text="Woah! This model is pretty dense, try adding a Decimate Modifier > Lower Ratio > Apply")
class ROP_OT_recommendop(bpy.types.Operator):
    """Recommended option."""
    bl_idname = "object.recommendop"
    bl_label = "Recommend Options"
    bl_options = {'REGISTER', 'UNDO'}

    def execute(self, context):
        """Count the active mesh's triangles, warn via popup when it is very
        dense, then delegate to recommend_op() to set the preset indicator.

        Same cleanups as recommend_op(): single ``sum()`` instead of the
        manual loop, comparisons instead of ``in range(...)``; the original
        bounds (100k to 10M triangles trigger the popup) are preserved.
        """
        obj = bpy.context.view_layer.objects.active
        total_triangles = sum(len(face.vertices) - 2 for face in obj.data.polygons)
        print(total_triangles)
        if 100000 <= total_triangles < 10000000:
            bpy.context.window_manager.popup_menu(density_check, title="Suggestion", icon='ERROR')
        recommend_op(self, context)
        #self.report({'INFO'}, 'Printing report to Info window.')
        return {'FINISHED'}
# Scene-level settings driving the panel below.
# NOTE(review): these same properties are declared again inside register();
# this module-level copy looks redundant — confirm before removing either.
bpy.types.Scene.decimate = bpy.props.FloatProperty(min=0.0001, max=1.0, default=0.02, description="Decimate Factor: How much to decimate before remesh", update=update_decimate)
bpy.types.Scene.d_factor = bpy.props.FloatProperty(min=0.0, max=1000.0, default=100.0, description="Decimate Factor: How much to decimate before remesh", update=update_decimate)
bpy.types.Scene.smooth_factor = bpy.props.FloatProperty(min=-2.0, max=4.5, default=1.0, description="Smoothing Factor: How much smoothness to apply after remesh", update=update_decimate)
bpy.types.Scene.ccsubd = bpy.props.IntProperty(min=0, max=6, default=2, description="Times to subdivide after remesh", update=update_subd)
bpy.types.Scene.keep_sculpt = bpy.props.BoolProperty(name="keep_sculpt", default=False, description="Keep sculpting mode enabled")
bpy.types.Scene.auto_update = bpy.props.BoolProperty(name="auto_update", default=False, description="Auto-update settings when changing them.")
bpy.types.Scene.displace = bpy.props.FloatProperty(min=-10.0, max=5.0, default=1, description="Projection Factor", update=update_decimate)
bpy.types.Scene.xray_mesh = bpy.props.BoolProperty(name="xray_mesh", default=False, description="Enable X-Ray.")
bpy.types.Scene.enable_sym = bpy.props.BoolProperty(name="enable_sym", default=False, description="Enable Symmetry.")
bpy.types.Scene.s_merge = bpy.props.FloatProperty(min=0.0, max=0.2, default=0.001, description="Symmetry Merge Limiit", update=update_decimate)
bpy.types.Scene.floodfill = bpy.props.FloatProperty(min=0.02, max=5.0, default=0.5, description="Flood Fill Resolution", update=update_decimate)
bpy.types.Scene.relax_strength = bpy.props.IntProperty(min=1, max=50, default=20, description="Relax strength value", update=None)
bpy.types.Scene.remesh_depth = bpy.props.IntProperty(min=1, max=8, default=5, description="Remesh Depth", update=update_decimate)
class DR_PT_panel(bpy.types.Panel):
    # Sidebar panel (N-panel) hosting all retopology controls.
    bl_category = "Retopology"
    bl_space_type = "VIEW_3D"
    bl_region_type = "UI"
    #bl_context = "editmode"
    bl_label = "Retopology"

    def draw(self, context):
        """Build the sidebar UI; shows a hint when no object is active."""
        layout = self.layout
        ob = bpy.context.active_object
        sculpt = context.tool_settings.sculpt
        if ob is not None:
            # Toggle row: x-ray, auto-update, keep-sculpt, symmetry.
            row = layout.split(align=True)
            row.prop(context.scene, "xray_mesh", text='', icon='HIDE_OFF')
            row.prop(context.scene, "auto_update", text='', icon='FILE_REFRESH')
            row.prop(context.scene, "keep_sculpt", text='', icon='SCULPTMODE_HLT')
            row.prop(context.scene, "enable_sym", text='', icon='UV_ISLANDSEL')
            row = layout.row(align=True)
            if bpy.context.scene.enable_sym == True:
                # Axis selector and merge limit only while symmetry is on.
                row.prop(ob, "s_axis", expand=True)
                row = layout.row(align=True)
                row.prop(context.scene, "s_merge", text="Merge Limit", slider=False)
            #layout = self.layout
            #split = layout.split(factor=1)
            col = layout.split(align=True, factor=1)
            col.operator("object.recommendop", text="Detect Polycount", icon='SHADERFX')
            col.scale_y = 1.4
            col = layout.split(align=True, factor=0.01)
            col.prop(ob, "preset_indicator", expand=True)
            col.prop(ob, "presets_bar", expand=True)
            col.scale_y = 1.4
            #if total_triangles == 3804:
            # Density / decimation controls.
            row = layout.row(align=True)
            row = row.column(align=True)
            row.operator("object.wp_mode", text="Weight Paint", icon='MOD_VERTEX_WEIGHT')
            row.prop(context.scene, "d_factor", text="Weight Factor", slider=False)
            #row.scale_y = 1.4
            #row = row.row(align=True)
            row.prop(context.scene, "floodfill", text="Density", slider=False)
            #row.prop(context.scene, "remesh_depth",text="Depth", slider=False)
            row.prop(context.scene, "decimate", text="Decimate", slider=False)
            row.scale_y = 1.7
            # Post-remesh smoothing controls.
            row = layout.row(align=True)
            row = row.column(align=True)
            row.prop(context.scene, "ccsubd", text="Subdivisions", slider=False)
            #row.prop(context.scene, "displace",text="Relax", slider=False)
            row.prop(context.scene, "smooth_factor", text="Smoothness", slider=False)
            row.prop(context.scene, "relax_strength", text="Relax Strength", slider=True)
            row.operator("object.remesh_relax", text="Relax", icon='MESH_GRID')
            row.scale_y = 1.7
            row = layout.row(align=True)
            row = layout.row(align=True)
            row.operator(QR_OT_remesh.bl_idname, text="Remesh", icon='MOD_REMESH')
            row.scale_y = 2.0
            # if (sculpt.detail_type_method == 'CONSTANT'):
            #     row.prop(sculpt, "constant_detail_resolution")
            #     row.operator("sculpt.sample_detail_size", text="", icon='EYEDROPPER')
        else:
            layout = self.layout
            layout.label(text="Select your model first", icon='URL')
            layout.scale_y = 2.0
# Every class this add-on registers with Blender, in registration order.
classes = (QR_OT_remesh, WP_OT_weightpaint, ROP_OT_recommendop, RM_OT_relaxmethod, DR_PT_panel)
def register():
    """Register all add-on classes and declare the custom Object/Scene
    properties the UI reads."""
    #bpy.utils.register_module(__name__)
    from bpy.utils import register_class
    for cls in classes:
        register_class(cls)
    bpy.types.Object.s_axis = bpy.props.EnumProperty(
        name="Axis",
        description="Symmetry Axis",
        items=[("X", "X", "X-axis", '', 0),
               ("Y", "Y", "Y-axis", '', 1),
               ("Z", "Z", "Z-axis", '', 2)
               ],
        # NOTE(review): EnumProperty defaults are normally an item identifier
        # string — confirm that None is accepted here.
        default=None,
        update=update_decimate
        #options= {'ENUM_FLAG'},
    )
    bpy.types.Object.presets_bar = bpy.props.EnumProperty(
        name="Preset Bar",
        description="Preset Bar: Recommends settings depending on your models poly count.",
        items=[("0.005", "High", "High Polycount", '', 0),
               ("0.05", "Medium", "Medium Polycount", '', 1),
               ("0.1", "Low", "Low Polycount", '', 2)
               ],
        default='0.1',
        update=update_presetsbar,
        #options= {'ENUM_FLAG'},
    )
    bpy.types.Object.preset_indicator = bpy.props.EnumProperty(
        name="Preset Indicator",
        description="Preset Indicator: Recommends settings depending on your models poly count.",
        items=[("H", "", "High Polycount", '', 0),
               ("M", "", "Medium Polycount", '', 1),
               ("L", "", "Low Polycount", '', 2)
               ],
        default='L',
        update=None,
        options={'HIDDEN'},
    )
    # Scene settings (same declarations also exist at module level above).
    bpy.types.Scene.decimate = bpy.props.FloatProperty(min=0.0001, max=1.0, default=0.02, description="Decimate Factor: How much to decimate before remesh", update=update_decimate)
    bpy.types.Scene.d_factor = bpy.props.FloatProperty(min=0.0, max=1000.0, default=100.0, description="Weight Factor: Density on painted weight", update=update_decimate)
    bpy.types.Scene.smooth_factor = bpy.props.FloatProperty(min=-2.0, max=4.5, default=1.0, description="Smoothing Factor: How much smoothness to apply after remesh", update=update_decimate)
    bpy.types.Scene.ccsubd = bpy.props.IntProperty(min=0, max=6, default=2, description="Subdivisions after remesh", update=update_subd)
    bpy.types.Scene.keep_sculpt = bpy.props.BoolProperty(name="keep_sculpt", default=False, description="Keep sculpting mode enabled")
    bpy.types.Scene.auto_update = bpy.props.BoolProperty(name="auto_update", default=False, description="Auto-update settings when changing them.")
    bpy.types.Scene.displace = bpy.props.FloatProperty(min=-10.0, max=5.0, default=1, description="Projection Factor", update=update_decimate)
    bpy.types.Scene.xray_mesh = bpy.props.BoolProperty(name="xray_mesh", default=False, description="Enable X-Ray.")
    bpy.types.Scene.enable_sym = bpy.props.BoolProperty(name="enable_sym", default=False, description="Enable Symmetry.")
    bpy.types.Scene.s_merge = bpy.props.FloatProperty(min=0.0, max=0.2, default=0.001, description="Symmetry Merge Limiit", update=update_decimate)
    bpy.types.Scene.floodfill = bpy.props.FloatProperty(min=0.02, max=5.0, default=0.5, description="Flood Fill Resolution", update=update_decimate)
    bpy.types.Scene.relax_strength = bpy.props.IntProperty(min=1, max=50, default=20, description="Relax strength value", update=None)
    bpy.types.Scene.remesh_depth = bpy.props.IntProperty(min=1, max=8, default=5, description="Remesh Depth", update=update_decimate)
def unregister():
    """Unregister the add-on classes in reverse registration order."""
    from bpy.utils import unregister_class
    for registered in reversed(classes):
        unregister_class(registered)
# Allow running the script directly from Blender's text editor.
if __name__ == "__main__":
    register()
|
from picas.documents import Document, Task
from picas.util import seconds
from nose.tools import assert_equals, assert_raises, assert_true
''' @author Joris Borgdorff '''
# Fixture document ids shared by the tests below.
test_id = 'mydoc'
test_other_id = 'myotherdoc'

def test_create():
    """Document exposes its '_id' value through the id property, for both
    reading and writing."""
    doc = Document({'_id': test_id})
    assert_equals(doc.id, test_id)
    assert_equals(doc.value, {'_id': test_id})
    doc.id = test_other_id
    assert_equals(doc.id, test_other_id)
    assert_equals(doc.value, {'_id': test_other_id})
def test_no_id():
    """Reading id/rev from a document that has neither raises AttributeError."""
    doc = Document({'someattr': 1})
    assert_raises(AttributeError, getattr, doc, 'id')
    assert_raises(AttributeError, getattr, doc, 'rev')
def test_empty():
    """Constructing a Document from an empty dict must not raise."""
    Document({})
def test_attachment():
    """Attachments round-trip: content type is inferred from the filename
    extension and the raw data is stored base64-encoded in '_attachments'."""
    doc = Document()
    data = b"This is it"
    doc.put_attachment('mytext.txt', data)
    attach = doc.get_attachment('mytext.txt')
    assert_equals(attach['content_type'], 'text/plain')
    assert_equals(attach['data'], data)
    # 'VGhpcyBpcyBpdA==' is the base64 encoding of b"This is it".
    assert_equals(doc['_attachments']['mytext.txt']['data'],
                  'VGhpcyBpcyBpdA==')
    doc.remove_attachment('mytext.txt')
    assert_true('mytext.txt' not in doc['_attachments'])
    # The previously fetched attachment dict is unaffected by removal.
    assert_equals(attach['data'], data)
    doc.put_attachment('mytext.json', b'{}')
    attach = doc.get_attachment('mytext.json')
    assert_equals(attach['content_type'], 'application/json')
class TestTask:
    """State-transition tests for picas.documents.Task."""

    def setup(self):
        # Fresh task for every test method (nose-style setup).
        self.task = Task({'_id': test_id})

    def test_id(self):
        # The id is reachable via the property, .value, and item access.
        assert_equals(self.task.id, test_id)
        assert_equals(self.task.value['_id'], test_id)
        assert_equals(self.task['_id'], test_id)

    def test_no_id(self):
        # A Task created without an id generates one.
        t = Task()
        assert_true(len(t.id) > 10)

    def test_done(self):
        # done() stamps the current time (in seconds) into 'done'.
        assert_equals(self.task['done'], 0)
        self.task.done()
        assert_true(self.task['done'] >= seconds() - 1)

    def test_lock(self):
        # lock() stamps the current time (in seconds) into 'lock'.
        assert_equals(self.task['lock'], 0)
        self.task.lock()
        assert_true(self.task['lock'] >= seconds() - 1)

    def test_scrub(self):
        # scrub() resets lock/done and increments scrub_count each call.
        self.task.lock()
        self.task.done()
        self.task.scrub()
        assert_equals(self.task['lock'], 0)
        assert_equals(self.task['done'], 0)
        assert_equals(self.task['scrub_count'], 1)
        self.task.scrub()
        assert_equals(self.task['lock'], 0)
        assert_equals(self.task['done'], 0)
        assert_equals(self.task['scrub_count'], 2)

    def test_error(self):
        # error() marks lock and done with -1 and records the message;
        # a subsequent scrub() clears the markers but keeps the error log.
        self.task.error("some message")
        assert_equals(self.task['lock'], -1)
        assert_equals(self.task['done'], -1)
        self.task.scrub()
        assert_equals(self.task['lock'], 0)
        assert_equals(self.task['done'], 0)
        assert_equals(len(self.task['error']), 1)
|
import sys
input = sys.stdin.readline  # NOTE: shadows the builtin for faster reads

# Robot-vacuum simulation: count the cells cleaned before it gets stuck.
n, m = map(int, input().split())
current_r, current_c, current_d = map(int, input().split())
dx = [-1, 0, 1, 0]  # north, east, south, west
dy = [0, 1, 0, -1]
board = []
for i in range(n):
    board.append(list(map(int, input().split())))
visited = [[0] * m for i in range(n)]  # cells the cleaner has already cleaned
count = 0
while True:
    if visited[current_r][current_c] == 0:  # backing up can revisit a cell
        count += 1
        visited[current_r][current_c] = 1  # clean this cell
    flag = False  # set when an uncleaned, reachable cell is found
    nd = current_d
    for i in range(4):
        nd = 3 if nd == 0 else nd - 1  # rotate 90 degrees counter-clockwise
        nx = current_r + dx[nd]
        ny = current_c + dy[nd]
        if board[nx][ny] == 0 and visited[nx][ny] == 0:  # cleanable cell
            current_r = nx
            current_c = ny
            current_d = nd
            flag = True
            break
    if not flag:
        # All four directions are cleaned or walls: back up one cell
        # (opposite of the facing direction) without turning.
        nx = current_r - dx[current_d]
        ny = current_c - dy[current_d]
        if board[nx][ny] == 0:  # not a wall
            current_r = nx
            current_c = ny
        else:
            break
print(count)
|
# Binary Search Tree Checker
# Write a function to check that a binary tree is a valid binary search tree.
# class BinaryTreeNode:
#
# def __init__(self, value):
# self.value = value
# self.left = None
# self.right = None
#
# def insert_left(self, value):
# self.left = BinaryTreeNode(value)
# return self.left
#
# def insert_right(self, value):
# self.right = BinaryTreeNode(value)
# return self.right
def is_valid_bst(root):
    """Return True when the binary tree rooted at `root` is a valid BST.

    Iterative DFS carrying an exclusive (lower, upper) bound for each node;
    equal values count as violations (duplicates are not allowed).

    Fixes vs. the original: the stack was popped under the wrong name
    (`node_and_bounds_stack` — NameError), the left-child push had a
    malformed/unbalanced call, the right child was referenced as the
    undefined `node_right`, and an empty tree crashed instead of being valid.
    """
    if root is None:
        return True  # an empty tree is trivially a BST
    node_stack = [(root, -float('inf'), float('inf'))]
    while node_stack:
        node, lower_bound, upper_bound = node_stack.pop()
        # Every node must lie strictly inside the bounds set by its ancestors.
        if not (lower_bound < node.value < upper_bound):
            return False
        if node.left:
            node_stack.append((node.left, lower_bound, node.value))
        if node.right:
            node_stack.append((node.right, node.value, upper_bound))
    return True
|
#
# test_http
#
# Copyright (c) 2011-2021 Akinori Hattori <hattya@gmail.com>
#
# SPDX-License-Identifier: MIT
#
import ayame
from ayame import http
from base import AyameTestCase
class HTTPTestCase(AyameTestCase):
def assertStatus(self, st, code, reason, superclass=None):
self.assertEqual(st.code, code)
self.assertEqual(st.reason, reason)
self.assertEqual(st.status, '' if code == 0 else f'{code} {reason}')
if superclass is None:
self.assertIsInstance(st, object)
self.assertEqual(str(st), st.status)
else:
self.assertIsInstance(st, type)
self.assertTrue(issubclass(st, superclass))
def new_environ(self, data=None, form=None):
return super().new_environ(method='POST',
data=data,
form=form)
def test_parse_accept(self):
self.assertEqual(http.parse_accept(''), ())
self.assertEqual(http.parse_accept('ja, en'), (('ja', 1.0), ('en', 1.0)))
self.assertEqual(http.parse_accept('en, ja'), (('en', 1.0), ('ja', 1.0)))
self.assertEqual(http.parse_accept('en; q=0.7, ja'), (('ja', 1.0), ('en', 0.7)))
# invalid
self.assertEqual(http.parse_accept('ja, en; q=33.3333'), (('ja', 1.0), ('en', 1.0)))
self.assertEqual(http.parse_accept('ja, en, q=0.7'), (('ja', 1.0), ('en', 1.0), ('q=0.7', 1.0)))
def test_parse_form_data_empty(self):
self.assertEqual(http.parse_form_data(self.new_environ()), {})
self.assertEqual(http.parse_form_data(self.new_environ(data='')), {})
self.assertEqual(http.parse_form_data(self.new_environ(form='')), {})
def test_parse_form_data_ascii(self):
data = ('x=-1&'
'y=-1&'
'y=-2&'
'z=-1&'
'z=-2&'
'z=-3')
self.assertEqual(http.parse_form_data(self.new_environ(data=data)), {
'x': ['-1'],
'y': ['-1', '-2'],
'z': ['-1', '-2', '-3'],
})
data = self.form_data(('x', '-1'),
('y', '-1'),
('y', '-2'),
('z', '-1'),
('z', '-2'),
('z', '-3'))
self.assertEqual(http.parse_form_data(self.new_environ(form=data)), {
'x': ['-1'],
'y': ['-1', '-2'],
'z': ['-1', '-2', '-3'],
})
def test_parse_form_data_utf_8(self):
data = ('\u3082=\u767e&'
'\u305b=\u767e&'
'\u305b=\u5343&'
'\u3059=\u767e&'
'\u3059=\u5343&'
'\u3059=\u4e07')
self.assertEqual(http.parse_form_data(self.new_environ(data=data)), {
'\u3082': ['\u767e'],
'\u305b': ['\u767e', '\u5343'],
'\u3059': ['\u767e', '\u5343', '\u4e07'],
})
data = self.form_data(('\u3082', '\u767e'),
('\u305b', '\u767e'),
('\u305b', '\u5343'),
('\u3059', '\u767e'),
('\u3059', '\u5343'),
('\u3059', '\u4e07'))
self.assertEqual(http.parse_form_data(self.new_environ(form=data)), {
'\u3082': ['\u767e'],
'\u305b': ['\u767e', '\u5343'],
'\u3059': ['\u767e', '\u5343', '\u4e07'],
})
def test_parse_form_data_post(self):
data = self.form_data(('a', ('\u3044', 'spam\neggs\nham\n', 'text/plain')))
form_data = http.parse_form_data(self.new_environ(form=data))
self.assertEqual(list(form_data), ['a'])
self.assertEqual(len(form_data['a']), 1)
a = form_data['a'][0]
self.assertEqual(a.name, 'a')
self.assertEqual(a.filename, '\u3044')
self.assertEqual(a.value, b'spam\neggs\nham\n')
def test_parse_form_data_put(self):
data = 'spam\neggs\nham\n'
environ = self.new_environ(data=data)
environ.update(REQUEST_METHOD='PUT',
CONTENT_TYPE='text/plain')
self.assertEqual(http.parse_form_data(environ), {})
def test_parse_form_data_http_408(self):
data = self.form_data(('a', ('a.txt', '', 'text/plain')))
environ = self.new_environ(form=data[:-20])
environ.update(CONTENT_LENGTH=str(len(data) * 2))
with self.assertRaises(http.RequestTimeout):
http.parse_form_data(environ)
def test_http_status(self):
args = (0, '', ayame.AyameError)
self.assertStatus(http.HTTPStatus, *args)
st = http.HTTPStatus()
self.assertStatus(st, *args[:-1])
self.assertEqual(st.headers, [])
self.assertEqual(st.description, '')
class ST(http.HTTPStatus):
code = -1
reason = None
status = None
self.assertEqual(ST.code, -1)
self.assertIsNone(ST.reason)
self.assertIsNone(ST.status)
st = ST()
self.assertEqual(st.code, -1)
self.assertIsNone(st.reason)
self.assertIsNone(st.status)
self.assertEqual(st.headers, [])
self.assertEqual(st.description, '')
def test_http_200(self):
args = (200, 'OK', http.HTTPSuccessful)
self.assertStatus(http.OK, *args)
st = http.OK()
self.assertStatus(st, *args[:-1])
self.assertEqual(st.headers, [])
self.assertEqual(st.description, '')
def test_http_201(self):
args = (201, 'Created', http.HTTPSuccessful)
self.assertStatus(http.Created, *args)
st = http.Created()
self.assertStatus(st, *args[:-1])
self.assertEqual(st.headers, [])
self.assertEqual(st.description, '')
def test_http_202(self):
args = (202, 'Accepted', http.HTTPSuccessful)
self.assertStatus(http.Accepted, *args)
st = http.Accepted()
self.assertStatus(st, *args[:-1])
self.assertEqual(st.headers, [])
self.assertEqual(st.description, '')
def test_http_204(self):
args = (204, 'No Content', http.HTTPSuccessful)
self.assertStatus(http.NoContent, *args)
st = http.NoContent()
self.assertStatus(st, *args[:-1])
self.assertEqual(st.headers, [])
self.assertEqual(st.description, '')
def test_http_301(self):
args = (301, 'Moved Permanently', http.HTTPRedirection)
self.assertStatus(http.MovedPermanently, *args)
def assert3xx(st, uri, headers):
self.assertStatus(st, *args[:-1])
self.assertEqual(st.headers, headers)
self.assertIsNot(st.headers, headers)
self.assertIn(uri, st.description)
uri = 'http://localhost/'
headers = [('Server', 'Python')]
assert3xx(http.MovedPermanently(uri), uri, [
('Location', uri),
])
assert3xx(http.MovedPermanently(uri, headers), uri, [
('Server', 'Python'),
('Location', uri),
])
self.assertEqual(headers, [('Server', 'Python')])
def test_http_302(self):
args = (302, 'Found', http.HTTPRedirection)
self.assertStatus(http.Found, *args)
def assert3xx(st, uri, headers):
self.assertStatus(st, *args[:-1])
self.assertEqual(st.headers, headers)
self.assertIsNot(st.headers, headers)
self.assertIn(uri, st.description)
uri = 'http://localhost/'
headers = [('Server', 'Python')]
assert3xx(http.Found(uri), uri, [
('Location', uri),
])
assert3xx(http.Found(uri, headers), uri, [
('Server', 'Python'),
('Location', uri),
])
self.assertEqual(headers, [('Server', 'Python')])
def test_http_303(self):
args = (303, 'See Other', http.HTTPRedirection)
self.assertStatus(http.SeeOther, *args)
def assert3xx(st, uri, headers):
self.assertStatus(st, *args[:-1])
self.assertEqual(st.headers, headers)
self.assertIsNot(st.headers, headers)
self.assertIn(uri, st.description)
uri = 'http://localhost/'
headers = [('Server', 'Python')]
assert3xx(http.SeeOther(uri), uri, [
('Location', uri),
])
assert3xx(http.SeeOther(uri, headers), uri, [
('Server', 'Python'),
('Location', uri),
])
self.assertEqual(headers, [('Server', 'Python')])
def test_http_304(self):
args = (304, 'Not Modified', http.HTTPRedirection)
self.assertStatus(http.NotModified, *args)
st = http.NotModified()
self.assertStatus(st, *args[:-1])
self.assertEqual(st.headers, [])
self.assertEqual(st.description, '')
def test_http_400(self):
args = (400, 'Bad Request', http.HTTPClientError)
self.assertStatus(http.BadRequest, *args)
st = http.BadRequest()
self.assertStatus(st, *args[:-1])
self.assertEqual(st.headers, [])
self.assertEqual(st.description, '')
def test_http_401(self):
args = (401, 'Unauthrized', http.HTTPClientError)
self.assertStatus(http.Unauthrized, *args)
def assert4xx(st, headers):
self.assertStatus(st, *args[:-1])
self.assertEqual(st.headers, headers)
self.assertIsNot(st.headers, headers)
self.assertTrue(st.description)
headers = []
assert4xx(http.Unauthrized(), headers)
assert4xx(http.Unauthrized(headers), headers)
self.assertEqual(headers, [])
def test_http_403(self):
args = (403, 'Forbidden', http.HTTPClientError)
self.assertStatus(http.Forbidden, *args)
def assert4xx(st, uri, headers):
self.assertStatus(st, *args[:-1])
self.assertEqual(st.headers, headers)
self.assertIsNot(st.headers, headers)
self.assertIn(uri, st.description)
uri = 'http://localhsot/'
headers = []
assert4xx(http.Forbidden(uri), uri, headers)
assert4xx(http.Forbidden(uri, headers), uri, headers)
self.assertEqual(headers, [])
def test_http_404(self):
args = (404, 'Not Found', http.HTTPClientError)
self.assertStatus(http.NotFound, *args)
def assert4xx(st, uri, headers):
self.assertStatus(st, *args[:-1])
self.assertEqual(st.headers, headers)
self.assertIsNot(st.headers, headers)
self.assertIn(uri, st.description)
uri = 'http://localhsot/'
headers = []
assert4xx(http.NotFound(uri), uri, headers)
assert4xx(http.NotFound(uri, headers), uri, headers)
self.assertEqual(headers, [])
def test_http_405(self):
    """http.MethodNotAllowed reports 405, builds an Allow header, and names method+URI."""
    code, reason = 405, 'Method Not Allowed'
    self.assertStatus(http.MethodNotAllowed, code, reason, http.HTTPClientError)

    def check(status, verb, target_uri, expected_headers):
        self.assertStatus(status, code, reason)
        self.assertEqual(status.headers, expected_headers)
        # Must be an equal copy, not the same list object.
        self.assertIsNot(status.headers, expected_headers)
        self.assertIn(verb, status.description)
        self.assertIn(target_uri, status.description)

    verb = 'PUT'
    target = 'http://localhost/'
    allowed = ['GET', 'POST']
    caller_headers = [('Server', 'Python')]
    # Without extra headers: only the generated Allow header is present.
    check(http.MethodNotAllowed(verb, target, allowed), verb, target, [
        ('Allow', 'GET, POST'),
    ])
    # With extra headers: caller headers come first, Allow is appended.
    check(http.MethodNotAllowed(verb, target, allowed, caller_headers), verb, target, [
        ('Server', 'Python'),
        ('Allow', 'GET, POST'),
    ])
    # The status must not have mutated the caller's list.
    self.assertEqual(caller_headers, [('Server', 'Python')])
def test_http_408(self):
    """http.RequestTimeout reports 408 and copies (never aliases) caller headers."""
    code, reason = 408, 'Request Timeout'
    self.assertStatus(http.RequestTimeout, code, reason, http.HTTPClientError)

    def check(status, expected_headers):
        self.assertStatus(status, code, reason)
        self.assertEqual(status.headers, expected_headers)
        # Must be an equal copy, not the same list object.
        self.assertIsNot(status.headers, expected_headers)
        self.assertTrue(status.description)

    shared = []
    check(http.RequestTimeout(), shared)
    check(http.RequestTimeout(shared), shared)
    # The status must not have mutated the caller's list.
    self.assertEqual(shared, [])
def test_http_500(self):
    """http.InternalServerError reports 500 with empty headers and description."""
    code, reason, base = 500, 'Internal Server Error', http.HTTPServerError
    self.assertStatus(http.InternalServerError, code, reason, base)
    status = http.InternalServerError()
    self.assertStatus(status, code, reason)
    self.assertEqual(status.headers, [])
    self.assertEqual(status.description, '')
def test_http_501(self):
    """http.NotImplemented reports 501 and names the method and URI in its description."""
    code, reason = 501, 'Not Implemented'
    self.assertStatus(http.NotImplemented, code, reason, http.HTTPServerError)
    verb = 'PUT'
    # 'localhsot' (sic) — any opaque sample URI works for this test.
    target = 'http://localhsot/'
    status = http.NotImplemented(verb, target)
    self.assertStatus(status, code, reason)
    self.assertEqual(status.headers, [])
    self.assertIn(verb, status.description)
    self.assertIn(target, status.description)
|
def sum_double(a, b):
    """Return the sum of a and b, doubled when the two values are equal."""
    total = a + b
    return total * 2 if a == b else total
# Demonstration calls. BUGFIX: the originals used Python 2 `print` statements,
# which are a SyntaxError under Python 3 — converted to print() calls.
# Expected output: 3, 5, 8, 12.
print(sum_double(1, 2))
print(sum_double(3, 2))
print(sum_double(2, 2))
print(sum_double(3, 3))
|
import json
import logging
from typing import Optional

import requests

from EntityLoader import LoadContext, Loading
from github_loading import GithubLoadBehaviour
class SimplePageableBehaviour(GithubLoadBehaviour):
    """Page-by-page loader for a paginated GitHub-style list endpoint.

    Each call to ``load`` fetches one page described by the incoming
    ``LoadContext`` and, unless that page was the last one, schedules the next
    page through a follow-up ``LoadContext``. ``_headers`` and ``_params``
    arrive as JSON-encoded strings and are decoded per request so every call
    works on a fresh dict.
    """

    def __init__(self,
                 _token: str,
                 per_page: int,
                 _logger: logging.Logger,
                 _loading_obj: str,
                 _base_url: str,
                 _headers: str,
                 _params: str,
                 _token_id: int,
                 _proc_uuid: str):
        """Store request configuration; token, page size and logger go to the base class.

        :param _loading_obj: name of the entity type being loaded (used only in log lines)
        :param _base_url: endpoint URL returned by _build_url
        :param _headers: JSON-encoded string of base HTTP headers
        :param _params: JSON-encoded string of base query parameters
        :param _token_id: identifier of the auth token (carried through contexts for logging)
        :param _proc_uuid: identifier of the loading process (carried through contexts for logging)
        """
        super().__init__(_token, per_page, _logger)
        self._loading_obj_name = _loading_obj
        self._base_url = _base_url
        self._headers = _headers
        self._params = _params
        self._token_id = _token_id
        self._proc_uuid = _proc_uuid

    def _build_url(self) -> str:
        """Return the endpoint URL; subclasses may override for dynamic URLs."""
        return self._base_url

    def handle_error(self, obj: LoadContext, e: Exception, loading: Loading):
        """Log a failed load attempt with enough context (url, loading id) to trace it."""
        self._logger.error('url: {}, loading_id: {}, error with message: {}'.format(obj.url, loading.id, str(e)))

    def get_load_context(self):
        """Build the initial LoadContext pointing at page 1."""
        return LoadContext(
            self._build_url(),
            params=self._get_params(None),
            headers=self._get_headers(),
            obj={'page': 1, 'remaining': -1, 'token_id': self._token_id, 'proc_uuid': self._proc_uuid}
        )

    def _get_params(self, page: Optional[int]) -> dict:
        """Decode the base query params; add 'page' when a page number is given.

        Annotated Optional[int]: get_load_context passes None for the first page.
        """
        _prms = json.loads(self._params)
        if page:
            _prms['page'] = page
        return _prms

    def _get_headers(self) -> dict:
        """Decode the base headers and attach the token Authorization header."""
        _hdrs = json.loads(self._headers)
        _hdrs['Authorization'] = 'token {}'.format(self._token)
        return _hdrs

    def load(self, obj: LoadContext, loading: Loading):
        """Fetch one page and return its load result.

        Parses the JSON body only on success (< 400); logs the page outcome;
        warns when the rate limit is exhausted; attaches a next-page context
        unless this was the last page.
        """
        current_page = obj.obj['page']
        _token_id = obj.obj.get('token_id', None)
        _proc_uuid = obj.obj.get('proc_uuid', None)
        url = '{}{}'.format(loading.url, self._get_url_params(obj.params))
        resp = requests.get(url, headers=obj.headers)
        resp_status = int(resp.status_code)
        remaining_limit = self._get_remaining_limit(resp)
        next_page = self._get_next_page(current_page, resp)
        rv_objs = []
        if resp_status < 400:
            # Only successful responses carry the JSON payload we expect.
            rv_objs = json.loads(resp.text)
        self._logger.info('token_id: {}, proc_uuid: {}, type: {}, state: {}, page: {}, count: {}, limit: {}, url: {}'.format(
            _token_id, _proc_uuid,
            self._loading_obj_name, resp.status_code, current_page,
            len(rv_objs), remaining_limit, url
        ))
        if int(remaining_limit if remaining_limit else 1) <= 0:
            # FIX: Logger.warn is deprecated in favor of Logger.warning.
            self._logger.warning('token_id {} is expired'.format(_token_id))
        load_result = obj.get_simplified_load_result(
            rv_objs,
            LoadContext(
                self._build_url(),
                params=self._get_params(next_page),
                headers=self._get_headers(),
                # FIX: carry token_id/proc_uuid forward (as get_load_context does)
                # so pages > 1 no longer log them as None.
                obj={'page': next_page, 'remaining': -1, 'token_id': _token_id, 'proc_uuid': _proc_uuid}
            ) if not self._is_last_page(len(rv_objs), resp) else None
        )
        load_result.resp_headers = dict(resp.headers)
        load_result.resp_text_data = resp.text
        load_result.resp_status = resp_status
        return load_result
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.