#https://github.com/Tanganelli/CoAPthon
from coapthon.server.coap import CoAP
from coapthon.resources.resource import Resource
import logging
logging.basicConfig(level=logging.ERROR)
logger = logging.getLogger(__name__)
class TesteResource(Resource):
def __init__(self, name="TesteResource", coap_server=None):
super(TesteResource, self).__init__(name, coap_server, visible=True,
observable=True, allow_children=True)
self.payload = "Teste Resource"
def render_GET(self, request):
return self
def render_PUT(self, request):
self.payload = request.payload
        logger.debug("payload: %s", self.payload)
return self
def render_POST(self, request):
res = TesteResource()
res.location_query = request.uri_query
res.payload = request.payload
return res
def render_DELETE(self, request):
return True
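# A minimal way to serve the resource above, following the pattern of
# CoAPthon's example server (the host, port, and resource path below are
# assumptions, not part of the original snippet):
class CoAPServer(CoAP):
    def __init__(self, host, port):
        CoAP.__init__(self, (host, port))
        self.add_resource('teste/', TesteResource())

if __name__ == '__main__':
    server = CoAPServer('0.0.0.0', 5683)
    try:
        server.listen(10)
    except KeyboardInterrupt:
        server.close()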
|
# Import from the Flask package
from flask import Flask,request,make_response,redirect,url_for,abort,session
# Import the Manager class
from flask_script import Manager
# Create the application instance
app=Flask(__name__)
app.config['SECRET_KEY'] = 'secret key string used for signing'
# Create the Manager object
manager = Manager(app)
# View functions
@app.route('/')
def index():
    return '<h1>Hello Flask!</h1>'
@app.route('/user/<username>')
def welcome(username):
    return '<h1>HELLO, %s!</h1>' % username
# A parameter with a type converter
@app.route('/test/<path:info>')
def test(info):
    return info
@app.route('/request/<path:info>')
def url(info):
    # the full request URL
    # return request.url
    # the URL without the GET parameters
    # return request.base_url
    # the URL with only host and port
    # return request.host_url
    # the route path written in the decorator
    # return request.path
    # the client's IP address
    # return request.remote_addr
    # the request method (GET/POST)
    # return request.method
    # all GET parameters are kept in the args dict
    # return str(request.args)
    # all request headers live in the headers attribute
    return request.headers.get('User-Agent')
# Building responses
@app.route('/response/')
def response():
    # without an explicit status code the default is 200 (OK)
    # return 'OK'
    # a status code can be supplied as part of a tuple
    # return 'Are you kidding me?',888
    # or build a response object first and return it; the status code
    # can be set while constructing it
    resp=make_response('HELLO there',999)
    return resp
# Redirects
@app.route('/old/')
def old():
    # return 'This is the original page'
    # redirect to a given URL
    # return redirect('https://www.baidu.com')
    # return redirect('/new/')
    return redirect(url_for('welcome',username='Mei'))
@app.route('/new/')
def new():
    return 'This is the new content'
# Aborting a request with abort()
@app.route('/login/')
def login():
    # return 'Welcome'
    abort(200)
# Session handling
@app.route('/set_session/')
def set_session():
    session['username']='xiaoming'
    return 'session is set'
@app.route('/get_session/')
def get_session():
    return session.get('username','who are you')
# Start the instance (only when this module is run directly)
if __name__=='__main__':
    # app.run(debug=True,threaded=True)
    manager.run()
# debug: enable debug mode (auto-restart on code changes), default False
# threaded: enable multithreading, default False
# port: the port number, default 5000
# host: the host address, default 127.0.0.1; set 0.0.0.0 to allow access by IP
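# flask_script's Manager replaces a bare app.run() with a small CLI; with the
# manager.run() entry point above, the dev server is started from a shell,
# e.g. (assuming this file is saved as app.py):
#   python app.py runserver -h 0.0.0.0 -p 5000
#   python app.py shell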
|
import functools
import requests
import suds.transport as transport
import traceback
from six import BytesIO
__all__ = ['RequestsTransport']
def handle_errors(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
try:
return f(*args, **kwargs)
except requests.HTTPError as e:
buf = BytesIO(e.response.content)
raise transport.TransportError(
'Error in requests\n' + traceback.format_exc(),
e.response.status_code,
buf,
)
except requests.RequestException:
buf = BytesIO(traceback.format_exc().encode('utf-8'))
raise transport.TransportError(
'Error in requests\n' + traceback.format_exc(),
000,
buf,
)
return wrapper
class RequestsTransport(transport.Transport):
def __init__(self, session=None):
transport.Transport.__init__(self)
self._session = session or requests.Session()
@handle_errors
def open(self, request):
resp = self._session.get(request.url)
resp.raise_for_status()
return BytesIO(resp.content)
@handle_errors
def send(self, request):
resp = self._session.post(
request.url,
data=request.message,
headers=request.headers,
)
if resp.headers.get('content-type') not in ('text/xml',
'application/soap+xml'):
resp.raise_for_status()
return transport.Reply(
resp.status_code,
resp.headers,
resp.content,
)
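if __name__ == '__main__':
    # Usage sketch: hand this transport to a suds client. The WSDL URL is a
    # placeholder, not something defined by this module.
    from suds.client import Client
    client = Client('http://example.com/service?wsdl',
                    transport=RequestsTransport())
    print(client)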
|
import pymysql.cursors
from src.classes import part_class
from src.functions import functions
parts_set = functions.create_parts()  # Call the function that creates the batches
print(parts_set[14].further_time)
|
# -*- coding: utf-8 -*-
"""
Column Carver Thresh
Uses Skimage line detection to cut columns
Created on Wed Sep 25 09:46:01 2019
@author: Carver Coleman
"""
import os
import skimage
import numpy as np
import cv2
import copy
from glob import glob
TARGET_COLS = 3
DEBUG = True
os.chdir("INSERT_PATH_TO_FOLDER_WITH_IMAGES")
# Finds lines for thresh, minline, and maxlinegap variables
def cutter(x, g, l):
edges = cv2.Canny(th3,50,150,apertureSize = 3)
lines = cv2.HoughLinesP(image=edges, rho=vrho, theta=np.pi/45, threshold=x, lines=np.array([]), minLineLength=l, maxLineGap=g)
a,b,c = lines.shape
for i in range(a):
cv2.line(gray2, (lines[i][0][0], lines[i][0][1]), (lines[i][0][2], lines[i][0][3]), (0, 0, 255), 3, cv2.LINE_AA)
final_lines = []
for z in range(len(lines)):
final_lines.append(lines[z][0][0])
final_lines.sort()
return final_lines
# Saves column cuts to folder 'split_cols' in original folder
def image_printer (index, col_begin, filename, DEBUG):
final_vthresh = BEGINNING_THRESHOLD + (index * INCREMENT_VALUE)
final_lines = cutter(x = final_vthresh, g = vmaxLG, l = vminLL)
    path = (os.getcwd() + '\\split_cols\\{}\\'.format(filename[:-4]))
    k = col_begin
    firstLine = 0  # guard: used below even when no gap wider than 200px is found
for i in range((len(final_lines) - 1)):
if abs(final_lines[i] - final_lines[i+1]) > 200:
cropped = gray2[:,final_lines[i] + buffer_left:final_lines[i + 1]-buffer_right]
try:
skimage.io.imsave(path + '{}_col'.format(filename[:-4]) + str(k) + '.jpg', cropped)
print('saved')
except FileNotFoundError:
os.mkdir(path)
skimage.io.imsave(path + '{}_col'.format(filename[:-4]) + str(k) + '.jpg', cropped)
print('saved')
k += 1
firstLine = i + 1
if DEBUG:
skimage.io.imshow(cropped)
skimage.io.show()
cropped = gray2[:,final_lines[firstLine] + buffer_left:gray2.shape[:2][1]]
skimage.io.imsave(path + '{}_col'.format(filename[:-4]) + str(k) + '.jpg', cropped)
if DEBUG:
skimage.io.imshow(cropped)
skimage.io.show()
print('saved')
# __main__
for image_file in glob('*.jpg'):
try:
filename = image_file
print(filename)
gray2 = cv2.imread(filename)
# Convert to grayscale
"""
gray2 = cv2.cvtColor(gray, cv2.COLOR_BGR2GRAY)
"""
# Median Blur to remove noise
"""
gray2 = cv2.medianBlur(gray2, 3)
"""
# Convert to black and white
"""
thresh, th3 = cv2.threshold(gray2, 127, 255, cv2.THRESH_BINARY) # was 127, 255; 105, 255 worked alright
"""
# To invert the text to white
th3 = 255*(gray2 < 128).astype(np.uint8)
BEGINNING_THRESHOLD = 400
MAX_THRESHOLD = 800
INCREMENT_VALUE = 50 # By how much should threshold increase every time
#min line length (higher means less lines)
vminLL = 400
#rho value, or sensitivity value *Important for making more lines
vrho = 1.5
#What level of pixel to detect (higher means less lines)
vthresh = copy.copy(BEGINNING_THRESHOLD)
#How many pixels can break inbetween a line
vmaxLG = 4
#Cropped buffer on each side of the column
buffer_left = 0
buffer_right = -3
total_num_columns = []
num_columns = 0
while vthresh <= MAX_THRESHOLD:
try:
final_lines = cutter(x = vthresh, g = vmaxLG, l = vminLL)
num_columns = 0
lines = []
for i in range((len(final_lines) - 1)):
if abs(final_lines[i] - final_lines[i+1]) > 100:
lines.append(final_lines[i])
num_columns += 1
vthresh += INCREMENT_VALUE
total_num_columns.append(num_columns)
if num_columns == TARGET_COLS:
target_lines = lines
except AttributeError:
break
vthresh = 0
if TARGET_COLS - 1 in total_num_columns:
index = len(total_num_columns) - 1 - total_num_columns[::-1].index(TARGET_COLS - 1)
            image_printer(index, col_begin=1, filename=filename, DEBUG=DEBUG)
else:
print("Target columns not found")
print('END END END')
except ValueError:
print("Value Error: Skipped")
|
from django.db.models import Model, DateField, DateTimeField, ManyToManyField, FileField
from django.db.models.query import QuerySet
from django.db.models.fields.related import ForeignKey
from django.core.files import File
from django.conf import settings
from django.utils.timezone import utc
from datetime import date, datetime, time
from uuid import uuid4
from os import path
def get_serialisable_fields(obj):
opts = obj._meta
fields = (field.name for field in opts.local_fields + opts.local_many_to_many)
fields = (opts.get_field(field) for field in fields)
for field in fields:
if field.rel:
yield field.name
else:
yield field.attname
def upload_attachment_file(instance, filename):
return 'preview/%s/%s/%s' % (
instance.preview.content_type.app_label,
instance.preview.content_type.model,
path.split(filename)[-1]
)
def serialise(value):
if isinstance(value, Model):
return value.pk
elif isinstance(value, datetime):
return value.strftime('%Y-%m-%d %H:%M:%S')
elif isinstance(value, date):
return value.strftime('%Y-%m-%d')
elif isinstance(value, time):
return value.strftime('%H:%M:%S')
elif isinstance(value, QuerySet):
return [v for v in value.values_list('pk', flat = True)]
elif isinstance(value, File):
return None
else:
return value
def unserialise(field, value):
if isinstance(field, ForeignKey):
return field.rel.to.objects.get(pk = value)
elif isinstance(field, DateTimeField):
return datetime.strptime(value, '%Y-%m-%d %H:%M:%S').replace(tzinfo = utc)
elif isinstance(field, DateField):
return datetime.strptime(value, '%Y-%m-%d').replace(tzinfo = utc)
elif isinstance(field, ManyToManyField):
return field.rel.to.objects.filter(pk__in = value)
elif isinstance(field, FileField):
if value.startswith(settings.MEDIA_URL):
return value[len(settings.MEDIA_URL):]
else:
return value
else:
        return value
|
from copy import deepcopy
from typing import Tuple
import bson
from delphin_6_automation.database_interactions.db_templates import delphin_entry, result_processed_entry, \
result_raw_entry
__author__ = "Christian Kongsgaard"
__license__ = 'MIT'
# -------------------------------------------------------------------------------------------------------------------- #
# IMPORTS
# Modules
import pandas as pd
from multiprocessing import Pool
# RiBuild Modules
from delphin_6_automation.database_interactions import mongo_setup
from delphin_6_automation.database_interactions.auth import auth_dict
from delphin_6_automation.logging.ribuild_logger import ribuild_logger
# Logger
logger = ribuild_logger(__name__)
# -------------------------------------------------------------------------------------------------------------------- #
# RIBuild
def get_delphin_ids():
ids = delphin_entry.Delphin.objects[:10].only('id')
logger.info(f'Got {ids.count()} projects')
ids = [project.id for project in ids]
return ids
def get_result_data(project_id) -> Tuple[list, list]:
result = result_raw_entry.Result.objects(delphin=project_id).first()
data = bson.BSON.decode(result.results.read())
return data['temperature algae']['result'][:43800], data['relative humidity algae']['result'][:43800]
def process_data(project_id):
logger.info(f'Processing project: {project_id}')
temp, rh = get_result_data(project_id)
if len(temp) != 43800:
return None
else:
data = {'temperature': temp, 'relative humidity': rh}
return data
if __name__ == '__main__':
server = mongo_setup.global_init(auth_dict)
project_ids = get_delphin_ids()
data = {}
for id_ in project_ids:
ret = process_data(id_)
if ret:
data[id_] = ret
mongo_setup.global_end_ssh(server)
reform = {(outerKey, innerKey): values for outerKey, innerDict in data.items() for innerKey, values in
innerDict.items()}
data_frame = pd.DataFrame(reform)
data_frame.to_excel("Delphin Projects.xlsx")
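# `Pool` is imported above but never used. A sketch (an assumption, not part
# of the original pipeline) of how the per-project loop could be parallelised;
# note that sharing a mongoengine connection across processes needs care:
def process_parallel(project_ids, workers=4):
    with Pool(workers) as pool:
        results = pool.map(process_data, project_ids)
    return {id_: ret for id_, ret in zip(project_ids, results) if ret}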
|
from gpudb import GPUdb
from gpudb import GPUdbRecordColumn
from gpudb import GPUdbRecordType
from gpudb import GPUdbRecord
from gpudb import GPUdbColumnProperty
from gpudb import collections
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import random
from album import *
def filterByTitle_ArtistID(seq, title, art_id):
for el in seq:
if el.artistID == art_id:
if el.title == el.formatTitle(title):
yield el
break
elif el.title.replace("And","&") == el.formatTitle(title):
yield el
break
elif el.title == el.formatTitle(title).replace("And","&"):
yield el
break
def filterByAlbumID(seq, alb_id):
for el in seq:
if int(el.albumID) == int(alb_id):
yield el
break
class albumCollection:
"""
AlbumCollection class
"""
musicBase = None
def __init__(self,mainMusicBase):
self.albums = [] #Album Collection
self.musicBase = mainMusicBase
def addAlbum(self,album):
if album.albumID == 0:
album.albumID = self.musicBase.db.insertAlbum(album)
album.musicDirectory = self.musicBase.musicDirectoryCol.getMusicDirectory(album.musicDirectoryID)
self.albums.append(album)
return album.albumID
def printAlbums(self):
for alb in self.albums:
alb.printInfos()
def loadAlbums(self):
for row_alb in self.musicBase.db.getSelect("SELECT albumID, title, year, dirPath, artistID, musicDirectoryID FROM albums"):
alb = album("")
alb.load(row_alb)
self.addAlbum(alb)
def findAlbums(self,stitle,artID):
albumList = []
for alb in filterByTitle_ArtistID(self.albums,stitle,artID):
albumList.append(alb)
return albumList
def getAlbum(self,albID):
resAlb = album("")
for alb in filterByAlbumID(self.albums,albID):
resAlb = alb
return resAlb
def getRandomAlbum(self):
nbAlbum = len(self.albums)
if(nbAlbum > 0):
irandom = random.randint(0, nbAlbum-1)
resAlb = self.albums[irandom]
return resAlb
if __name__ == '__main__':
from musicBase import *
    ac = albumCollection(musicBase())
ac.musicBase.loadMusicBase()
ac.loadAlbums()
ac.printAlbums()
|
import os
import sys
import shutil
import logging
from pathlib import Path
from ruamel.yaml import YAML
# from easygqcnn import DataProcesser
file_path = os.path.split(__file__)[0]
ROOT_PATH = os.path.abspath(os.path.join(file_path, '..'))
sys.path.append(os.path.join(ROOT_PATH, 'src'))
try:
from easygqcnn import DataProcesser
except Exception as e:
raise e
LOG_FILE = os.path.join(ROOT_PATH, 'tools/logs/data_process.log')
CFG_FILE = os.path.join(ROOT_PATH, 'config/data_process.yaml')
# RAW_PATH = r'H:\Robot\Dex-Net\DataSet\mini_dexnet_all_trans_01_20_17'
# OUT_PATH = r'H:\Robot\template\out'
# RAW_PATH = r'/root/Project/gmdata/gq-data/mix-dir-20x100'
# OUT_PATH = r'/root/Project/gmdata/gq-data/mix-dir-20x100-recorder'
GMDATA_PATH = Path.home().joinpath('Project/gmdata')
DATASET_PATH = GMDATA_PATH.joinpath('datasets/train_datasets')
INPUT_DATA_PATH = DATASET_PATH.joinpath('gq_data/small_data')
OUT_PATH = DATASET_PATH.joinpath('gq_data/small_data_train')
if os.path.exists(OUT_PATH):
shutil.rmtree(OUT_PATH)
os.makedirs(OUT_PATH)
def config_logging(file=None, level=logging.DEBUG):
""" 配置全局的日志设置
参考https://www.crifan.com/summary_python_logging_module_usage/
"""
LOG_FORMAT = "%(asctime)s - %(levelname)s - %(message)s"
# logging.basicConfig(filename=file, level=level,
# format=LOG_FORMAT, filemode='w')
logger = logging.getLogger('')
logger.setLevel(level)
    rf_handler = logging.StreamHandler()  # defaults to sys.stderr
# rf_handler.setLevel(logging.DEBUG)
rf_handler.setFormatter(logging.Formatter(LOG_FORMAT))
f_handler = logging.FileHandler(file, mode='w')
# f_handler.setLevel(logging.DEBUG)
f_handler.setFormatter(logging.Formatter(LOG_FORMAT))
logger.addHandler(rf_handler)
logger.addHandler(f_handler)
def initLogging(logFilename):
"""Init for logging
"""
logging.basicConfig(
level=logging.DEBUG,
format='LINE %(lineno)-4d %(levelname)-8s %(message)s',
datefmt='%m-%d %H:%M',
filename=logFilename,
filemode='w')
# define a Handler which writes INFO messages or higher to the sys.stderr
console = logging.StreamHandler()
console.setLevel(logging.INFO)
# set a format which is simpler for console use
formatter = logging.Formatter(
'LINE %(lineno)-4d : %(levelname)-8s %(message)s')
# tell the handler to use this format
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
def load_config(file):
""" 加载配置文件 """
yaml = YAML(typ='safe') # default, if not specfied, is 'rt' (round-trip)
with open(file, 'r', encoding="utf-8") as f:
config = yaml.load(f)
return config
def main():
config_logging(LOG_FILE)
config = load_config(CFG_FILE)
for t in 'gmd cor jaq'.split():
input_path = INPUT_DATA_PATH.joinpath(t)
out_path = OUT_PATH.joinpath(t)
if out_path.exists():
shutil.rmtree(out_path)
out_path.mkdir(parents=True)
processer = DataProcesser(config, input_path.as_posix(), out_path.as_posix())
processer.process(is_dex=False)
if __name__ == "__main__":
main()
|
#! /usr/bin/python
#-*- coding: utf-8 -*-
import time,gzip,stat,os,string
import hashlib
import fivemin_gl
# get_flowlogname/get_fluxname are called below; they are assumed to live in
# fivemin_common alongside get_logname (the original import left them out)
from fivemin_common import get_logname, get_flowlogname, get_fluxname
from shutil import rmtree
from struct import pack,unpack,calcsize
import gevent.monkey
gevent.monkey.patch_all()
class read_access():
def __init__(self,config,log):
        # errorline file descriptors
        self.errorlinefd = [0,0]
        # configuration
        self.config = config
        # logger
        self.log = log
        # per-service dicts of URL push files
        self.file_list = []
        # per-service hash tables of pushed URLs
        self.url_list = []
        # number of open file descriptors
        self.cur_fd = [0,0]
        # number of times access.log has been opened/processed
        self.iNumber_ori = 0
    # initialise the reading service
    def ready_to_read(self):
        for i in range(self.config.FILE_NAME):
            tmpfile_dir = {}
            tmpurl_dir = {}
            # name of the errorline file
            szlogPath = "%s/%ld.%derrline" %(self.config.SERVICE_LIST[i]["OUTPATH"],time.time(),i)
            # open a descriptor for storing bad access.log lines
            # (the original passed mode "aw" plus a third argument intended as
            # permissions; plain append mode is what the code needs)
            self.errorlinefd[i] = open(szlogPath, "a")
            self.file_list.append(tmpfile_dir)
            self.url_list.append(tmpurl_dir)
            try:
                # clear out the URL push directory
                self.ready_del_url(i)
                self.log.debug( "Fivemin_truckread-ready_to_read-error file created:%d",i)
            except IOError as ex:
                self.log.error( "Fivemin_truckread-ready_to_read-error file creation failed:%s",str(ex))
                return False
        return True
    # close the error files
    def end_to_read(self):
        for i in range(self.config.FILE_NAME):
            self.errorlinefd[i].close()
            # release the open file descriptors
            self.close_fd(i,True)
    # periodically refresh the URL cache
    def delete_url(self,iNumber):
        # skip if URL collection is not enabled
        if not self.config.SERVICE_LIST[iNumber]["EXTRAURL"]:
            return 0
        self.log.debug("======start to swap======")
        # length of the squid cache record struct
        isquid_len = calcsize("ci4dq2h16s")
        # cache files
        for cache_path in self.config.SQUID_SER_CACHE[iNumber]:
            with open(cache_path,"rb") as cache_fd:
                # read one squid cache record
                date = cache_fd.read(isquid_len)
                if not date:
                    break
                # unpack the squid cache record
                squid_op,squid_swapfilen,squid_timestamp,squid_laastref,squid_expires,\
                squid_lastmod,squid_swapfilesize,squid_refcount,squid_flags,squid_key = unpack("@ci4dq2h16s",date)
                # if the key is known, mark it as present in the cache
                if squid_key in self.url_list[iNumber].keys():
                    self.url_list[iNumber][squid_key] = True
        self.log.debug("start to check service[%d] url:%d",iNumber,len(self.url_list[iNumber]))
        delete_file_list = []
        # iterate over the URL files
        for file_key,file_value in self.file_list[iNumber].iteritems():
            # absolute path of the URL cache file
            file_path = "%s/%s" %(self.config.SERVICE_LIST[iNumber]["URLPATH"],file_key)
            # absolute path of the temporary file
            file_tmppath = "%s/%s" %(self.config.SERVICE_LIST[iNumber]["URLPATH"],"tmpdomain")
            # data that is still present in the squid cache
            szWrite = ""
            # total size written
            iWritelen = 0
            # file descriptor of the temporary file
            iTmp_fd = -1
            #self.log.debug("progress url:%s",file_key)
            # open the file and start processing
            with open(file_path,"r+") as file_fd:
                for date in file_fd:
                    # parse the line
                    date_list = date.split("^")
                    # skip malformed lines
                    if len(date_list) > 2:
                        continue
                    url,url_state = date_list
                    url_key = hashlib.new("md5",url).digest()
                    # URL not in the in-memory table
                    if url_key not in self.url_list[iNumber].keys():
                        # drop it and move on to the next line
                        continue
                    # URL no longer in the squid cache
                    if not self.url_list[iNumber][url_key]:
                        # remove the URL from memory
                        del self.url_list[iNumber][url_key]
                    # URL is in the squid cache
                    else:
                        # keep the line
                        szWrite = szWrite+date
                        # reset the cache flag
                        self.url_list[iNumber][url_key] = False
                    # for larger batches, spill to the temporary file
                    if len(szWrite) > 40*1024:
                        # open the temporary file if needed
                        if iTmp_fd == -1:
                            iTmp_fd = open(file_tmppath,"w")
                        # write out the buffer
                        iTmp_fd.write(szWrite)
                        szWrite = ""
                # a temporary file was used
                if iTmp_fd != -1:
                    # flush any remaining content
                    if szWrite:
                        iTmp_fd.write(szWrite)
                    # close the file
                    iTmp_fd.close()
                # small-file path (no temporary file)
                else:
                    # there is content to write back
                    if szWrite:
                        # write back and truncate the file
                        iWritelen = len(szWrite)
                        file_fd.write(szWrite)
                        file_fd.truncate(iWritelen)
            # if a temporary file was used, rename it over the original
            if iTmp_fd != -1:
                os.remove(file_path)
                os.rename(file_tmppath,file_path)
            # otherwise, if nothing was kept, delete the file
            else:
                if iWritelen == 0:
                    # close the file descriptor if it is still open
                    if file_value[0] != -1:
                        os.close(file_value[0])
                        self.cur_fd[iNumber] -= 1
                    # schedule file_key for removal from the dict
                    delete_file_list.append(file_key)
                    # remove the file itself
                    os.remove(file_path)
        # drop the removed keys from the file dict
        for key_value in delete_file_list:
            del self.file_list[iNumber][key_value]
        # clear the list
        delete_file_list = []
        self.log.debug("end to check service[%d] url:%d",iNumber,len(self.url_list[iNumber]))
        self.log.debug("end to check service[%d] file:%d",iNumber,len(self.file_list[iNumber]))
        return 0
    # initialisation: delete previously recorded URLs
    def ready_del_url(self,iNumber):
        if not self.config.SERVICE_LIST[iNumber]["EXTRAURL"]:
            return 0
        szPath = self.config.SERVICE_LIST[iNumber]["URLPATH"]
        # remove the directory and everything under it
        rmtree(szPath)
        # recreate the directory
        os.mkdir(szPath)
        return 0
    # main loop for reading access.log
    def read_to_queue(self):
        # create the error-log file descriptors
        self.ready_to_read()
        # timestamp of the last log pass
        tTime_Loop = time.time()
        # day of the last URL cache refresh
        tUrl_day = 0
        # process access.log
        self.progresslog()
        while 1:
            # current time for the access.log pass
            tTme_Now = time.time()
            # current local time for the URL cache refresh
            tUrl_Now = time.localtime()
            # run at most once a minute, to avoid processing the log too often
            if (tTme_Now - tTime_Loop) > 60:
                tTime_Loop = tTme_Now
                self.progresslog()
            # periodic URL cache refresh
            if ((tUrl_day != tUrl_Now.tm_mday) and (tUrl_Now.tm_hour == 13) and ((tUrl_Now.tm_min - 10) < 8)):
                for i in range(self.config.FILE_NAME):
                    # refresh the URLs
                    self.delete_url(i)
                # record the day of this refresh
                tUrl_day = tUrl_Now.tm_mday
            # exit the program
            if fivemin_gl.bStopPro[0]:
                self.log.debug("Fivemin_truckread-read_to_queue-program will exit")
                # wait for the uploader and the log processors to finish
                if self.wait_stop():
                    self.log.debug("Fivemin_truckread-read_to_queue-read exit")
                    break
            gevent.sleep(5)
        # close the error access.log files (the original was missing the call parentheses)
        self.end_to_read()
        return
    # wait for shutdown
    def wait_stop(self):
        gevent.sleep(40)
        # the upload process has shut down
        if fivemin_gl.bStopPro[1]:
            # iterate over the services
            for i in range(self.config.FILE_NAME):
                self.log.debug("Fivemin_truckread-wait_stop-truck exit")
                # close the flow file if it is open
                if fivemin_gl.flow_fd[i]:
                    fivemin_gl.flow_fd[i].close()
                    # rename the temporary flow file
                    # (the original indexed SERVICE_LIST with the undefined
                    # name iNumber; i is the loop variable here)
                    szflow_path_name = get_flowlogname(self.config.BING_IP[i])
                    tmpflow_name = "%s/flow.tmp%d.log" %(self.config.SERVICE_LIST[i]["FLOWPATH"],i)
                    flow_name = "%s/%s" %(self.config.SERVICE_LIST[i]["FLOWPATH"],szflow_path_name)
                    os.rename(tmpflow_name,flow_name)
                    fivemin_gl.flow_fd[i] = 0
                # close the bandwidth file if it is open
                if fivemin_gl.flux_fd[i]:
                    fivemin_gl.flux_fd[i].close()
                    # rename the temporary bandwidth file
                    szflux_path_name = get_fluxname(self.config.BING_IP[i])
                    tmpflux_name = "%s/flux.tmp%d.gz" %(self.config.SERVICE_LIST[i]["FLUXPATH"],i)
                    flux_name = "%s/%s" %(self.config.SERVICE_LIST[i]["FLUXPATH"],szflux_path_name)
                    os.rename(tmpflux_name,flux_name)
                    fivemin_gl.flux_fd[i] = 0
            return True
        # the upload process has not finished yet; keep waiting
        else:
            return self.wait_stop()
    # iterate over the services, reading access.log
    def progresslog(self):
        for i in range(self.config.FILE_NAME):
            self.log.debug("Fivemin_truckread-progresslog-Service %d analysis log ",i)
            # start reading the file
            if self.readfile(i) < 0:
                self.log.error("Fivemin_truckread-progresslog-Service %d analysis log error",i)
        return 0
    # check file information
    def check_filelog(self,iNumber,path):
        # check that the file exists
        if not os.path.exists(path):
            return -2
        # stat the file
        file_stat = os.stat(path)
        if None != file_stat:
            # size of the file contents
            if file_stat [ stat.ST_SIZE ] > 1024 * 1024 * 512:
                self.log.error("Fivemin_truckread-check_filelog-%s:larger than 512M",path)
            self.log.debug("Fivemin_truckread-check_filelog- %s size:%ld K",path,(file_stat [ stat.ST_SIZE ]/1024))
        else:
            self.log.error("Fivemin_truckread-check_filelog-stat %s error",path)
            return -1
        return 0
    # process one line of data
    def do_squid(self,szline,date_list,iNumber):
        # split the line into fields
        line_list = szline.split('\t')
        # a squid log line has 37 fields
        #if len(line_list) != 37 and len(line_list) != 19:
        if len(line_list) < 19:
            # unparseable access.log lines go into the errorline file
            self.log.error("Fivemin_truckread-do_squid-Service[%d] error read access.log line",iNumber)
            if self.errorlinefd[iNumber] > 0:
                self.errorlinefd[iNumber].write(szline)
            return -1
        # initialise the timestamp for the 5-minute file cut
        if fivemin_gl.start_time[iNumber] == 0:
            fivemin_gl.start_time[iNumber] = int(string.atof(line_list[1]))
        # timestamp, requestUrl, clientip, cache_code, cache_size, last_time, hier_code
        lineinfo = [line_list[1],line_list[8],line_list[0],line_list[5],line_list[6],line_list[17],line_list[12]]
        # append to the list
        date_list.append(lineinfo)
        if not self.config.SERVICE_LIST[iNumber]["EXTRAURL"]:
            self.collect_Url(line_list[8],line_list[5],iNumber)
        return 0
    # URL push handling
    def collect_Url(self,url,url_status,iNumber):
        # split out the domain
        url_list = url.split("/")
        # reject invalid URLs
        if len(url_list) < 3:
            return -1
        else:
            domain = url_list[2]
        # reject oversized URLs and empty domains
        if 255 < len(url) or 1 > len(domain):
            return -1
        # hash the URL to an md5 key
        hash_key = hashlib.new("md5",url).digest()
        # skip duplicate URLs
        if hash_key in self.url_list[iNumber].keys():
            return 0
        # build the line to write
        szWri = "%s^%s\n" %(url,url_status)
        #szWri = pack("256s32s",url,url_status)
        # timestamp for the file descriptor
        file_time = int(time.time())
        # absolute path of the per-domain file
        szdomain_file = "%s/%s" %(self.config.SERVICE_LIST[iNumber]["URLPATH"],domain)
        # close old descriptors if the open-fd limit has been reached
        self.close_fd(iNumber,False)
        # the domain file is already known
        if domain in self.file_list[iNumber].keys():
            # its file descriptor has been closed; reopen it
            if self.file_list[iNumber][domain][0] == -1:
                file_fd = os.open(szdomain_file,os.O_RDWR|os.O_CREAT)
                self.cur_fd[iNumber] += 1
                # update the domain-file dict
                self.file_list[iNumber][domain][0] = file_fd
            # write to the file
            os.write(self.file_list[iNumber][domain][0],szWri)
            # record the URL in the dict
            self.url_list[iNumber][hash_key] = False
        # no file for this domain yet
        else:
            # create the file
            file_fd = os.open(szdomain_file,os.O_RDWR|os.O_CREAT)
            self.cur_fd[iNumber] += 1
            try :
                # write to the file
                os.write(file_fd,szWri)
                # dict entry contents: file descriptor, timestamp, URL position
                file_value = [file_fd]
                # add it to the domain-file dict
                self.file_list[iNumber][domain] = file_value
                # record the URL in the dict
                self.url_list[iNumber][hash_key] = False
            except IOError as ex:
                print "error"
                return -1
        return 0
    # close stale file descriptors
    def close_fd(self,iNumber,exit_flag):
        # only close descriptors once the limit is exceeded (or on exit)
        if (self.cur_fd[iNumber] < 9999) and not exit_flag:
            return 0
        for key,valus in self.file_list[iNumber].iteritems():
            # close every open descriptor
            if valus[0] != -1:
                os.close(valus[0])
                valus[0] = -1
                self.cur_fd[iNumber] -= 1
        return 0
    # enqueue data
    def add_queue(self,date_list,iNumber):
        # enqueue only when there is data
        if date_list:
            # hand off to service 1's queue
            if 0 == iNumber:
                # the queue is full; wait 5s for it to drain
                if fivemin_gl.queue_task_date1.full():
                    self.log.debug("Fivemin_truckread-readfile-Service%d:queue_task_date1 is full",iNumber)
                    gevent.sleep(5)
                fivemin_gl.queue_task_date1.put(date_list,block = True, timeout = 20)
            # hand off to service 2's queue
            elif 1 == iNumber:
                # the queue is full; wait 5s for it to drain
                if fivemin_gl.queue_task_date2.full():
                    self.log.debug("Fivemin_truckread-readfile-Service%d:queue_task_date2 is full",iNumber)
                    gevent.sleep(5)
                fivemin_gl.queue_task_date2.put(date_list,block = True,timeout = 20)
    # read the file
    def readfile(self,iNumber):
        # check the access.log file
        szAccessPath = self.config.SERVICE_LIST[iNumber]["INPATH"]
        if self.check_filelog(iNumber,szAccessPath) < 0:
            return -1
        # check the log.writing file
        szWritepath = "%s/log.writing%d" %(self.config.SERVICE_LIST[iNumber]["OUTPATH"],iNumber)
        if -1 == self.check_filelog(iNumber,szWritepath):
            return -1
        # access.log file descriptor
        Accfd = open(szAccessPath, 'r+')
        # descriptor for the processed-output file
        Wrifd = gzip.open(szWritepath, "ab+")
        # buffer for writing the raw log
        szWriBuf = ""
        # line counter; 5000 lines form one queue batch
        ilineNumber = 0
        # flag: keep reading the file
        bcontstatus = False
        # read counter, to keep reading from outpacing the processors
        ilinetimes = 0
        # with little data, the raw log is packaged automatically after enough passes
        self.iNumber_ori += 1
        try:
            while 1:
                # batch for the queue
                date_list = []
                # after 5000 lines, if the file is not finished, keep reading
                bcontstatus = False
                for szline in Accfd:
                    # throttle the read speed
                    ilinetimes += 1
                    if 512 < ilinetimes:
                        ilinetimes = 0
                        gevent.sleep(0.001)
                    # different platforms log different data formats
                    if 0 == self.config.SERVICE_LIST[iNumber]["DATAFORMAT"]:
                        if self.do_squid(szline,date_list,iNumber) < 0:
                            continue
                    if 1 == self.config.SERVICE_LIST[iNumber]["DATAFORMAT"]:
                        if self.do_squid(szline,date_list,iNumber) < 0:
                            continue
                    # append to the raw log
                    szWriBuf = szWriBuf+szline
                    if len(szWriBuf) > 1024*40:
                        Wrifd.write(szWriBuf)
                        szWriBuf = ""
                    # count valid lines read
                    ilineNumber += 1
                    # stop after 5000 lines
                    if ilineNumber > 4999:
                        ilineNumber = 0
                        bcontstatus = True
                        break
                # enqueue the batch
                self.add_queue(date_list,iNumber)
                # if the file is not exhausted, keep reading access.log
                if bcontstatus:
                    continue
                # done reading; leave the loop
                break
            # flush the remaining raw data
            if len(szWriBuf) > 0:
                Wrifd.write(szWriBuf)
                szWriBuf = ""
            self.log.debug("Fivemin_truckread-readfile-Cut file OK")
            # truncate access.log
            Accfd.truncate(0)
            # close the files
            Accfd.close()
            Wrifd.close()
            # handle the raw log file
            self.creat_rawlog(szWritepath,iNumber)
            return 0
        except Exception as ex:
            self.log.error("Fivemin_truckread-readfile-%s",str(ex))
            # close the related descriptors
            Accfd.close()
            Wrifd.close()
            return -1
    # rename the raw log and move it to the raw-log directory
    def creat_rawlog(self,oldpath,iNumber):
        # start a new file once the raw log exceeds 40M
        szName = get_logname(self.config.BING_IP[iNumber])
        szPathName = "%s/%s" %(self.config.SERVICE_LIST[iNumber]["LOGPATH"],szName)
        Wrifile_stat = os.stat(oldpath)
        # file attributes
        if None != Wrifile_stat:
            ifile_size = Wrifile_stat[stat.ST_SIZE]
            if ifile_size > 0:
                if (ifile_size > 1024*1024*40) or (self.iNumber_ori > 60*6):
                    # rename and move the file to the raw-log directory
                    os.rename(oldpath, szPathName)
                    self.log.debug("Fivemin_truckread-readfile-rename file ok writing->access.gz")
        else:
            self.log.error("Fivemin_truckread-readfile-stat log.writing file error")
            # rename and move the file to the raw-log directory
            os.rename(oldpath, szPathName)
|
from entitybook import BasicBook
from entitybook import BookSearchByName
from entitybook import BookSearchByAuth
from entitybook import BookSearchByCategorical
from entitybook import BookSearchByPublisher
from entitybook import BookSearchByIsbn
from entitybook import BookOrderByHotPoint
from entitybook import BookControlNumber
import re
class BookDetail(object):
def __init__(self, arg):
self.insert(arg)
def insert(self, arg):
name_match = re.compile(u"\uFF08.*?\uFF09")
self.book_name = name_match.sub("",arg.book_name).replace(" ", "")
self.book_auth_name = arg.book_auth_name
self.book_isbn = arg.book_isbn
self.book_publisher = arg.book_publisher
        # the original tested an undefined name `args`; test the attributes on `arg` instead
        if arg.book_word is not None:
            self.book_word = arg.book_word
        else:
            self.book_word = ""
        if arg.book_publish_time is not None:
            self.book_publish_time = arg.book_publish_time
        else:
            self.book_publish_time = ""
        if arg.book_introduce is not None:
            self.book_introduce = arg.book_introduce.replace(" ", "")
        else:
            self.book_introduce = ""
class HotBookDetail(object):
def __init__(self, arg):
self.insert(arg)
def insert(self, arg):
name_match = re.compile(u"\uFF08.*?\uFF09")
self.book_name = name_match.sub("",arg.book_name).replace(" ", "")
self.book_isbn = arg.book_isbn
        # as above, test the attributes on `arg` (the original referenced an undefined `args`)
        if arg.book_remain is not None:
            self.book_remain = arg.book_remain
        else:
            self.book_remain = 0
        if arg.book_hot_point is not None:
            self.book_hot_point = arg.book_hot_point
        else:
            self.book_hot_point = 0
class BookManager(object):
def __init__(self):
self.book = BasicBook()
def search(self, arg):
temp = []
temp.extend(BookSearchByName(self.book, arg))
temp.extend(BookSearchByAuth(self.book, arg))
temp.extend(BookSearchByCategorical(self.book, arg))
temp.extend(BookSearchByPublisher(self.book, arg))
temp.extend(BookSearchByIsbn(self.book, arg))
result = []
for element in temp:
result.append(BookDetail(element))
return result
def showhot(self, state, number):
temp = BookOrderByHotPoint(self.book, state)
return BookControlNumber(temp, 0, number+1)
|
# Generated by Django 2.2.1 on 2019-06-14 14:20
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('store', '0009_auto_20190614_1414'),
]
operations = [
migrations.AlterField(
model_name='bookcopy',
name='borrow_date',
field=models.DateField(blank=True, null=True),
),
]
|
"""from copy import copy
from math import sqrt
import numpy as np
import itertools as it
"""
class Node(object):
num_instances = 0
def __init__(self, loc, val):
__class__.num_instances += 1
self.loc = loc
self.val = val
self.label = __class__.num_instances
self.connected = set()
self.probabilities = {}
self.probability_cache = {}
def set_initial(self, probability=0.0):
self.probabilities[self.val] = probability
def connect(self, other):
self.connected.add(other)
def update(self):
self.probabilities = copy(self.probability_cache)
self.probability_cache = {}
def take(self, prob):
for k, v in prob.items():
new_k = k + self.val
if new_k in self.probability_cache.keys():
self.probability_cache[new_k] += v
else:
self.probability_cache[new_k] = v
def step(self):
n = len(self.connected)
split = {k: v / n for k, v in self.probabilities.items()} # Here it's assumed equal probability per connected
if len(split) > 0:
for node in self.connected:
node.take(split)
def __str__(self):
return 'Nodes[{}, {}] = {} Current vals: {}'.format(self.loc[0], self.loc[1], self.val, self.probabilities)
class Board(object):
moves = {
'upleft': np.array((-1, 2))
, 'upright': np.array((1, 2))
, 'rightup': np.array((2, 1))
, 'rightdown': np.array((2, -1))
, 'downright': np.array((1, -2))
, 'downleft': np.array((-1, -2))
, 'leftdown': np.array((-2, -1))
, 'leftup': np.array((-2, 1))
}
def __init__(self, N):
board = np.arange(N**2, dtype=int).reshape((N, N))
self.N = N
self.indices = np.concatenate(np.indices(board.shape).T)
self.nodes = [[None]*N for _ in range(N)]
for val, idx in zip(board.flatten(), self.indices):
self.nodes[idx[0]][idx[1]] = Node(idx, val)
for node in self.all_nodes():
for move in __class__.moves.values():
new_pos = node.loc + move
if np.all(new_pos < N) and np.all(new_pos >= 0):
node.connect(self.nodes[new_pos[0]][new_pos[1]])
def __getitem__(self, loc):
"""Return reference to the node at i, j location."""
i, j = loc
return self.nodes[i][j]
def set_initial(self, loc_vals=None):
if loc_vals is None:
loc_vals = {(0, 0): 1.0}
else:
if abs(sum(loc_vals.values()) - 1.0) > 1e-8:
raise ValueError('Sum of initial probabilities must equal 1.0')
for loc, val in loc_vals.items():
self[loc[0], loc[1]].set_initial(val)
def all_nodes(self):
return sorted(it.chain.from_iterable(self.nodes), key=lambda n: n.val)
def step(self):
for node in self.all_nodes():
node.step()
# must be done after all steps, values for next step are cached
for node in self.all_nodes():
node.update()
def vals(self):
totals = {}
for n in self.all_nodes():
for val, prob in n.probabilities.items():
if val in totals.keys():
totals[val] += prob
else:
totals[val] = prob
return totals
def moment(self, order):
return sum(prob * val**order for val, prob in self.vals().items())
def __str__(self):
return '\n'.join(str(node) for node in self.all_nodes())
np.set_printoptions(linewidth=200)
b = Board(4)
b.set_initial({(1, 1): 0.5, (3, 2): 0.5})
nsteps = 40
prev = 0.0
for n in range(nsteps):
b.step()
vals = b.vals()
mean = b.moment(1)
var = b.moment(2) - mean ** 2
print("Step {:3}:{:8.2f} +/-{:15.10f} {}".format(n+1, mean, sqrt(var), mean - prev))
prev = mean
|
# SmileyBounce2.py
import pygame # Setup
pygame.init()
screen = pygame.display.set_mode([800,600])
keep_going = True
pic = pygame.image.load("CrazySmile.bmp")
colorkey = pic.get_at((0,0))
pic.set_colorkey(colorkey)
picx = 0
picy = 0
BLACK = (0,0,0)
timer = pygame.time.Clock()
speedx = 5
speedy = 5
while keep_going: # Game loop
for event in pygame.event.get():
if event.type == pygame.QUIT:
keep_going = False
picx += speedx
picy += speedy
if picx <= 0 or picx + pic.get_width() >= 800:
speedx = -speedx
if picy <= 0 or picy + pic.get_height() >= 600:
speedy = -speedy
screen.fill(BLACK)
screen.blit(pic, (picx, picy))
pygame.display.update()
timer.tick(60)
pygame.quit() # Exit
|
import time
import filtering as filt
import features as feat
import features2 as feat2
import matplotlib.pyplot as plt
import dwt
#path = 'data/'
def main(data):
signal_type = 0
fs = 250
    qtcN = 470
f = open(data, 'r')
lines = f.readlines()
f.close()
datafil = filt.main_filter(lines,signal_type)
rec35, rec58, rec15, rec48 = dwt.wavelet(datafil, 'db6', 8)
result = feat.main_test(datafil, fs, qtcN)
#result = feat2.main_test(rec35, rec58, rec15, rec48, fs, qtcN)
    plt.show()
|
# --- Very Basic Instructions ---
# 1 - place a video clip in a bucket on your Google Cloud Storage and set its permission to public
# 2 - run the code from a GCP cloud VM
# 3 - install the requirements (pip install -r requirements.txt)
# 4 - run video_processing.py clip_name bucket_name at the command prompt
# this will create a tmp folder containing a series of subfolders, including faces_found
# and text_found, where it will store what it learned from your clip
# 5 - Don't forget to delete the clip (or remove public permission at the very least) and turn
# your VM off!
# If you have ffmpeg issues try this:
# sudo apt-get install ffmpeg
from __future__ import absolute_import
import glob, os, sys, io, skvideo.io, argparse, math, datetime, ffmpy, shutil, wikipedia
from google.cloud import videointelligence
from google.cloud import vision
from google.cloud import storage
from google.cloud.vision import types
from PIL import Image, ImageDraw
import numpy as np
def init():
# clean out directory structure
os.system('rm -r tmp')
def analyze_labels(movie_to_process, bucket_name):
path = 'gs://' + bucket_name + '/' + movie_to_process
print(path)
""" Detects labels given a GCS path. """
video_client = videointelligence.VideoIntelligenceServiceClient()
#result = video_client.annotate_video
features = [videointelligence.enums.Feature.LABEL_DETECTION]
print(features)
mode = videointelligence.enums.LabelDetectionMode.SHOT_AND_FRAME_MODE
config = videointelligence.types.LabelDetectionConfig(
label_detection_mode=mode)
context = videointelligence.types.VideoContext(
label_detection_config=config)
# #print(context)
operation = video_client.annotate_video(
path, features=features, video_context=context)
print('\nProcessing video for label annotations:')
result = operation.result(timeout=90)
print('\nFinished processing.')
frame_offsets = []
# Process frame level label annotations
frame_labels = result.annotation_results[0].frame_label_annotations
for i, frame_label in enumerate(frame_labels):
#if (frame_label.entity.description == 'person'):
print('Frame label description: {}'.format(
frame_label.entity.description))
for category_entity in frame_label.category_entities:
if (category_entity.description == 'person'):
print('\tLabel category description: {}'.format(
category_entity.description))
print(frame_label)
# Each frame_label_annotation has many frames,
# here we print information only about the first frame.
#for frame in frame_label.frames:
frame = frame_label.frames[0]
time_offset = (frame.time_offset.seconds +
frame.time_offset.nanos / 1e9)
print('\tFirst frame time offset: {}s'.format(time_offset))
print('\tFirst frame confidence: {}'.format(frame.confidence))
print('\n')
frame_offsets.append(time_offset)
return(sorted(set(frame_offsets)))
def extract_image_from_video(video_input, name_output, time_stamp):
ret = "Error"
try:
ret = os.system("ffmpeg -i " + video_input + " -ss " + time_stamp + " -frames:v 1 " + name_output)
# if all goes well FFMPEG will return 0
return ret
except ValueError:
return("Oops! error...")
def crop_image(input_image, output_image, start_x, start_y, width, height):
"""Pass input name image, output name image, x coordinate to start croping, y coordinate to start croping, width to crop, height to crop """
input_img = Image.open(input_image)
# give the image some buffer space
start_with_buffer_x = int(start_x - np.ceil(width/2))
start_with_buffer_y = int(start_y - np.ceil(height/2))
width_with_buffer = int(start_x + width + np.ceil(width/2))
height_with_buffer = int(start_y + height + np.ceil(height/2))
box = (start_with_buffer_x, start_with_buffer_y, width_with_buffer, height_with_buffer)
output_img = input_img.crop(box)
output_img.save(output_image +".png")
return (output_image +".png")
def detect_face(face_file, max_results=4):
# can you find a face and return coordinates
client = vision.ImageAnnotatorClient()
content = face_file.read()
image = types.Image(content=content)
# return coords of face
return client.face_detection(image=image).face_annotations
def highlight_faces(image, faces):
# Draws a polygon around the faces, then saves to output_filename.
faces_boxes = []
im = Image.open(image)
draw = ImageDraw.Draw(im)
for face in faces:
box = [(vertex.x, vertex.y)
for vertex in face.bounding_poly.vertices]
draw.line(box + [box[0]], width=5, fill='#00ff00')
faces_boxes.append([box[0][0], box[0][1], box[1][0] - box[0][0], box[3][1] - box[0][1]])
return (faces_boxes)
def annotate(path):
"""Returns web annotations given the path to an image."""
client = vision.ImageAnnotatorClient()
if path.startswith('http') or path.startswith('gs:'):
image = types.Image()
image.source.image_uri = path
else:
with io.open(path, 'rb') as image_file:
content = image_file.read()
image = types.Image(content=content)
web_detection = client.web_detection(image=image).web_detection
return web_detection
def report(annotations, max_report=5):
"""Prints detected features in the provided web annotations."""
names = []
if annotations.web_entities:
print ('\n{} Web entities found: '.format(
len(annotations.web_entities)))
count = 0
for entity in annotations.web_entities:
print('Score : {}'.format(entity.score))
print('Description: {}'.format(entity.description))
names.append(entity.description)
count += 1
            if count >= max_report:
                break
return names
def get_stills(movie_to_process, bucket_name, timestamps_to_pull):
video_location = 'https://storage.googleapis.com/' + bucket_name + '/' + movie_to_process
storage_client = storage.Client()
max_results = 3
timestamps_to_pull_tmp = timestamps_to_pull + [x + 0.15 for x in timestamps_to_pull[:-1]] + [x - 0.15 for x in timestamps_to_pull[1:]]
# clear out stills folder
if len(timestamps_to_pull_tmp) > 0:
# create directory structure
os.system('mkdir tmp')
os.system('mkdir tmp/faces_found')
os.system('mkdir tmp/text_found')
os.system('mkdir tmp/face_images')
filepath = 'tmp/'
# make stills
cnt_ = 0
for ttp in timestamps_to_pull_tmp:
# get the still image at that timestamp
time_stamp = str(datetime.timedelta(seconds=ttp))
file = "still_" + str(cnt_) + ".png"
filePathAndName = filepath + file
print('filename: ' + time_stamp)
ret = extract_image_from_video(video_input = video_location, name_output = filePathAndName, time_stamp = time_stamp)
cnt_ += 1
# find face on still image
with open(filePathAndName, 'rb') as image:
faces = detect_face(image, max_results)
print('Found {} face{}'.format(
len(faces), '' if len(faces) == 1 else 's'))
print('Looking for a face {}'.format(filePathAndName))
# Reset the file pointer, so we can read the file again
image.seek(0)
faces_boxes = highlight_faces(filePathAndName, faces) #, filePathAndName)
print('faces_boxes:', faces_boxes)
if len(faces_boxes) > 0:
# image had a face
count = 0
for face_box in faces_boxes:
# cv2.imwrite("frame%d.jpg" % count, image) # save frame as JPEG file
saved_name = crop_image(filePathAndName, "tmp/face_images/" + file.split('.')[0] + str(count) + '_faces', face_box[0], face_box[1], face_box[2], face_box[3])
count += 1
# get actors name
potential_names = report(annotate(saved_name),2)
print('potential_names: ', potential_names)
# does the first have two words - as in first and last name?
if (len(potential_names[0].split()) == 2):
# we have a winner
new_name = 'tmp/faces_found/' + potential_names[0] + '.png'
shutil.copy(saved_name,new_name)
# extract wiki bio
rez = wikipedia.page(potential_names[0]).content
# keep only intro paragraph
with open('tmp/text_found/' + potential_names[0] + ".txt", "w") as text_file:
text_file.write(rez.split('\n\n')[0] + " (Source: Wikipedia.com)")
BUCKET_NAME = ''
MOVIE_TO_PROCESS = ''
if __name__ == "__main__":
if len(sys.argv) == 3:
MOVIE_TO_PROCESS = sys.argv[1]
BUCKET_NAME = sys.argv[2]
# start things off clean
print('Cleaning up...')
init()
print('Finding people...')
# use video intelligence to find high probability of people being visible
timestamps_to_pull = analyze_labels(MOVIE_TO_PROCESS, BUCKET_NAME)
print('Processing people...')
get_stills(MOVIE_TO_PROCESS, BUCKET_NAME, timestamps_to_pull)
print('All done...')
else:
print('Wrong argument inputs')
|
# -*- coding: utf-8 -*-
""" ymir.puppet
Defines a puppet mixin for the base ymir service class
"""
import re
import os
import glob
import shutil
import functools
from fabric import api
from fabric.contrib.files import exists
from ymir.util import puppet as util_puppet
from ymir import data as ydata
GIT_ROLE = 'geerlingguy.git'
# if/when puppet build happens, it more or less follows the instructions here:
# https://docs.puppetlabs.com/puppet/3.8/reference/install_tarball.html
PUPPET_VERSION = [3, 4, 3]
def noop_if_no_puppet_support(fxn):
""" """
@functools.wraps(fxn)
def newf(self, *args, **kargs):
if not self._supports_puppet:
return
return fxn(self, *args, **kargs)
return newf
class PuppetMixin(object):
""" """
@property
def _supports_puppet(self):
""" use _service_json here, it's a simple bool and not templated """
if not hasattr(self, '_supports_puppet_cache'):
self._supports_puppet_cache = self._service_json[
'ymir_build_puppet']
icon = ydata.SUCCESS if self._supports_puppet_cache else ydata.FAIL
self.report(icon + "ymir puppet support enabled?")
return self._supports_puppet_cache
@property
def _puppet_metadata(self):
return os.path.join(self._puppet_dir, 'metadata.json')
@property
def _puppet_dir(self):
pdir = os.path.join(self._ymir_service_root, 'puppet')
return pdir
@property
def _puppet_templates(self):
return self._get_puppet_templates()
def _get_puppet_templates(self):
""" return puppet template files relative to working directory """
return glob.glob(
os.path.join(self._puppet_dir, 'modules', '*', 'templates', '*'))
def _get_puppet_template_vars(self):
""" returns a dictionary of { puppet_file : [..,template_vars,..]}"""
out = {}
for f in self._puppet_templates:
with open(f, 'r') as fhandle:
content = fhandle.read()
out[f] = [x for x in re.findall('<%= @(.*?) %>', content)]
return out
@noop_if_no_puppet_support
def copy_puppet(self, clean=True, puppet_dir='puppet', lcd=None):
""" copy puppet code to remote host (refreshes any dependencies) """
lcd = lcd or self._ymir_service_root
# remote_user_home = '/home/' + self._username
self.report(' flushing remote puppet code and refreshing')
return self._rsync(
src=os.path.join(lcd, puppet_dir, '*'),
dest=os.path.join('~', puppet_dir),
delete=clean,
)
@noop_if_no_puppet_support
def _clean_puppet_tmp_dir(self):
""" necessary because puppet librarian is messy,
and these temporary files can interfere with
validation
"""
tdir = os.path.join(self._ymir_service_root, 'puppet', '.tmp')
if os.path.exists(tdir):
shutil.rmtree(tdir)
self.report(ydata.SUCCESS + "cleaned puppet-librarian tmp dir")
def _provision_puppet(self, provision_item, puppet_dir='puppet', extra_facts={}):
""" runs puppet on remote host. puppet files must already have been copied """
service_data = self.template_data()
facts = self.facts
facts.update(**extra_facts)
with self._rvm_ctx():
return util_puppet.run_puppet(
provision_item,
parser=service_data['puppet_parser'],
facts=facts,
debug=self._debug_mode,
puppet_dir=puppet_dir,)
@property
def _using_rvm(self):
with api.quiet():
has_rvm = api.run('which rvm').succeeded
return has_rvm
def _rvm_ctx(self, ruby_version='system'):
if self._using_rvm: # ruby version was old so ymir installed another ruby side-by-side
prefix = "rvm use " + ruby_version
else:
prefix = "true"
return api.prefix(prefix)
@noop_if_no_puppet_support
def _setup_puppet_deps(self, force=False):
""" puppet itself is already installed at this point,
this sets up the provisioning dependencies
"""
def sync_puppet_librarian(_dir):
found_modules = exists(os.path.join(
_dir, 'modules'), use_sudo=True)
if not force and found_modules:
msg = "puppet-librarian has already processed modules and `force` was unset"
self.report(ydata.SUCCESS + msg)
return
if not found_modules:
msg = "puppet-librarian hasn't run yet, modules dir is missing"
self.report(ydata.FAIL + msg)
if force:
msg = "update for puppet-librarian will be enforced"
self.report(ydata.SUCCESS + msg)
with api.cd(_dir):
with self._rvm_ctx("1.9.3"):
api.run('librarian-puppet clean')
api.run('librarian-puppet install {0}'.format(
'--verbose' if self._debug_mode else ''))
msg = "puppet-librarian finished updating puppet modules"
self.report(ydata.SUCCESS + msg)
self.report('installing puppet & puppet deps', section=True)
if not self._supports_puppet:
return
self._install_puppet()
self._install_ruby()
self._install_git()
with api.quiet():
has_gem = api.run("gem --version").succeeded
if not has_gem:
self.report(
ydata.FAIL + "`gem` not found but ruby was already installed!")
raise SystemExit(1)
with api.quiet():
has_librarian = api.run(
"gem list | grep -c librarian-puppet").succeeded
if not has_librarian:
self.report(ydata.FAIL + "puppet librarian not found")
with self._rvm_ctx("1.9.3"):
if self._using_rvm:
api.sudo('gem install puppet --no-ri --no-rdoc')
api.sudo('gem install librarian-puppet --no-ri --no-rdoc')
else:
self.report(ydata.SUCCESS + "puppet librarian already installed")
sync_puppet_librarian("puppet")
def _install_ruby(self):
""" installs ruby on the remote service,
requiring at least version 1.9. if not found,
ruby_version: 2.2.3 will be installed
"""
with api.quiet():
has_ruby = api.run("ruby --version")
ruby_version = has_ruby.succeeded and has_ruby.split()[1]
has_ruby = has_ruby.succeeded
if not has_ruby or not (ruby_version.startswith('1.9') or ruby_version.startswith('2')):
self.report(
ydata.FAIL + "ruby is missing or old: " + str(ruby_version))
self._provision_ansible_role(
"rvm_io.rvm1-ruby", rvm1_rubies=['ruby-1.9.3'])
self.sudo("rvm default system")
self.run("rvm default system")
self.report(ydata.SUCCESS +
"finished installing new ruby with rvm")
else:
msg = "ruby is present on the remote side. version={0}"
self.report(ydata.SUCCESS + msg.format(ruby_version))
def _install_git(self):
""" installs git on the remote service """
with api.quiet():
has_git = api.run("git --version").succeeded
if not has_git:
self.report(ydata.FAIL + "git is missing, installing it")
with api.hide("output"):
self._apply_ansible_role(GIT_ROLE)
self.report(ydata.SUCCESS + "git was installed")
else:
self.report(ydata.SUCCESS + "remote side already has git")
def _install_puppet(self):
""" """
def build_puppet():
cmd = "git clone https://github.com/hashicorp/puppet-bootstrap.git"
self.report("checking for bootstrap scripts")
if not os.path.exists(
os.path.join(
self._ansible_dir,
'puppet-bootstrap')):
with api.lcd(self._ansible_dir):
api.local(cmd)
self._provision_ansible_playbook("ansible/puppet.yml")
with api.quiet():
puppet_version = api.run('puppet --version')
puppet_installed = not puppet_version.failed
if puppet_installed:
puppet_version = puppet_version.strip().split('.')
puppet_version = map(int, puppet_version)
msg = "puppet is already installed, version is {0}"
self.report(ydata.SUCCESS + msg.format(puppet_version))
else:
puppet_version = None
msg = "puppet not installed, building it from scratch"
self.report(ydata.FAIL + msg)
return build_puppet()
if puppet_version and puppet_version < PUPPET_VERSION:
            # ydata.FAIL is the marker used everywhere else in this module
            self.report(ydata.FAIL +
                        "puppet version is older than what is suggested")
|
#!/usr/bin/env python
# for each antibody, we want a different cell type
import re
import sys
import numpy as np
import os
import random
import subprocess
from optparse import OptionParser
class Data:
def __init__(self,A):
self.set_name = A[0][:-13] + '.set'
for a in A[1].split(';'):
field,value = a.split('=')
if 'cell' in field:
self.cell = value
if 'antibody' in field:
self.antibody = value
parser = OptionParser()
parser.add_option("-d",
"--data_dir",
dest="data_dir",
help="Data directory")
parser.add_option("-o",
"--out_dir",
dest="out_dir",
help="Output dir")
parser.add_option("-f",
"--files",
dest="files",
help="Files directory")
(options, args) = parser.parse_args()
if not options.data_dir:
parser.error('Data directory not given')
if not options.files:
parser.error('File directory not given')
if not options.out_dir:
parser.error('Output directory not given')
f = open(options.files,'r')
cell_types = {}
for l in f:
A = l.rstrip().split('\t')
if 'broadPeak' in A[0]:
b=Data(A)
if not b.cell in cell_types:
cell_types[b.cell] = {}
if not b.antibody in cell_types[b.cell]:
cell_types[b.cell][b.antibody] = []
cell_types[b.cell][b.antibody].append(b)
f.close()
cell_type_counts={}
for cell_type in cell_types:
l = len(cell_types[cell_type])
if not l in cell_type_counts:
cell_type_counts[l] = []
cell_type_counts[l].append(cell_type)
possible_cell_types = []
for count in sorted(cell_type_counts.keys()):
if count > 1:
for a in cell_type_counts[count]:
possible_cell_types.append(a)
for cell_type in possible_cell_types:
rand_choice = []
print options.out_dir + '/' + cell_type + '.set'
for antibody in cell_types[cell_type]:
r = random.choice(cell_types[cell_type][antibody])
rand_choice.append(options.data_dir + '/' + r.set_name)
cmd = 'cat ' + \
' '.join(rand_choice) + \
' > ' + options.out_dir + '/' + cell_type + '.set'
os.system(cmd)
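# Example invocation (the script name and paths are hypothetical):
#   python pick_sets.py -d peaks/ -o sets/ -f file_manifest.txt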
|
"""
This module gives access to the limited word2vec model that contains the words
similar to the emojis in emojilib.
"""
import gc
import os
import numpy as np
from gensim import matutils
from gensim.models.keyedvectors import KeyedVectors
from paths import BIN_NAME, DP_NAME, SAVE_NAME, NSAVE_NAME
MAX_DEGREE = 0.5 # Not really a degree; just a number from 0 to 1 representing similarity
CHUNKSIZE = 1000 # For splitting up memmaps
def generate_dps(wordcorpus):
"""Generate the maximum similarity of each word to emoji names.
It takes in a corpus which is a set of all emoji words.
"""
print('Loading model')
model = KeyedVectors.load_word2vec_format(BIN_NAME, binary=True)
print('Model loaded!')
# Limit the corpus to words in the model
wcl = list(word for word in wordcorpus if word in model.vocab)
# Precompute word vectors so the loops are faster
corpus = np.array([matutils.unitvec(model.word_vec(word)) for word in wcl])
print('Created corpus with {} elements'.format(len(corpus)))
print('Computing norms')
model.init_sims(replace=True)
# Save memory by deleting non-normed data
syn0norm = model.syn0norm
del model
    # Convert syn0norm to a memmap to further reduce memory
print('Saving to memmap')
inarr = np.memmap('inmemmap.dat', dtype=syn0norm.dtype, mode='w+', shape=syn0norm.shape)
inarr[:] = syn0norm[:]
outarr = np.memmap('outmemmap.dat', dtype=syn0norm.dtype, mode='w+', shape=(syn0norm.shape[0],))
# Discard the array now that it's stored in a memmap
del syn0norm
print('Computing dot products')
for c in range(0, int(inarr.shape[0]/CHUNKSIZE)):
cend = min(inarr.shape[0], (c+1)*CHUNKSIZE)
outarr[c*CHUNKSIZE:cend] = np.amax(np.inner(inarr[c*CHUNKSIZE:cend], corpus), axis=1)
np.save(DP_NAME, outarr)
del inarr
del outarr
gc.collect()
os.remove('inmemmap.dat')
os.remove('outmemmap.dat')
def generate_limittedmodel():
"""Generate the word2vec model with a subset of the original vocab.
The dot products will need to have been computed, so `generate_dps()` may
need to be called before this function.
"""
print('Loading model')
model = KeyedVectors.load_word2vec_format(BIN_NAME, binary=True)
print('Model loaded!')
print('Loading dot products')
dp = np.load(DP_NAME)
print('Dot products loaded')
print('Filtering vocab')
for name, vocab in list(model.vocab.items()):
if dp[vocab.index] < MAX_DEGREE:
del model.vocab[name]
il = list(model.vocab.items())
print('Sorting vocab')
il.sort(key=lambda x: x[1].index)
# Find the indexes of the words that are being kept
print('Generating indexes')
indexes = []
for i in range(0, len(il)):
name, vocab = il[i]
indexes.append(vocab.index)
model.vocab[name].index = i
print('Modifying model weights')
model.syn0 = model.syn0[indexes]
print('Saving file')
model.save_word2vec_format(SAVE_NAME, binary=True)
def generate_normedmodel():
"""Generate a word2vec model with all vectors normed."""
# Load the reduced word2vec model
print('Loading model')
model = KeyedVectors.load_word2vec_format(SAVE_NAME, binary=True)
print('Model loaded!')
print('Computing norms')
model.init_sims(replace=True)
print('Saving model')
model.save(NSAVE_NAME)
def normedmodel(corpus):
"""Return the limitted word2vec model.
The function takes in a corpus which is a set of all emoji words.
"""
if not os.path.isfile(NSAVE_NAME):
if not os.path.isfile(SAVE_NAME):
if not os.path.isfile(DP_NAME):
generate_dps(corpus)
generate_limittedmodel()
generate_normedmodel()
# Load the reduced word2vec model
print('Loading model')
model = KeyedVectors.load(NSAVE_NAME, mmap='r')
print('Model loaded!')
return model
def vectorcorpus(model, wcl):
"""Return an array of word vectors for the dict of words.
The provided model is assumed to have normed vectors.
"""
corpus = np.array([model.word_vec(word) for word, _ in wcl])
print('Created corpus with {} elements'.format(len(corpus)))
return corpus
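if __name__ == '__main__':
    # Usage sketch with a toy corpus; the real corpus is the set of emoji
    # words described in the module docstring, so these words are
    # illustrative only.
    model = normedmodel({'smile', 'dog', 'heart'})
    print(model.most_similar('dog', topn=3))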
|
import abc
class Optimizer(abc.ABC):
@abc.abstractmethod
def function_to_minimize(self, parameters):
pass
@abc.abstractmethod
def run(self):
pass
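# A minimal concrete subclass, added as an illustrative sketch (the toy
# objective and grid search below are not part of the original module):
class GridSearchOptimizer(Optimizer):
    def __init__(self, candidates):
        self.candidates = list(candidates)  # parameter values to try

    def function_to_minimize(self, parameters):
        return (parameters - 3) ** 2  # toy objective, minimised at 3

    def run(self):
        return min(self.candidates, key=self.function_to_minimize)

# e.g. GridSearchOptimizer(range(10)).run() returns 3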
|
'''
applications of a stack
1- function calls
2- checking for balanced parentheses
3- reversing items
4- infix to prefix/postfix conversion
5- evaluation of prefix/postfix expressions
6- the stock span problem and its variations
7- forward/backward navigation
implementations of a stack in python
1- using a list :
--append at the end
--remove from the end
2- using collections.deque : an implementation of a doubly linked list
--
3- using queue.LifoQueue -- mainly used in multi-threaded environments, so not covered further here
4- using our own implementation
'''
'''
using list
'''
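# Sketch of the list-based stack named above: append() pushes onto the top,
# pop() removes from the same end; both are amortised O(1).
stack = []
stack.append(1)
stack.append(2)
stack.append(3)
print(stack)        # [1, 2, 3]
print(stack.pop())  # 3
print(stack)        # [1, 2]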
'''
using deque
it is an implementation of a doubly linked list.
'''
from collections import deque
stack = deque()
#append operation
stack.append(1)
stack.append(2)
stack.append(3)
print(stack)
stack.pop()
import math
class Node:
def __init__(self, d):
self.data = d
self.next = None
class MyStack:
def __init__(self):
self.head = None
self.sz = 0
def push(self, x):
temp = Node(x)
temp.next = self.head
self.head = temp
self.sz = self.sz+1
def size(self):
return self.sz
def peek(self):
        if self.head is None:
return math.inf
return self.head.data
def pop(self):
        if self.head is None:
            return math.inf
        res = self.head.data
        self.head = self.head.next
        self.sz = self.sz - 1
return res
s=MyStack()
s.push(10)
s.push(20)
s.push(30)
s.push(60)
print(s.pop())
print(s.peek())
print(s.size())
print(s.peek())
|
#!/usr/bin/python
import sys
import yaml
import requests
import argparse
import logging
import logging.handlers
DEFAULT_APIBASE = 'https://api.aprs.fi/api/'
DEFAULT_USER_AGENT = 'aprsfi-py-api-client 1.0'
class APRSFIClient(object):
"""
Post objects to aprs.fi using the REST API. Note that this API is
currently not available on the main aprs.fi site.
"""
def __init__(self, logger = None, apibase = DEFAULT_APIBASE, apikey = None, basicauth_user = None, basicauth_pass = None, user_agent = DEFAULT_USER_AGENT):
self.log = logger
self.apibase = apibase
self.apikey = apikey
if basicauth_user and basicauth_pass:
self.basic_auth = (basicauth_user, basicauth_pass)
else:
self.basic_auth = None
self.user_agent = user_agent
def api_req(self, url, url_params, postdata = None, loginfo = ''):
url_full = self.apibase + url
headers = {
'User-Agent': self.user_agent
}
url_params['apikey'] = self.apikey
try:
r = requests.post(url_full, params = url_params, json = postdata, auth = self.basic_auth,
headers = headers, timeout = 30)
r.raise_for_status()
rdata = r.json()
if rdata.get('result') == 'ok':
self.log.info("OK: %s got %d: %s" % (loginfo, r.status_code, r.text))
else:
self.log.error("FAIL: %s got %d: %s" % (loginfo, r.status_code, r.text))
except requests.exceptions.HTTPError as exc:
self.log.error("FAIL HTTP: %s: %r", loginfo, exc)
return
except Exception as exc:
self.log.error("FAIL HTTP: %s: %r", loginfo, exc)
return
def post_object(self, obj):
self.api_req("post", {"what": "loc"}, {
'type': 'o',
'name': obj.get('name'),
'comment': obj.get('comment'),
'symbol': obj.get('symbol'),
'locs': [
{
'lat': obj.get('lat'),
'lng': obj.get('lon'),
}
]
}, loginfo = "post loc '%s'" % obj.get('name'))
def process_yaml(self, yo):
objects = yo.get("objects", [])
for obj in objects:
self.post_object(obj)
def process_file(self, fname):
with open(fname, 'r') as stream:
try:
                yo = yaml.safe_load(stream)
except yaml.YAMLError as exc:
self.log.error("YAML failure for %s: %r", fname, exc)
return
self.process_yaml(yo)
def process_url(self, url):
try:
r = requests.get(url, timeout = 30)
r.raise_for_status()
            yo = yaml.safe_load(r.text)
except yaml.YAMLError as exc:
self.log.error("YAML failure for %s: %r", url, exc)
return
except requests.exceptions.HTTPError as exc:
self.log.error("YAML HTTP error for %s: %r", url, exc)
return
except Exception as exc:
self.log.error("YAML exception for %s: %r", url, exc)
return
self.process_yaml(yo)
def get_logger():
log = logging.getLogger('aprsfi-api-client')
log.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(name)s: %(levelname)s - %(message)s')
handler = logging.handlers.SysLogHandler(address = '/dev/log')
handler.setFormatter(formatter)
log.addHandler(handler)
return log
def main():
parser = argparse.ArgumentParser(description='Upload data to aprs.fi API')
parser.add_argument('--api-key', dest='api_key', type=str, help='API key')
parser.add_argument('--base-url', dest='base_url', type=str, default=DEFAULT_APIBASE, help='API base URL')
parser.add_argument('--input-file', dest='input_file', type=str, help='YAML file path')
parser.add_argument('--input-url', dest='input_url', type=str, help='YAML file URL')
# only used in testing environment
parser.add_argument('--basicauth-user', dest='basicauth_user', type=str, help='debug/test env: username')
parser.add_argument('--basicauth-pass', dest='basicauth_pass', type=str, help='debug/test env: password')
args = parser.parse_args()
logger = get_logger()
ayo = APRSFIClient(logger = logger, apibase = args.base_url, apikey = args.api_key, basicauth_user = args.basicauth_user, basicauth_pass = args.basicauth_pass)
if args.input_file:
ayo.process_file(args.input_file)
if args.input_url:
ayo.process_url(args.input_url)
if __name__ == "__main__":
# execute only if run as a script
main()
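# A hedged example of the YAML layout this client consumes, inferred from
# process_yaml()/post_object() above (all field values are placeholders):
#
#     objects:
#       - name: "TEST-1"
#         comment: "test object"
#         symbol: "/#"
#         lat: 60.17
#         lon: 24.94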
|
import torch
import torchvision
import torch.nn as nn
import torch.nn.modules as M
import copy
from math import log10, floor
import time
import numpy as np
from .visualization_utils import *
def train(net, trainloader, criterion, optimizer, nbr_epochs=-1, nbr_images=-1, max_time=-1, train_monitors=[], resume=None):
total_time = time.time()
try:
if (nbr_epochs < 0) and (nbr_images < 0):
print("Warning : you should set either nbr_epochs or nbr_images")
nbr_epochs = 1
if (nbr_epochs < 0):
            nbr_epochs = 2 + int(nbr_images/len(trainloader))  # +2 so nbr_images is still reached when it falls on an epoch boundary
if isinstance(train_monitors, Train_Monitor):
train_monitors = [train_monitors]
if isinstance(train_monitors, list):
train_monitors = List_Train_Monitor(train_monitors, default_step=len(trainloader))
# Initialization and/or loading of the previous train parameters
if resume is None:
resume = {}
train_state = {}
# We define monitors data
monitors_data = {}
for k in train_monitors.names():
monitors_data[k] = []
if k in resume.keys():
monitors_data[k] = resume[k]
# We setup the train state
total_running_loss = 0.0
if ("total_running_loss" in resume.keys()):
total_running_loss = resume["total_running_loss"]
total_train_error = 0
if ("total_train_error" in resume.keys()):
total_train_error = resume["total_train_error"]
nbr_loss_examples = 0
if ("nbr_loss_examples" in resume.keys()):
nbr_loss_examples = resume["nbr_loss_examples"]
nbr_train_examples = 0
if ("nbr_train_examples" in resume.keys()):
nbr_train_examples = resume["nbr_train_examples"]
start_epoch = 0
if ("epoch" in resume.keys()):
start_epoch = resume["epoch"]
i = 0
if ("i" in resume.keys()):
i = resume["i"]
start_i = i
stop_time = time.time()
start_time = stop_time
if ("time" in resume.keys()):
start_time = stop_time - resume["time"]
urgent_stop = False
# We initialize the train state variable
train_state["i"] = i
train_state["epoch"] = int((i-1)/len(trainloader))
train_state["time"] = stop_time - start_time
train_state["total_running_loss"] = total_running_loss
train_state["total_train_error"] = total_train_error
train_state["nbr_loss_examples"] = nbr_loss_examples
train_state["nbr_train_examples"] = nbr_train_examples
train_monitors.resume(net, train_state)
# We start the clock just before training !
start_time += time.time() - stop_time
for epoch in range(start_epoch, start_epoch + nbr_epochs):
if urgent_stop:
                break
for data in trainloader:
if (nbr_images >= 0) and (i >= nbr_images + start_i):
urgent_stop = True
                    break
if (max_time >= 0) and (time.time() - start_time > max_time):
urgent_stop = True
                    break
i += 1
# get the inputs
inputs, labels = data
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = net(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# print statistics
to_print = ""
# Update running loss
total_running_loss += loss.item()
nbr_loss_examples += 1
# Update train error
_, predicted = torch.max(outputs.data, 1)
total_train_error += (predicted != labels).sum().item()
nbr_train_examples += labels.size(0)
# We execute the train monitors
stop_time = time.time()
train_state["i"] = i
train_state["epoch"] = int((i-1)/len(trainloader))
train_state["time"] = stop_time - start_time
train_state["total_running_loss"] = total_running_loss
train_state["total_train_error"] = total_train_error
train_state["nbr_loss_examples"] = nbr_loss_examples
train_state["nbr_train_examples"] = nbr_train_examples
monitors_data, to_print = train_monitors(net, train_state, monitors_data)
# for k,v in res.items():
# monitors_data[k].append((i, stop_time - start_time, v))
start_time += time.time() - stop_time
# We print statistics
if (to_print != ""):
to_print = "i : {0: <10}".format(i) + to_print
to_print = "epoch : {0: <5}".format(int((i-1)/len(trainloader))) + to_print
to_print = "time : {0: <10}".format(round(time.time() - start_time, 3)) + to_print
print(to_print)
# We save the results, also used for resuming training later
for k in train_state.keys():
resume[k] = train_state[k]
for k in monitors_data.keys():
resume[k] = monitors_data[k]
print("\n Total time of the train : {} s".format(round(time.time() - total_time, 3)))
return resume
except KeyboardInterrupt:
train_state["i"] = i
train_state["epoch"] = int((i-1)/len(trainloader))
train_state["time"] = stop_time - start_time
train_state["total_running_loss"] = total_running_loss
train_state["total_train_error"] = total_train_error
train_state["nbr_loss_examples"] = nbr_loss_examples
train_state["nbr_train_examples"] = nbr_train_examples
for k in train_state.keys():
resume[k] = train_state[k]
for k in monitors_data.keys():
resume[k] = monitors_data[k]
print("\n Total time of the train : {} s".format(round(time.time() - total_time, 3)))
raise
def test_output(net, testloader):
all_predicted = []
all_truth = []
with torch.no_grad():
for data in testloader:
images, labels = data
outputs = net(images)
_, predicted = torch.max(outputs.data, 1)
all_predicted += list(predicted.detach().numpy())
all_truth += list(labels.detach().numpy())
return (np.asarray(all_predicted), np.asarray(all_truth))
def prediction_error(predicted, ground_truth, to_print=False):
ne = np.sum((predicted != ground_truth).astype(int))
pred_error = ne*100./predicted.shape[0]
if to_print:
print("error = {}".format(round(pred_error, 3)))
return pred_error
def prediction_error_per_class(predicted, ground_truth):
pred_errors = []
for i in range(np.max(ground_truth) + 1):
gt_eq_i = (ground_truth == i).astype(int)
ne = np.sum((predicted != i).astype(int)*gt_eq_i)
pred_errors.append(ne*100./np.sum(gt_eq_i))
return pred_errors
def prediction_false_positive_per_class(predicted, ground_truth):
pred_errors = []
for i in range(np.max(ground_truth) + 1):
gt_neq_i = (ground_truth != i).astype(int)
ne = np.sum((predicted == i).astype(int)*gt_neq_i)
pred_errors.append(ne*100./np.sum(1-gt_neq_i))
return pred_errors
def test(net, testloader, to_print=True):
predicted, ground_truth = test_output(net, testloader)
test_error = prediction_error(predicted, ground_truth, to_print=False)
if to_print:
print("test_error = {}".format(round(test_error, 3)))
return test_error
def full_test(net, testloader, classes, to_print=True):
""" Deprecated """
nbr_classes = len(classes)
class_not_correct = [0 for i in range(nbr_classes)]
class_total = [0 for i in range(nbr_classes)]
class_fp = [0 for i in range(nbr_classes)]
with torch.no_grad():
for data in testloader:
images, labels = data
outputs = net(images)
_, predicted = torch.max(outputs, 1)
c = (predicted != labels).squeeze()
            for i in range(labels.size(0)):  # iterate over the whole batch (was hardcoded to 4)
label = labels[i]
class_not_correct[label] += c[i].item()
class_total[label] += 1
class_fp[predicted[i]] += c[i].item()
if to_print:
test_error = np.sum(class_not_correct) * 100. / np.sum(class_total)
s = "average error = {}\n".format(round(test_error, 3))
lc = np.max([len(c) for c in classes])
tc = "{{0: >{}}} : ".format(lc)
for i, c in enumerate(classes):
if (class_total[i] == 0):
class_total[i] = -1
s2 = tc.format(c)
s2 += "error = {0: <10}".format(round(class_not_correct[i] * 100. / class_total[i], 3))
s2 += "fp = {0: <10}".format(round(class_fp[i] * 100. / class_total[i], 3))
s += s2 + '\n'
print(s)
res = {}
res["error"] = class_not_correct
res["total"] = class_total
res["fp"] = class_fp
return res
def get_layers(net):
return _get_layers(net, [])
def _get_layers(net, all_layers=[]):
for layer in net.children():
if type(layer) == nn.Sequential: # if sequential layer
all_layers += _get_layers(layer, [])
if list(layer.children()) == []: # if leaf node, add it to list
all_layers.append(layer)
return all_layers
def freeze(net, layers=3):
all_layers = get_layers(net)
all_layers = [lay for lay in all_layers if hasattr(lay, 'weight')]
if (layers < 0):
layers += len(all_layers)
for i,lay in enumerate(all_layers):
if (i < layers):
lay.weight.requires_grad = False
lay.bias.requires_grad = False
else:
lay.weight.requires_grad = True
lay.bias.requires_grad = True
# ------------------------------------------------------------------ #
# | |
# | Now we define functions that will be called inside train |
# | |
# ------------------------------------------------------------------ #
class Train_Monitor(object):
def __init__(self, step=None):
self.name = "base_class"
self.step=step
def resume(self, net, train_state):
pass
def __call__(self, net, train_state=None):
return None
def plot(self):
pass
class List_Train_Monitor(object):
def __init__(self, train_monitors, default_step=10000):
self.train_monitors = []
for t in train_monitors:
if isinstance(t, tuple):
t[0].step = t[1]
self.train_monitors.append(t[0])
elif (t.step is None):
t.step = default_step
self.train_monitors.append(t)
else:
self.train_monitors.append(t)
def names(self):
return [t.name for t in self.train_monitors]
def resume(self, net, train_state):
for t in self.train_monitors:
t.resume(net, train_state)
def __call__(self, net, train_state, monitors_data):
to_print = ""
for t in self.train_monitors:
if (train_state["i"] % t.step == 0):
if isinstance(t, Save_Net):
r, p = t(net, train_state, monitors_data)
else:
r, p = t(net, train_state)
monitors_data[t.name].append((train_state["i"], train_state["time"], r))
to_print += p
return monitors_data, to_print
class Running_Loss(Train_Monitor):
def __init__(self, step=None):
self.name = "running_loss"
self.step = step
self.last_loss = 0
self.last_nbr_examples = 0
self.round_x_n = lambda x,n : round(x, -int(floor(log10(abs(x)))) + n - 1)
def resume(self, net, train_state):
self.last_loss = train_state["total_running_loss"]
self.last_nbr_examples = train_state["nbr_loss_examples"]
def __call__(self, net, train_state):
running_loss = (train_state["total_running_loss"] - self.last_loss)
running_loss *= 1./(train_state["nbr_loss_examples"] - self.last_nbr_examples)
self.resume(net, train_state)
to_print = "loss = {0: <11}".format(self.round_x_n(running_loss, 6))
return running_loss, to_print
def plot(self, data, x='i', linecolor='.-b'):
if data[self.name]:
plot_error(data[self.name], x=x, linecolor=linecolor)
plt.ylabel("loss")
class Train_Error(Train_Monitor):
def __init__(self, step=None):
self.name = "train_error"
self.step = step
self.last_error = 0
self.last_nbr_examples = 0
def resume(self, net, train_state):
self.last_error = train_state["total_train_error"]
self.last_nbr_examples = train_state["nbr_train_examples"]
def __call__(self, net, train_state):
train_error = (train_state["total_train_error"] - self.last_error)
train_error *= 100./(train_state["nbr_train_examples"] - self.last_nbr_examples)
self.resume(net, train_state)
to_print = "train_error = {0: <10}".format(round(train_error, 3))
return train_error, to_print
def plot(self, data, x='i', linecolor='.-k'):
if data[self.name]:
plot_error(data[self.name], x=x, linecolor=linecolor)
class Test_Prediction(Train_Monitor):
def __init__(self, testloader, step=None):
self.name = "test_prediction"
self.step = step
self.testloader = testloader
self.last_i = 0
def __call__(self, net, train_state=None, recompute=None):
if (train_state is None):
if recompute or ((self.last_i is not None) and (recompute is None)):
self.prediction, self.ground_truth = test_output(net, self.testloader)
self.last_i = None
elif (train_state["i"] != self.last_i):
self.prediction, self.ground_truth = test_output(net, self.testloader)
self.last_i = train_state["i"]
return (self.prediction, self.ground_truth), ""
class Test_Error(Train_Monitor):
def __init__(self, testloader_or_test_prediction, step=None):
self.name = "test_error"
self.step = step
self.test_prediction = testloader_or_test_prediction
if not isinstance(testloader_or_test_prediction, Test_Prediction):
self.test_prediction = Test_Prediction(testloader_or_test_prediction, step)
def __call__(self, net, train_state=None, recompute=None):
res, _ = self.test_prediction(net, train_state, recompute)
prediction, ground_truth = res
test_error = prediction_error(prediction, ground_truth, to_print=False)
to_print = "test_error = {0: <10}".format(round(test_error, 3))
return test_error, to_print
def plot(self, data, x='i', linecolor='.-r'):
if data[self.name]:
plot_error(data[self.name], x=x, linecolor=linecolor)
class Test_Error_Per_Class(Train_Monitor):
def __init__(self, testloader_or_test_prediction, step=None):
self.name = "test_error_per_class"
self.step = step
self.test_prediction = testloader_or_test_prediction
if not isinstance(testloader_or_test_prediction, Test_Prediction):
self.test_prediction = Test_Prediction(testloader_or_test_prediction, step)
def __call__(self, net, train_state=None, recompute=None):
res, _ = self.test_prediction(net, train_state, recompute)
prediction, ground_truth = res
test_error = prediction_error_per_class(prediction, ground_truth)
temp = ["{0: <5}".format(round(i, 1)) for i in test_error]
to_print = "test_error_per_class = {} ".format(" ".join(temp))
return test_error, to_print
def plot(self, data, x='i', linecolor='.-m'):
if data[self.name]:
for i in range(len(data[self.name][0][2])):
d = [(j[0], j[1], j[2][i]) for j in data[self.name]]
plot_error(d, x=x, linecolor=linecolor)
class Test_False_Positive_Per_Class(Train_Monitor):
def __init__(self, testloader_or_test_prediction, step=None):
self.name = "test_false_positive_per_class"
self.step = step
self.test_prediction = testloader_or_test_prediction
if not isinstance(testloader_or_test_prediction, Test_Prediction):
self.test_prediction = Test_Prediction(testloader_or_test_prediction, step)
def __call__(self, net, train_state=None, recompute=None):
res, _ = self.test_prediction(net, train_state, recompute)
prediction, ground_truth = res
test_error = prediction_false_positive_per_class(prediction, ground_truth)
temp = ["{0: <5}".format(round(i, 1)) for i in test_error]
to_print = "test_fp_per_class = {} ".format(" ".join(temp))
return test_error, to_print
def plot(self, data, x='i', linecolor='.-m'):
if data[self.name]:
for i in range(len(data[self.name][0][2])):
d = [(j[0], j[1], j[2][i]) for j in data[self.name]]
plot_error(d, x=x, linecolor=linecolor)
class Save_Net(Train_Monitor):
def __init__(self, step=None, filename="saved_nets/autosave_net.pth", save_monitors=True):
self.name = "net_save"
self.step = step
self.filename = filename
self.save_monitors = save_monitors
def __call__(self, net, train_state={}, monitors_data={}):
to_save = train_state
fn = self.filename
if ("i" in train_state.keys()) and ("{}" in self.filename):
            fn = self.filename.format(train_state["i"])
if self.save_monitors:
to_save = dict(to_save, **monitors_data)
to_save["net"] = net
torch.save(to_save, fn)
return fn, "net saved"
def load_net(filename="saved_nets/autosave_net.pth"):
res = torch.load(filename)
net = res["net"]
del res["net"]
return net, res
class Copy_Net(Train_Monitor):
def __init__(self, step=None):
self.name = "net_copy"
self.step = step
self.net = None
def resume(self, net, train_state):
self.net = copy.deepcopy(net)
def __call__(self, net, train_state=None):
self.resume(net, train_state)
return net, ""
class Net_Parameters_Change(Train_Monitor):
def __init__(self, step=None, copy_net=None):
self.name = "net_parameters_change"
self.step = step
self.net = copy_net
if not isinstance(self.net, Copy_Net):
self.net = Copy_Net()
def resume(self, net, train_state):
self.net.resume(net, train_state)
def get_weights(self, net, layer_type):
layers = [(k,j,i) for k,(j,i) in enumerate(net.named_children()) if isinstance(i, layer_type)]
return [(k,j,i.weight.detach().numpy()) for (k,j,i) in layers]
def __call__(self, net, train_state=None):
conv_weight = self.get_weights(net, M.conv._ConvNd)
old_conv_weight = self.get_weights(self.net.net, M.conv._ConvNd)
conv_diff = [(k,j,np.mean(abs(l-i))) for (n,m,l),(k,j,i) in zip(conv_weight, old_conv_weight)]
lin_weight = self.get_weights(net, M.linear.Linear)
old_lin_weight = self.get_weights(self.net.net, M.linear.Linear)
lin_diff = [(k,j,np.mean(abs(l-i))) for (n,m,l),(k,j,i) in zip(lin_weight, old_lin_weight)]
res = conv_diff + lin_diff
res.sort()
self.resume(net, train_state)
return res, ""
def plot(self, data, x='i', linecolor='.-'):
if data[self.name]:
leg = []
for i in range(len(data[self.name][0][2])):
d = [(j[0], j[1], j[2][i][2]) for j in data[self.name]]
plot_error(d, x=x, linecolor=linecolor)
leg.append(str(data[self.name][0][2][i][0]) + " : " + data[self.name][0][2][i][1])
plt.ylabel("mean abs change")
plt.legend(leg)
class Show_Conv_Filters(Train_Monitor):
def __init__(self, step=None):
self.name = "conv_filters"
self.step = step
def get_weights(self, net):
lay = [(k,j,i) for k,(j,i) in enumerate(net.named_children()) if isinstance(i, M.conv.Conv2d)]
return [(k,j,i.weight.detach().numpy()) for (k,j,i) in lay]
def __call__(self, net, train_state=None):
conv_weight = copy.deepcopy(self.get_weights(net))
return conv_weight, ""
def plot(self, data, layer=0):
if data[self.name]:
leg = []
try:
if isinstance(layer, int):
ind = [i[0] for i in data[self.name][0][2]].index(layer)
else:
ind = [i[1] for i in data[self.name][0][2]].index(layer)
            except ValueError:
                ind = 0  # fall back to the first stored layer; TODO show conv filters for the other layers
d = np.asarray([j[2][ind][2] for j in data[self.name]])
mi = d.min()
ma = d.max()
d = (d - mi)/(ma - mi)
filters = [stack_filters(i, axis="y", move_axis=False) for i in d]
filters = stack_filters(np.stack(filters), axis='x')
plt.imshow(filters)
plt.ylabel("filters")
plt.xlabel("nbr train images (*{})".format(self.step))
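# ------------------------------------------------------------------ #
# A hedged usage sketch of the train/monitor API above: net,          #
# criterion, optimizer and the data loaders are assumed to be built   #
# elsewhere, and the monitor steps are illustrative only.             #
# ------------------------------------------------------------------ #
#
#     monitors = [Running_Loss(), Train_Error(), (Test_Error(testloader), 2000)]
#     resume = train(net, trainloader, criterion, optimizer,
#                    nbr_epochs=2, train_monitors=monitors)
#     # continue training later from the saved state
#     resume = train(net, trainloader, criterion, optimizer,
#                    nbr_epochs=1, train_monitors=monitors, resume=resume)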
|
import os
import django
import re
from PIL import Image
from django.core.files.base import ContentFile
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ImageAnnotation.settings")
django.setup()
from Annotation import models
from django.core.files import File
# load_path="/Users/qianzheng/Downloads/fd3/"
# pathDir=os.listdir(load_path)
# for path_dir in pathDir:
# if path_dir !='.DS_Store':
# p=os.listdir(load_path+path_dir)
# i=0
# id_list=[]
# # print(p)
# for pp in p:
# if pp[:-4]==path_dir:
# print(pp[:-4])
# elif pp[-4:]=='.jpg':
# pass
width=100
height=100
id_list=[1,2,3]
filename="北山"
img=open('/Users/qianzheng/Downloads/fd/9.jpg','rb')
print(img)
myfile=File(img)
print(myfile)
# img.save()
obj=models.Painting(width=width,height=height,id_list=id_list,filename=filename,picture_url= 'img/5.jpg')
obj.save() |
# -*- coding:utf-8 -*-
###########################################################################
#                                                                         #
#  Program : computes various performance metrics (RMSE/MAE, recall,     #
#            precision, ARHR) for recommendation results                 #
#                                                                         #
###########################################################################
import numpy as np
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
def rmse_mae(pred, actual):
pred = pred[actual.nonzero()].flatten()
actual = actual[actual.nonzero()].flatten()
return np.sqrt(mean_squared_error(pred, actual)), mean_absolute_error(pred, actual)
def recall_precision(pred, test, topN = 20):
row, column = test.shape
hit = 0
recall = 0
precision = 0
for user in range(row):
actual = np.argsort(-test[user, test[user].nonzero()]).flatten()
predict = np.argsort(-pred[user, pred[user].nonzero()]).flatten()
hit += len(set(actual).intersection(set(predict[:topN])))
recall += actual.size
precision += topN
return hit, hit/recall, hit/precision
def recall_precision_arhr(pred, test, topN = 20):
row, column = test.shape
hit, recall, precision, arhr = 0, 0, 0, 0
for user in range(row):
actual = np.argsort(-test[user, test[user].nonzero()]).flatten()
predict = np.argsort(-pred[user, pred[user].nonzero()]).flatten()
hit += len(set(actual).intersection(set(predict[:topN])))
topn_list = predict[:topN].tolist()
hit_list = list(set(actual).intersection(set(predict[:topN])))
for h in hit_list:
arhr += 1 / (topn_list.index(h) + 1)
recall += actual.size
precision += topN
return hit, hit/recall, hit/precision, arhr/row
'''
def calculate_recall_precision_for_qrlatentfactor(pred_matrix, test, topN = 20):
row, column = test.shape
hit = 0
recall = 0
precision = 0
for user in range(10):
actual = np.argsort(-test[user, test[user].nonzero()]).flatten()
predict = np.argsort(-pred_matrix[user, pred_matrix[user].nonzero()]).getA().flatten()
hit += len(set(actual).intersection(set(predict[:topN])))
recall += actual.size
precision += topN
return hit, hit/recall, hit/precision
def eval_recall_precision_ARHR_for_qrlatentfactor(pred_matrix, test, topN = 20):
row, column = test.shape
hit, recall, precision, arhr = 0, 0, 0, 0
for user in range(row):
actual = np.argsort(-test[user, test[user].nonzero()]).flatten()
predict = np.argsort(-pred_matrix[user, pred_matrix[user].nonzero()]).getA().flatten()
hit += len(set(actual).intersection(set(predict[:topN])))
topn_list = predict[:topN].tolist()
hit_list = list(set(actual).intersection(set(predict[:topN])))
for h in hit_list:
arhr += 1 / (topn_list.index(h) + 1)
recall += actual.size
precision += topN
return hit, hit/recall, hit/precision, arhr/row
'''
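# A small self-contained demo of rmse_mae on a toy rating matrix (the values
# below are made up for illustration):
if __name__ == '__main__':
    actual = np.array([[5, 0, 3], [4, 2, 0]])
    pred = np.array([[4.5, 1.0, 2.5], [4.0, 2.5, 0.5]])
    # only positions where the actual rating is nonzero are scored
    print(rmse_mae(pred, actual))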
|
from django.contrib.auth.decorators import login_required
from django.shortcuts import render
from .formsVentas import VehiculoFormAll, VehiculoFormOne, ClienteForm, UserForm
from administracion.models import Vehiculo
from django.http import HttpResponseRedirect
from django.contrib.sessions.models import Session
from django.contrib.auth.models import User
from django.contrib import messages
from django.views.decorators.csrf import csrf_protect, csrf_exempt
from django.contrib.auth.hashers import make_password
from django.contrib.auth.decorators import user_passes_test
def es_vendedor(user):
return user.groups.filter(name='Vendedores').exists()
@user_passes_test(es_vendedor, login_url='/login/')
@login_required
def inicio(request):
return render(request, 'ventas/index.html')
@user_passes_test(es_vendedor, login_url='/login/')
@login_required
def listarCarros(request):
carros = VehiculoFormAll.vehiculos
user_form = VehiculoFormAll(request.POST)
if request.method == 'POST':
if user_form.is_valid():
marca = user_form.cleaned_data['marcaChoice']
precio = user_form.cleaned_data['precioChoice']
carros = VehiculoFormAll.vehiculosFiltro(marca,precio)
else:
user_form = VehiculoFormAll()
return render(request, 'ventas/listarCarros.html', { 'user_form': user_form, 'carros': carros})
@user_passes_test(es_vendedor, login_url='/login/')
@login_required
def comprarCarro(request, codigo_vehiculo):
if request.method == "POST":
user_form = ClienteForm(request.POST)
vehiculo = VehiculoFormOne.get(codigo_vehiculo)
if user_form.is_valid():
username = user_form.cleaned_data['clienteChoice']
session = Session.objects.get(session_key=request.session.session_key)
uid = session.get_decoded().get('_auth_user_id')
user = User.objects.get(pk=uid)
placa = ClienteForm.generarPlaca()
porcentaje_descuento = user_form.cleaned_data['porcentaje_descuento']
vendido = VehiculoFormOne.save(vehiculo.codigo_vehiculo, username, user, placa, porcentaje_descuento)
if vendido:
messages.success(request, "El "+vehiculo.marca+" del modelo "+vehiculo.modelo+ " con placas "+placa+" ha sido vendido exitosamente a "+username.username+" por "+user.username+"!")
else:
messages.warning(request, "Usted no es un vendedor, por lo tanto no puede vender")
else:
messages.warning(request, "No has diligenciado correctamente todos los campos")
else:
vehiculo = VehiculoFormOne.get(codigo_vehiculo)
vehiculo.valor = VehiculoFormAll.soloCosto(vehiculo)
user_form = ClienteForm()
return render(request, 'ventas/comprarCarro.html', { 'user_form': user_form, 'vehiculo': vehiculo})
@user_passes_test(es_vendedor, login_url='/login/')
@login_required
def cotizarCarros(request):
carros = VehiculoFormAll.vehiculos
user_form = VehiculoFormAll(request.POST)
if request.method == 'POST':
if user_form.is_valid():
marca = user_form.cleaned_data['marcaChoice']
precio = user_form.cleaned_data['precioChoice']
carros = VehiculoFormAll.vehiculosFiltro(marca,precio)
else:
user_form = VehiculoFormAll()
return render(request, 'ventas/cotizarCarros.html', { 'user_form': user_form, 'carros': carros})
@user_passes_test(es_vendedor, login_url='/login/')
@login_required
def cotizarCarro(request,codigo_vehiculo):
vehiculo = VehiculoFormOne.get(codigo_vehiculo)
cartX =VehiculoFormOne.marcar(vehiculo)
if cartX != 0:
messages.warning(request, "El "+vehiculo.marca+" del modelo "+vehiculo.modelo+ " ya había sido cotizado")
else:
messages.success(request, "El "+vehiculo.marca+" del modelo "+vehiculo.modelo+ " ha sido agregado a la cotizacion")
carros = VehiculoFormAll.vehiculos
user_form = VehiculoFormAll(request.POST)
if request.method == 'POST':
if user_form.is_valid():
marca = user_form.cleaned_data['marcaChoice']
precio = user_form.cleaned_data['precioChoice']
carros = VehiculoFormAll.vehiculosFiltro(marca,precio)
else:
user_form = VehiculoFormAll()
return render(request, 'ventas/cotizarCarros.html', { 'user_form': user_form, 'carros': carros})
@user_passes_test(es_vendedor, login_url='/login/')
@login_required
def cotizacion(request):
vehiculos = VehiculoFormAll.vehiculoCompleto(VehiculoFormOne.cotizados())
costo = VehiculoFormOne.costoCotizados(vehiculos)
user_form = ClienteForm(request.POST)
if request.method == 'POST' and "vender" in request.POST:
if user_form.is_valid():
username = user_form.cleaned_data['clienteChoice']
session = Session.objects.get(session_key=request.session.session_key)
uid = session.get_decoded().get('_auth_user_id')
user = User.objects.get(pk=uid)
i = 0
vendido = True
for vehiculo in vehiculos:
placa = ClienteForm.generarPlaca()
porcentaje_descuento = user_form.cleaned_data['porcentaje_descuento']
vendido = VehiculoFormOne.save(vehiculo.codigo_vehiculo, username, user, placa, porcentaje_descuento)
i=i+1
if i==0:
messages.warning(request, "No hay carros en la cotizacion")
elif vendido:
VehiculoFormOne.borrarCotizados(vehiculos)
messages.success(request, "Todos los vehiculos han sido vendidos a "+str(username.username)+" por un valor total de $"+str(costo))
else:
messages.warning(request, "Usted no es un vendedor, por lo tanto no puede vender")
else:
messages.warning(request, "No has diligenciado correctamente todos los campos")
elif request.method == 'POST' and "eliminar" in request.POST:
VehiculoFormOne.borrarCotizados(vehiculos)
messages.success(request, "Los vehiculos de la cotizacion han sido eliminados")
else:
user_form = ClienteForm()
return render(request, 'ventas/cotizacion.html', {'user_form': user_form, 'vehiculos': vehiculos, 'costo': costo})
@user_passes_test(es_vendedor, login_url='/login/')
@login_required
@csrf_exempt
def cliente(request):
if request.method == 'POST':
        # form was submitted
user_form = UserForm(request.POST)
if user_form.is_valid():
usuario = user_form
usuario.password = make_password(user_form.cleaned_data['password'])
usuario.is_superuser = False
usuario.username = user_form.cleaned_data['username']
usuario.first_name = user_form.cleaned_data['first_name']
usuario.last_name = user_form.cleaned_data['last_name']
usuario.email = user_form.cleaned_data['email']
usuario.is_staff = False
usuario.is_active = True
usuario.cedula = user_form.cleaned_data['cedula']
usuario.direccion = user_form.cleaned_data['direccion']
usuario.fecha_de_nacimiento = user_form.cleaned_data['fecha_de_nacimiento']
usuario.telefono = user_form.cleaned_data['telefono']
UserForm.crearCliente(usuario)
messages.success(request,"El usuario "+usuario.username+" se ha creado satisfactoriamente")
else:
messages.warning(request, "No has diligenciado correctamente todos los campos o el cliente ya existe")
else:
        # initial (empty) form
user_form = UserForm()
return render(request, 'ventas/crearCliente.html', { 'user_form': user_form}) |
from django.contrib import admin
from django.urls import path , include
from django.conf import settings
from django.conf.urls.static import static
from app1.views import error_500
from django.conf.urls import handler500
urlpatterns = [
path('admin/', admin.site.urls),
path('' , include("app1.urls"))
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL,document_root=settings.MEDIA_ROOT)
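# route unhandled server errors (HTTP 500) to the custom view from app1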
handler500 = error_500
|
'''
Installation, deployment, and packaging script. Declare the required libraries and their versions in setup.py so that the package can be installed on the target machine with `python setup.py install`.
'''
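# A minimal setup.py sketch illustrating the idea above (the package name,
# version, and dependency pins are placeholders, not from this project):
from setuptools import setup, find_packages

setup(
    name="example-package",  # hypothetical project name
    version="0.1.0",
    packages=find_packages(),
    install_requires=[
        "requests>=2.20",  # declare required libraries and versions here
        "PyYAML>=5.1",
    ],
) |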
import sys
from PyQt5.QtWidgets import QApplication, QMessageBox
from MainInterface import MainWindow
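# PyQt5 can silently swallow Python exceptions raised inside Qt slots; keep a
# reference to the original excepthook and surface errors in a dialog instead.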
sys._excepthook = sys.excepthook
def my_exception_hook(exctype, value, traceback):
msg = QMessageBox()
msg.setIcon(QMessageBox.Warning)
msg.setText(str(value))
msg.setInformativeText(str(traceback))
msg.setWindowTitle(str(exctype))
msg.exec_()
# Print the error and traceback
print(exctype, value, traceback)
# Call the normal Exception hook after
sys._excepthook(exctype, value, traceback)
sys.exit(1)
if __name__ == '__main__':
sys.excepthook = my_exception_hook
app = QApplication(sys.argv)
ex = MainWindow()
sys.exit(app.exec_())
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayEbppInstserviceSignresultBatchqueryResponse(AlipayResponse):
def __init__(self):
super(AlipayEbppInstserviceSignresultBatchqueryResponse, self).__init__()
self._list = None
@property
def list(self):
return self._list
@list.setter
def list(self, value):
self._list = value
def parse_response_content(self, response_content):
response = super(AlipayEbppInstserviceSignresultBatchqueryResponse, self).parse_response_content(response_content)
if 'list' in response:
self.list = response['list']
|
import argparse
import json
import sys
import time
from copy import deepcopy
from json import JSONDecodeError
from math import factorial
import pq_trees
def fingerprint(tree, root=True):
    """Count the leaf orderings the PQ-tree admits: children of a P-node
    permute freely (the root only up to rotation), a Q-node can only be flipped."""
# orders = tree.cardinality()
# if isinstance(tree, pq_trees.P):
# orders //= tree.number_of_children()
# return orders
orders = 1
if isinstance(tree, frozenset):
return 1
if tree.number_of_children() > 1:
if isinstance(tree, pq_trees.P):
childCount = tree.number_of_children()
if root:
childCount -= 1
orders *= factorial(childCount)
else:
assert isinstance(tree, pq_trees.Q)
orders *= 2
for child in tree:
orders *= fingerprint(child, False)
return orders
def processMatrix(matrix, all_restrictions):
mapping = [[i] for i in range(matrix["cols"])]
index = 0
for restriction in matrix["restrictions"]:
consecutive = restriction if not all_restrictions else restriction["consecutive"]
for col in consecutive:
mapping[col].append(-index - 1)
index += 1
if not all_restrictions:
del matrix["restrictions"]
mapping = [frozenset(s) for s in mapping]
start = time.perf_counter()
tree = pq_trees.P(mapping)
matrix["tree_type"] = "SageMath2"
matrix["name"] = algname
matrix["id"] = "{}/{}".format(algname, matrix["id"])
matrix["total_restrict_time"] = 0
matrix["init_time"] = matrix["total_time"] = (time.perf_counter() - start) * 1000_000_000
matrix_errors = set()
# matrix["sage_tree"] = str(tree)
last_idx = 0
for idx in range(0, matrix["rows"]):
start = time.perf_counter()
last = idx == matrix["rows"] - 1
last_idx = idx
try:
tree.set_contiguous(-idx - 1)
# restriction["sage_tree"] = str(tree)
possible = True
except ValueError as e:
if e.args[0] != pq_trees.impossible_msg:
raise
else:
possible = False
        rtime = (time.perf_counter() - start) * 1_000_000_000
if not last and not possible:
matrix["valid"] = False
matrix_errors.add("possible")
break
if last or all_restrictions:
restriction_results = matrix["restrictions"][idx] if all_restrictions else matrix["last_restriction"]
errors = []
restriction_results["time"] = rtime
restriction_results["possible"] = possible
fp = str(fingerprint(tree))
restriction_results["fingerprint"] = fp[:5] + str(len(fp))
if possible and restriction_results["fingerprint"] != restriction_results["exp_fingerprint"]:
errors.append("fingerprint")
if possible is not restriction_results["exp_possible"]:
errors.append("possible")
restriction_results["errors"] = errors
restriction_results["valid"] = not errors
if all_restrictions:
del restriction_results["consecutive"]
matrix_errors.update(errors)
matrix["total_time"] += rtime
matrix["total_restrict_time"] += rtime
matrix["valid"] = not matrix_errors
matrix["errors"] = list(matrix_errors)
matrix["complete"] = last_idx == matrix["rows"] - 1
parser = argparse.ArgumentParser()
parser.add_argument("-n", type=str, default="SageMath2")
parser.add_argument("-r", type=int, default=1)
parser.add_argument("-A", default=False, action='store_true')
parser.add_argument("f", type=str)
args = parser.parse_args()
repetitions = args.r
filename = args.f
all_restrictions = args.A
algname = args.n
for i in range(repetitions):
with open(filename, "r") as f:
matrix = json.load(f)
processMatrix(matrix, all_restrictions)
    if i == repetitions - 1:
print(json.dumps(matrix))
|
from .lpp_type import get_lpp_type
try:
import logging
except ImportError:
    class logging:
        @staticmethod
        def debug(*args, **kwargs):
            pass
class LppData(object):
"""A single LPP data object representation
Attributes:
chn (int): data channel number
type (int): data type ID
value (tuple): data value(s)
"""
def __init__(self, chn, type_, value):
"""Create a LppData object with given attriubes
channel `chn`, type `type_`, and values `value`
"""
logging.debug("LppData.__init__")
if value is None:
raise ValueError("Empty value!")
if not isinstance(value, tuple):
value = (value,)
logging.debug("LppData(channel=%d, type=%d, len=%d)",
chn, type_, len(value))
if get_lpp_type(type_) is None:
raise ValueError("Invalid LPP data type!")
if not len(value) == get_lpp_type(type_).dimension:
raise ValueError("Invalid number of data values!")
self.channel = chn
self.type = type_
self.value = value
def __str__(self):
"""Return a pretty string representation of the LppData instance"""
logging.debug("LppData.__str__")
return 'LppData(channel = {}, type = {}, value = {})'.format(
self.channel, get_lpp_type(self.type).name, str(self.value))
@classmethod
def from_bytes(class_object, buf):
"""Parse LppData from given a byte string"""
logging.debug("LppData.from_bytes: buf=%s, length=%d",
buf, len(buf))
if len(buf) < 3:
raise BufferError("Invalid buffer size!")
chn = buf[0]
type_ = buf[1]
size = get_lpp_type(type_).size
logging.debug("LppData.from_bytes: date_size = %d", size)
if len(buf) < size + 2:
raise BufferError("Buffer too small!")
value = get_lpp_type(type_).decode(buf[2:(2 + size)])
return class_object(chn, type_, value)
def bytes(self):
"""Convert LppData instance into a byte string"""
logging.debug("LppData.bytes")
hdr_buf = bytearray([self.channel, self.type])
dat_buf = get_lpp_type(self.type).encode(self.value)
buf = hdr_buf + dat_buf
logging.debug(" out: bytes = %s, length = %d", buf, len(buf))
return buf
def bytes_size(self):
"""Return the length of the LppData byte string representation"""
logging.debug("LppData.bytes_size")
return (get_lpp_type(self.type).size + 2)
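# A hedged round-trip sketch (assumes type 0x67, the Cayenne LPP temperature
# type with dimension 1, is registered in lpp_type):
#
#     data = LppData(1, 0x67, (22.5,))
#     buf = data.bytes()
#     assert str(LppData.from_bytes(buf)) == str(data)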
|
def zip(input1, input2):  # interleaves two equal-length strings (note: this shadows the built-in zip)
if len(input1) == len(input2):
output = ""
for i in range(len(input1)):
output += input1[i] + input2[i]
return output
else:
return "Input two Strings of equal length."
print(zip("String", "Fridge"))
print(zip("Dog", "Cat"))
print(zip("True", "Tre4"))
print(zip("True", "QACommunity"))
|
import sys
def solve(N, xs):
    # table[v] holds the indices that point at v (incoming edges)
    table = [set() for _ in range(N)]
    for idx, x in enumerate(xs):
        table[x].add(idx)
    # peel off nodes with no incoming edges until only cycles remain
    stack = [idx for idx, s in enumerate(table) if not s]
    while stack:
        idx = stack.pop()
        nxt = xs[idx]
        table[nxt].remove(idx)
        if not table[nxt]:
            stack.append(nxt)
    # the surviving nodes (still pointed at) are exactly those on cycles
    return sorted(idx for idx, s in enumerate(table) if s)
def main():
stdin = sys.stdin
N = int(stdin.readline())
X = [int(x) - 1 for x in stdin.readlines()]
res = solve(N, X)
print(len(res))
print('\n'.join(str(x + 1) for x in res))
if __name__ == "__main__":
main()
from util import *
def test_main():
in_str = """
7
3
1
1
5
5
4
6
""".strip()
out_str = """
3
1
3
5
""".strip()
assert evaluate_via_io(main, in_str) == out_str
def test_add1():
in_str = """
1
1
""".strip()
out_str = """
1
1
""".strip()
assert evaluate_via_io(main, in_str) == out_str
def test_add2():
in_str = """
3
2
2
1
""".strip()
out_str = """
1
2
""".strip()
assert evaluate_via_io(main, in_str) == out_str
|
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 20 17:54:19 2021
# Team:
# * Juliane
# * Rubens Lopes
# * Aline Soares
"""
from numpy import linalg,apply_along_axis
from pylab import plot,show,pcolor,colorbar,bone
from minisom import MiniSom
import numpy as np
# Create an object with the dataset and its attributes
parkinson = np.genfromtxt('D:/OneDrive/Faculdade/S7/parkinson/parkinson_formated.csv', delimiter= ',')
# inspect the loaded dataset
type(parkinson)
features = parkinson[:,0:-1]
# normalization
data = apply_along_axis(lambda x: x/linalg.norm(x),1,features)
for epocas in [100, 200, 300, 1000]:
### Initialization and training ###
som = MiniSom(20,20,754,sigma=1.0,learning_rate=0.01)
som.random_weights_init(data)
som.train_random(data,epocas)
bone()
pcolor(som.distance_map().T)
colorbar()
t = parkinson[:,-1].astype(int)
# use different colors and markers for each label
markers = ['o','s']
colors = ['r','g']
for cnt,xx in enumerate(data):
w = som.winner(xx) # getting the winner
        # place a marker on the winning position for the sample xx
plot(w[0]+.5,w[1]+.5,markers[t[cnt]],markerfacecolor='None',
markeredgecolor=colors[t[cnt]],markersize=12,markeredgewidth=2)
show()
|
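# Hypothetical HourlyEmployee supporting both call shapes used below,
# (id, name, hourly rate, hours) and (id, name, flat salary); this class is
# an assumption, not part of the original source:
class HourlyEmployee:
    def __init__(self, emp_id, name, rate_or_salary, hours=None):
        self.emp_id = emp_id
        self.name = name
        self.rate_or_salary = rate_or_salary
        self.hours = hours
    def calculate(self):
        # pay = rate * hours when hours are given, otherwise the flat amount
        if self.hours is not None:
            return self.rate_or_salary * self.hours
        return self.rate_or_salary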
employees = {1: HourlyEmployee(1, 'joe', 10, 80), 2: HourlyEmployee(2, 'Mike', 80000)}
for p in employees.values():
    print(p.calculate()) |
# Generated by Django 2.1.7 on 2019-05-01 14:01
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('index', '0004_auto_20190501_2201'),
]
operations = [
migrations.AddField(
model_name='jobinfoclik',
name='companySize',
field=models.CharField(default='', max_length=255),
),
migrations.AddField(
model_name='jobinfoclik',
name='companyType',
field=models.CharField(default='', max_length=255),
),
migrations.AddField(
model_name='jobinfoclik',
name='info',
field=models.CharField(default='', max_length=255),
),
migrations.AddField(
model_name='jobinfoclik',
name='welfare',
field=models.CharField(default='', max_length=255),
),
migrations.AddField(
model_name='jobinfoclik',
name='workingExp',
field=models.CharField(default='', max_length=255),
),
migrations.AlterField(
model_name='jobinfoclik',
name='city',
field=models.CharField(max_length=255),
),
migrations.AlterField(
model_name='jobinfoclik',
name='companyName',
field=models.CharField(max_length=255),
),
migrations.AlterField(
model_name='jobinfoclik',
name='idName',
field=models.CharField(max_length=255),
),
migrations.AlterField(
model_name='jobinfoclik',
name='name',
field=models.CharField(max_length=255),
),
migrations.AlterField(
model_name='jobinfoclik',
name='salary',
field=models.CharField(max_length=255),
),
migrations.AlterField(
model_name='jobinfoclik',
name='time',
field=models.CharField(max_length=255),
),
]
|
__author__ = 's7a'
# All imports
from nltk.tree import Tree
# The Relative clauses class
class RelativeClauses:
# Constructor for the Relative Clauses class
def __init__(self):
self.has_wh_word = False
self.np_subtrees = []
self.wh_subtrees = []
self.other_subtrees = []
# Break the tree
def break_tree(self, tree):
try:
self.has_wh_word = False
self.np_subtrees = []
self.wh_subtrees = []
self.other_subtrees = []
self.parse_tree(tree)
print "Relative Clause: " + str(self.has_wh_word)
if self.has_wh_word:
wh_part = " ".join(" ".join(self.wh_subtrees).split()[1:])
result_string = " ".join(self.np_subtrees) + " " + wh_part + "."
result_string += (" ".join(self.np_subtrees) + " ".join(self.other_subtrees)).replace(",", "")
else:
result_string = " ".join(tree.leaves())
print "Relative Clause Result: " + result_string
return result_string
        except Exception:
return " ".join(tree.leaves())
# Parse the tree
def parse_tree(self, tree):
if type(tree) == Tree:
sentence_root = tree[0]
if type(sentence_root) == Tree:
if sentence_root.label() == "S":
first_node = sentence_root[0]
if type(first_node) == Tree:
if first_node.label() == "NP":
for node in first_node:
if type(node) == Tree:
if node.label() == "NP":
self.np_subtrees.append(' '.join(node.leaves()))
elif node.label() == "SBAR":
node_1 = node[0]
if type(node_1) == Tree:
if node_1.label() == "WHNP":
self.has_wh_word |= True
self.wh_subtrees.append(' '.join(node.leaves()))
else:
self.other_subtrees.append(' '.join(node.leaves()))
else:
                                        self.other_subtrees.append(' '.join(node.leaves()))
else:
self.other_subtrees.append(node)
else:
self.other_subtrees.append(' '.join(first_node.leaves()))
else:
self.other_subtrees.append(first_node)
flag = 0
for node in sentence_root:
if flag == 0:
flag = 1
continue
if type(node) == Tree:
self.other_subtrees.append(' '.join(node.leaves()))
else:
self.other_subtrees.append(node) |
import urllib2 # the lib that handles the url stuff
import csv
data = urllib2.urlopen("https://raw.githubusercontent.com/clarketm/proxy-list/master/proxy-list.txt").read(20000) # it's a file like object and works just like a file
data = data.split("\n")
proxies = ''  # renamed from `list` to avoid shadowing the built-in
for line in data: # files are iterable
if len(line)>1:
if line[0].isdigit():
if "-S" in line:
list += "HTTPS://" + line.rsplit(' ')[0] + \n
else:
list += "HTTP://" + line.rsplit(' ')[0] + \n
with open("Proxy.txt", "w") as text_file:
    text_file.write(proxies) |
def createDictionary():
    '''Returns a tiny dictionary mapping number words to values'''
number = dict()
number['One'] = 1
number['Two'] = 2
number['Three'] = 3
return number
def main():
dictionary = createDictionary()
print(dictionary['One'],dictionary['Two'],dictionary['Three'])
main() # run the program |
import json
import praw
def main():
with open("private/creds.json") as f:
cj = json.load(f)
creds = cj["credentials"]
reddit = praw.Reddit(
client_id = creds["id"],
client_secret = creds["secret"],
user_agent = creds["appname"],
username = creds["username"],
password = creds["password"]
)
subreddit = reddit.subreddit('Cricket')
top_posts = subreddit.top(limit=10)
for index, post in enumerate(top_posts):
print(f"{index}. {post.title} [{post.shortlink}]")
if __name__ == '__main__':
main()
|
import os
path = 'C:/Users/64Squares/Desktop/Nikhil/financials_automated'
excel_files = [f for f in os.listdir(path) if f.endswith('.xlsx')]
print(excel_files)
# let's check for a particular value or file using a regex
import re
# iterate the list
for item in excel_files:
    # re.compile's second argument is a flags bitmask, so use re.search instead
    if re.search('.*SAIS.*', item):
        print("Found a match for SAIS:", item)
        # if the item matches, then output the school id or something like that
|
'''
Start App
'''
import Tkinter as tk
from tkMessageBox import *
import ttk, tkFont, logging, os
import UsConfig as Config
from GUI.Dialogs.Login import Login
import Common.Constants.Singal as Signal
from Common.Notifier import Observer
from Common.Constants import DBStatus
from Common.Utilities import *
from aglyph.assembler import Assembler
from aglyph.context import XMLContext
from abc import ABCMeta, abstractmethod
from GUI.Dialogs.AddRecord import AddRecord
class MainWinI(object):
"""
Interface for the main window view
"""
__metaclass__ = ABCMeta
@abstractmethod
def getCategoryPanel(self):
"get the reference of the panel containing databases and categories"
pass
@abstractmethod
def getRecordPanel(self):
"get the reference of the panel containing records"
pass
@abstractmethod
def getTopMenu(self):
"get the reference of the top Menu"
pass
@abstractmethod
def getToolbar(self):
"get the reference of the toolbar panel"
pass
class MainWin(tk.Tk, MainWinI):
"""
Main GUI class of the app
"""
def __init__(self, controller, notifier, localization):
tk.Tk.__init__(self)
self.__controller = controller
self.__notifier = notifier
self.__localization = localization
self.__controller.addView(self)
self.__componentsRef = {}
self.init()
def init(self):
#add menu
self.addMenu()
#add data panels
self.addDataPanels()
#add bottom tool bar
self.addToolBar()
self.withdraw()
login = Config.ASSEMBLER.assemble("LoginView")
#login = Config.ASSEMBLER.assemble("AddNewDBView")
centerTopLevel(login)
#centerTopLevel(Login(self, self))
def LoginOk(self):
self.deiconify()
print "Login OK!"
def addMenu(self):
self.mainMenu = MenuMain(self, self.__notifier, self.__localization)
self.__componentsRef['top_menu'] = self.mainMenu
def addDataPanels(self):
"""Add two data panels - left for data bases and categories; and right - for the records"""
#add draggable
self.panes = tk.PanedWindow(self)
self.panes.pack(fill="both", expand="yes")
self.panes.configure(bg="#383838")
#add category panel(left)
self.categoryPanel = CategoryPanel(self.panes, self.__notifier, path=r"D:/Temp/Foto/")
self.categoryPanel.pack(expand=YES, fill=tk.BOTH)
self.panes.add(self.categoryPanel)
#add record panel(right)
self.recordPanel = RecordPanel(self.panes, self.__notifier)
self.recordPanel.pack(expand=YES, fill=tk.X)
self.panes.add(self.recordPanel)
self.__componentsRef['category_panel'] = self.categoryPanel
self.__componentsRef['record_panel'] = self.recordPanel
def addToolBar(self):
button = ttk.Button(self, text="Save")
button.pack(side=tk.LEFT, padx=2, pady=2)
button1 = ttk.Button(self, text="Save1")
button1.pack(side=tk.LEFT, padx=2, pady=2)
self.__componentsRef['toolbar'] = None
def getCategoryPanel(self):
return self.__componentsRef['category_panel']
def getRecordPanel(self):
return self.__componentsRef['record_panel']
def getTopMenu(self):
return self.__componentsRef['top_menu']
def getToolbar(self):
return self.__componentsRef['toolbar']
class CategoryPanel(tk.Frame):
"""Category data panel(from the left)"""
def __init__(self, parent, notifier, path):
tk.Frame.__init__(self, parent)
self.tree = ttk.Treeview(self)
ysb = AutoScrollbar(self, orient='vertical', command=self.tree.yview)
#xsb = ttk.Scrollbar(self, orient='horizontal', command=self.tree.xview)
self.tree.configure(yscroll=ysb.set)
self.tree.heading('#0', text='Databases', anchor='w')
#ttk.Style().configure("Treeview", background="white",
# foreground="black", fieldbackground="red", selectforeground='green')
abspath = os.path.abspath(path)
#root_node = self.tree.insert('', 'end', text=abspath, open=True)
#self.process_directory(root_node, abspath)
tk.Grid.rowconfigure(self, 0, weight=1)
tk.Grid.columnconfigure(self, 0, weight=1)
self.tree.grid(row=0,column=0, sticky=tk.N+tk.S+tk.E+tk.W)
ysb.grid(row=0,column=1, sticky=tk.N+tk.S+tk.E+tk.W)
#xsb.grid(row=1,column=0, sticky=tk.E+tk.W)
#self.tree.pack(side=tk.LEFT, expand=tk.YES, fill=tk.BOTH)
#ysb.pack(side=tk.RIGHT, fill=tk.Y, anchor=tk.E)
#Add context menu
self.tree.bind("<ButtonRelease-3>", lambda event : notifier.sendSignal(Signal.CATEGORY_RIGHT_CLICK, event))
self.tree.bind("<Double-Button-1>", lambda event : notifier.sendSignal(Signal.CATEGORY_DOUBLE_CLICK, event))
self.tree.bind("<ButtonRelease-1>", lambda event : notifier.sendSignal(Signal.CATEGORY_SINGLE_CLICK, event))
self.tree.bind("<Return>", lambda event : notifier.sendSignal(Signal.CATEGORY_DOUBLE_CLICK, event))
#self.tree.bind("<Button-1>", lambda event : notifier.sendSignal(Signal.CATEGORY_SINGLE_CLICK, event))
def process_directory(self, parent, path):
for p in os.listdir(path):
abspath = os.path.join(path, p)
isdir = os.path.isdir(abspath)
oid = self.tree.insert(parent, 'end', text=p, open=False)
if isdir:
self.process_directory(oid, abspath)
class RecordPanel(tk.Frame):
"""Record data panel(from the right)"""
def __init__(self, parent, notifier):
tk.Frame.__init__(self, parent)
self.tree = ttk.Treeview(self)
#ttk.Style().configure("Treeview", background="#EEEEEE",
# foreground="black", fieldbackground="red", selectforeground='green')
i = r'D:/uAQrp.gif'
self.root_pic3 = tk.PhotoImage(file=i)
#tree = ttk.Treeview(master, height="10")
self.tree["columns"]=("one","two", "three", "four")
self.tree.column("one", width=100 )
self.tree.column("two", width=150)
self.tree.column("three", width=100 )
self.tree.column("four", width=150)
        self.tree.heading('#0', text='Site', anchor='w')
#self.tree.heading("one", image=self.root_pic3, text="Username", anchor='e')
self.tree.heading("one", text="Username", anchor='w')
self.tree.heading("two", text="Email", anchor='w')
self.tree.heading("three", text="Password", anchor='w')
self.tree.heading("four", text="Comments", anchor='w')
self.tree['height'] = 20
style = ttk.Style()
helv36 = tkFont.Font(family='Helvetica',
size=8, weight='bold')
style.configure("Treeview.Heading", foreground='black', font=helv36, anchor='w')
#im = tk.PhotoImage(r'D:/uAQrp.gif')
#self.tree.insert("" , 0, '0', text="Line", values=("2A","2b", "3f", "5"), image = im)
i = r'D:/uAQrp.gif'
self.root_pic = tk.PhotoImage(file=i)
#root_node = self.tree.insert('', 'end', text=' Work Folder', open=True, image=self.root_pic)
#root_node2 = self.tree.insert('', 'end', text=' Work Folder2', image=self.root_pic)
self.tree.bind("<ButtonRelease-1>", lambda event : notifier.sendSignal(Signal.RECORD_CLICK, event))
self.tree.bind("<Double-1>", self.on_double_click)
#Add context menu
self.tree.bind("<ButtonRelease-3>", lambda event : notifier.sendSignal(Signal.RECORD_RIGHT_CLICK, event))
#add scrollbar y
ysb = AutoScrollbar(self, orient='vertical', command=self.tree.yview)
self.tree.configure(yscroll=ysb.set)
tk.Grid.rowconfigure(self, 0, weight=1)
tk.Grid.columnconfigure(self, 0, weight=1)
self.tree.grid(row=0,column=0, sticky=tk.N+tk.S+tk.E+tk.W)
ysb.grid(row=0,column=1, sticky=tk.N+tk.S+tk.E+tk.W)
#ysb.pack(side=tk.RIGHT, fill=tk.Y, anchor=tk.E)
#self.tree.pack(side=tk.LEFT, expand=tk.YES, fill=tk.BOTH)
def on_double_click(self, event):
print "on_double_click"
class MenuMain(tk.Menu, Observer):
"""Top menu for the main app window"""
def __init__(self, parent, notifier, localization):
tk.Menu.__init__(self)
self.__parent = parent
self.__notifier = notifier
self.__localization = localization
#references for images
self.__images = {}
#references for menus(to change them via signals)
self.__menuRef = {}
self.__resourceManager = Config.ASSEMBLER.assemble("ResourceManager")
self.__createMenu()
self.__notifier.register(self)
def __createMenu(self):
top = tk.Menu(self.__parent)
self.__parent.config(menu=top)
#database menu
dbMenu = tk.Menu(top, tearoff=False)
self.__menuRef['database'] = dbMenu
dbMenu.add_command(label=self.__localization.getWord('create_db'),
command=lambda: self.__notifier.sendSignal(Signal.DB_ADD_NEW_SHOW_DIALOG, None),
underline=0,
image=self.__getImage('add_new_db_image'),
compound = LEFT)
dbMenu.add_command(label=self.__localization.getWord('import_db'),
command=lambda: self.__notifier.sendSignal(Signal.DB_ADD_EXISTING_SHOW_DIALOG, None),
underline=0,
image=self.__getImage('add_existing_db_image'),
compound = LEFT)
dbMenu.add_command(label=self.__localization.getWord('connect_db'),
command=lambda: self.__notifier.sendSignal(Signal.DB_CONNECT, None),
underline=0,
image=self.__getImage('connected_image'),
compound = LEFT,
state="disabled")
dbMenu.add_command(label=self.__localization.getWord('disconnect_db'),
command=lambda: self.__notifier.sendSignal(Signal.DB_DISCONNECT, None),
underline=0,
image=self.__getImage('disconnected_image'),
compound = LEFT,
state="disabled")
dbMenu.add_command(label=self.__localization.getWord('remove_db_from_config'),
command=lambda: self.__notifier.sendSignal(Signal.DB_REMOVE_FROM_CONFIG, None),
state="disabled")
dbMenu.add_command(label=self.__localization.getWord('remove_db_from_filesystem'),
command=lambda: self.__notifier.sendSignal(Signal.DB_REMOVE_FROM_FILE_SYSTEM, None),
underline=0, image=self.__getImage('remove_db_from_config_image'),
compound = LEFT,
state="disabled")
dbMenu.add_command(label=self.__localization.getWord('save'),
command=lambda: self.__notifier.sendSignal(Signal.SAVE_CURR_DB, None),
image=self.__getImage('save_image'),
underline=0,
compound = LEFT,
state="disabled")
dbMenu.add_command(label=self.__localization.getWord('quit'),
command=lambda: self.__notifier.sendSignal(Signal.APP_QUIT, 1),
underline=0)
top.add_cascade(label=self.__localization.getWord('main_menu_db'),
menu=dbMenu,
underline=0)
#Operations menu
operationsMenu = tk.Menu(top, tearoff=False)
#Category sub menu
categorySubmenu = tk.Menu(operationsMenu, tearoff=False)
categorySubmenu.add_command(label=self.__localization.getWord('add'),
command=lambda : self.__notifier.sendSignal(Signal.SHOW_ADD_NEW_CATEGORY_DIALOG, None),
underline=0,
image=self.__getImage('add'),
compound = LEFT,
state="disabled")
categorySubmenu.add_command(label=self.__localization.getWord('edit'),
command=lambda : self.__notifier.sendSignal(Signal.SHOW_EDIT_CATEGORY_DIALOG, None),
underline=0,
image=self.__getImage('edit'),
compound = LEFT,
state="disabled")
categorySubmenu.add_command(label=self.__localization.getWord('delete'),
command=lambda : self.__notifier.sendSignal(Signal.REMOVE_CATEGORY, None),
underline=0,
image=self.__getImage('delete'),
compound = LEFT,
state="disabled")
# Record sub menu
recordSubmenu = tk.Menu(operationsMenu, tearoff=False)
recordSubmenu.add_command(label=self.__localization.getWord('add'),
command=lambda : self.__notifier.sendSignal(Signal.SHOW_ADD_NEW_RECORD_DIALOG, None),
underline=0,
image=self.__getImage('add'),
compound = LEFT,
state="disabled")
recordSubmenu.add_command(label=self.__localization.getWord('edit'),
command=lambda : self.__notifier.sendSignal(Signal.SHOW_EDIT_RECORD_DIALOG, None),
underline=0,
image=self.__getImage('edit'),
compound = LEFT,
state="disabled")
recordSubmenu.add_command(label=self.__localization.getWord('delete'),
command=lambda : self.__notifier.sendSignal(Signal.REMOVE_RECORD, None),
underline=0,
image=self.__getImage('delete'),
compound = LEFT,
state="disabled")
operationsMenu.add_cascade(label=self.__localization.getWord('main_menu_caregory'), menu=categorySubmenu, underline=0)
operationsMenu.add_cascade(label=self.__localization.getWord('main_menu_record'), menu=recordSubmenu, underline=0)
top.add_cascade(label=self.__localization.getWord('main_menu_operations'), menu=operationsMenu, underline=0)
self.__menuRef['category'] = categorySubmenu
self.__menuRef['record'] = recordSubmenu
#settings menu
settingsMenu = tk.Menu(top, tearoff=False)
settingsMenu.add_command(label=self.__localization.getWord('user_settings'),
command=lambda: self.__notifier.sendSignal(Signal.SHOW_SETTINGS_DIALOG, None),
underline=0)
top.add_cascade(label=self.__localization.getWord('main_menu_settings'),
menu=settingsMenu,
underline=0)
#info menu
infoMenu = tk.Menu(top, tearoff=False)
infoMenu.add_command(label=self.__localization.getWord('info'),
command=lambda: self.__notifier.sendSignal(Signal.SHOW_INFO_DIALOG, None),
underline=0,
image=self.__getImage('info_image'),
compound = LEFT)
infoMenu.add_command(label=self.__localization.getWord('manual'),
command=lambda: self.__notifier.sendSignal(Signal.SHOW_MANUAL, None),
underline=0)
top.add_cascade(label=self.__localization.getWord('main_menu_info'),
menu=infoMenu,
underline=0)
def update(self, signal, data=None):
"""
Observer handler: called when a signal fires; enables or disables menu entries to match the current database/category/record selection.
"""
#try:
if signal == Signal.DB_SELECTED_CHANGED:
logging.info("DB changed: {}".format(data))
dbMenu = self.__menuRef['database']
categoryMenu = self.__menuRef['category']
recordMenu = self.__menuRef['record']
if not data:
#data base menu
dbMenu.entryconfig(self.__localization.getWord("connect_db"), state="disabled")
dbMenu.entryconfig(self.__localization.getWord("disconnect_db"), state="disabled")
dbMenu.entryconfig(self.__localization.getWord("remove_db_from_config"), state="disabled")
dbMenu.entryconfig(self.__localization.getWord("remove_db_from_filesystem"), state="disabled")
dbMenu.entryconfig(self.__localization.getWord("save"), state="disabled")
#operations menu
categoryMenu.entryconfig(self.__localization.getWord('add'), state="disabled")
categoryMenu.entryconfig(self.__localization.getWord('edit'), state="disabled")
categoryMenu.entryconfig(self.__localization.getWord('delete'), state="disabled")
recordMenu.entryconfig(self.__localization.getWord('add'), state="disabled")
recordMenu.entryconfig(self.__localization.getWord('edit'), state="disabled")
recordMenu.entryconfig(self.__localization.getWord('delete'), state="disabled")
else:
if data.status == DBStatus.DISCONNECTED:
#data base menu
dbMenu.entryconfig(self.__localization.getWord("connect_db"), state="normal")
dbMenu.entryconfig(self.__localization.getWord("remove_db_from_config"), state="normal")
dbMenu.entryconfig(self.__localization.getWord("remove_db_from_filesystem"), state="normal")
dbMenu.entryconfig(self.__localization.getWord("disconnect_db"), state="disabled")
dbMenu.entryconfig(self.__localization.getWord("save"), state="disabled")
#operations menu
categoryMenu.entryconfig(self.__localization.getWord('add'), state="disabled")
categoryMenu.entryconfig(self.__localization.getWord('edit'), state="disabled")
categoryMenu.entryconfig(self.__localization.getWord('delete'), state="disabled")
recordMenu.entryconfig(self.__localization.getWord('add'), state="disabled")
recordMenu.entryconfig(self.__localization.getWord('edit'), state="disabled")
recordMenu.entryconfig(self.__localization.getWord('delete'), state="disabled")
#if connected
else:
dbMenu.entryconfig(self.__localization.getWord("connect_db"), state="disabled")
dbMenu.entryconfig(self.__localization.getWord("disconnect_db"), state="normal")
dbMenu.entryconfig(self.__localization.getWord("remove_db_from_config"), state="normal")
dbMenu.entryconfig(self.__localization.getWord("remove_db_from_filesystem"), state="normal")
if data.getIfChanged():
dbMenu.entryconfig(self.__localization.getWord("save"), state="normal")
else:
dbMenu.entryconfig(self.__localization.getWord("save"), state="disabled")
#operations menu
categoryMenu.entryconfig(self.__localization.getWord('add'), state="normal")
recordMenu.entryconfig(self.__localization.getWord('add'), state="normal")
dbMenu.entryconfig(self.__localization.getWord("remove_db_from_config"), state="normal")
if signal == Signal.CATEGORY_SELECTED_CHANGED:
categoryMenu = self.__menuRef['category']
if not data:
#operations menu
categoryMenu.entryconfig(self.__localization.getWord('edit'), state="disabled")
categoryMenu.entryconfig(self.__localization.getWord('delete'), state="disabled")
else:
categoryMenu.entryconfig(self.__localization.getWord('edit'), state="normal")
categoryMenu.entryconfig(self.__localization.getWord('delete'), state="normal")
if signal == Signal.RECORD_SELECTED_CHANGED:
recordMenu = self.__menuRef['record']
if not data:
#operations menu
recordMenu.entryconfig(self.__localization.getWord('edit'), state="disabled")
recordMenu.entryconfig(self.__localization.getWord('delete'), state="disabled")
else:
#operations menu
recordMenu.entryconfig(self.__localization.getWord('edit'), state="normal")
recordMenu.entryconfig(self.__localization.getWord('delete'), state="normal")
def notdone(self):
self.__notifier.sendSignal(Signal.NOT_YET_AVAILABLE, None)
def __getImage(self, value):
if value not in self.__images:
self.__images[value] = PhotoImage(file=self.__resourceManager.getResource(value))
return self.__images[value]
# root = Config.ASSEMBLER.assemble("MainApp")
# #root = MainWin()
# root.title('menu_win')
# #root.iconbitmap(default=r'D:/icon.ico')
# root.title("EasyPass")
# root.iconbitmap(default=r'D:/logo.ico')
# centerRoot(root)
#
# #Config.ASSEMBLER.assemble("AddRecordView")
#
# root.mainloop()
|
#!/usr/bin/env python
import boto
from . import logs
log = logs.getLogger(__name__)
conn = None
def init(options):
global conn
conn = boto.connect_ses(options.aws_key, options.aws_secret)
def send_email(frm, to, subject, body):
"""Send an email through Amazon SES.
:param frm: sender address
:param to: recipient address or list of addresses
:param subject: message subject
:param body: message body
:returns: None
"""
if not isinstance(to, list):
to = [to]
if conn is None:
log.error("Email library not initialized")
return
conn.send_email(frm, subject, body, to)
def main(argv):
from . import args
ap = args.get_parser()
ap.add_argument("frm")
ap.add_argument("to")
ap.add_argument("subject")
ap.add_argument("body")
options = ap.parse_args()
init(options)
send_email(options.frm, options.to, options.subject, options.body)
return 0
if __name__ == "__main__":
import sys
sys.exit(main(sys.argv))
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torch.utils.data import Dataset
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
from datetime import datetime
import pytz # so that this program's timestamps are always in French (CET) time
class UNet(nn.Module):
def __init__(self, **kwargs):
super().__init__()
if "kernel_size" in kwargs:
kernel_size = kwargs["kernel_size"]
else:
kernel_size = 3
self.conv1 = nn.Sequential(nn.Conv2d(4, 32, (kernel_size, kernel_size), stride=1, padding=1),
nn.LeakyReLU(0.2),
nn.Conv2d(32, 32, (kernel_size, kernel_size), stride=1, padding=1),
nn.LeakyReLU(0.2))
self.pool1 = nn.MaxPool2d((2, 2))
self.conv2 = nn.Sequential(nn.Conv2d(32, 64, (kernel_size, kernel_size), stride=1, padding=1),
nn.LeakyReLU(0.2),
nn.Conv2d(64, 64, (kernel_size, kernel_size), stride=1, padding=1),
nn.LeakyReLU(0.2))
self.pool2 = nn.MaxPool2d((2, 2))
self.conv3 = nn.Sequential(nn.Conv2d(64, 128, (kernel_size, kernel_size), stride=1, padding=1),
nn.LeakyReLU(0.2),
nn.Conv2d(128, 128, (kernel_size, kernel_size), stride=1, padding=1),
nn.LeakyReLU(0.2))
self.pool3 = nn.MaxPool2d((2, 2))
self.conv4 = nn.Sequential(nn.Conv2d(128, 256, (kernel_size, kernel_size), stride=1, padding=1),
nn.LeakyReLU(0.2),
nn.Conv2d(256, 256, (kernel_size, kernel_size), stride=1, padding=1),
nn.LeakyReLU(0.2))
self.pool4 = nn.MaxPool2d((2, 2))
self.conv5 = nn.Sequential(nn.Conv2d(256, 512, (kernel_size, kernel_size), stride=1, padding=1),
nn.LeakyReLU(0.2),
nn.Conv2d(512, 512, (kernel_size, kernel_size), stride=1, padding=1),
nn.LeakyReLU(0.2))
self.up6 = nn.ConvTranspose2d(512, 256, 2, stride=2)
self.conv6 = nn.Sequential(nn.Conv2d(512, 256, (kernel_size, kernel_size), stride=1, padding=1),
nn.LeakyReLU(0.2),
nn.Conv2d(256, 256, (kernel_size, kernel_size), stride=1, padding=1),
nn.LeakyReLU(0.2))
self.up7 = nn.ConvTranspose2d(256, 128, 2, stride=2)
self.conv7 = nn.Sequential(nn.Conv2d(256, 128, (kernel_size, kernel_size), stride=1, padding=1),
nn.LeakyReLU(0.2),
nn.Conv2d(128, 128, (kernel_size, kernel_size), stride=1, padding=1),
nn.LeakyReLU(0.2))
self.up8 = nn.ConvTranspose2d(128, 64, 2, stride=2)
self.conv8 = nn.Sequential(nn.Conv2d(128, 64, (kernel_size, kernel_size), stride=1, padding=1),
nn.LeakyReLU(0.2),
nn.Conv2d(64, 64, (kernel_size, kernel_size), stride=1, padding=1),
nn.LeakyReLU(0.2))
self.up9 = nn.ConvTranspose2d(64, 32, 2, stride=2)
self.conv9 = nn.Sequential(nn.Conv2d(64, 32, (kernel_size, kernel_size), stride=1, padding=1),
nn.LeakyReLU(0.2),
nn.Conv2d(32, 32, (kernel_size, kernel_size), stride=1, padding=1),
nn.LeakyReLU(0.2))
self.conv10 = nn.Conv2d(32, 12, (1, 1), stride=1)
def forward(self, x):
conv1 = self.conv1(x)
pool1 = self.pool1(conv1)
conv2 = self.conv2(pool1)
pool2 = self.pool2(conv2)
conv3 = self.conv3(pool2)
pool3 = self.pool3(conv3)
conv4 = self.conv4(pool3)
pool4 = self.pool4(conv4)
conv5 = self.conv5(pool4)
up6 = self.up6(conv5)
up6 = torch.cat([up6, conv4], 1)
conv6 = self.conv6(up6)
up7 = self.up7(conv6)
up7 = torch.cat([up7, conv3], 1)
conv7 = self.conv7(up7)
up8 = self.up8(conv7)
up8 = torch.cat([up8, conv2], 1)
conv8 = self.conv8(up8)
up9 = self.up9(conv8)
up9 = torch.cat([up9, conv1], 1)
conv9 = self.conv9(up9)
conv10 = self.conv10(conv9)
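# conv10 has 12 channels; pixel_shuffle(2) rearranges them into 3 output channels at twice the spatial resolution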
return F.pixel_shuffle(conv10, 2)
def createAndTrainModel(**kwargs):
"""
:param dataset: The dataset you want the autoencoder to be trained on.
Dataset's __getitem__ function should return a C,H,W-shaped tensor
:param batch_size: The size of the batches (defaults to 4)
:param learning_rate: The learning rate (defaults to 1e-4)
:param num_epochs: The number of epochs (defaults to 50)
:param criterion: The Loss function (defaults to L1Loss)
:param log_file_name: (very optional) Path to a file where to store the history of the training
:param optimizer: (optional) Callable that returns an actual optimizer (i.e. a proper optimizer factory)
:param save_frequency: Save model's state every save_frequency epoch
:param run_name: (optional) The name of the run, so that you can find it in the mess of directories this program will create
:return: a fully trained model AND the latest loss computed
"""
assert "dataset" in kwargs, "No dataset provided (use dataset keyword argument)"
aeDataset = kwargs["dataset"]
if not isinstance(aeDataset, Dataset):
raise ValueError("Dataset provided is not a valid torch dataset")
if "batch_size" in kwargs:
batch_size = kwargs["batch_size"]
else:
batch_size = 4
if "run_name" in kwargs:
run_name = kwargs["run_name"]
else:
run_name = datetime.now(pytz.timezone("CET")).strftime("Run-%b-%d-%Hh%M")
if "learning_rate" in kwargs:
learning_rate = kwargs["learning_rate"]
else:
learning_rate = 1e-4
if "num_epochs" in kwargs:
num_epochs = kwargs["num_epochs"]
else:
num_epochs = 50
if "save_frequency" in kwargs:
save_frequency = kwargs["save_frequency"]
else:
save_frequency = 10
if save_frequency <= 0:
save_frequency = num_epochs # If invalid save frequency (0 or less), we only save once at the end
aeDataloader = DataLoader(aeDataset, batch_size=batch_size, pin_memory=True)
model = UNet()
model.to(device)
if "criterion" in kwargs:
criterion = kwargs["criterion"]
else:
criterion = nn.L1Loss()
if "optimizer" in kwargs:
optimizer = kwargs["optimizer"](
model.parameters()) # In case an optimizer factory is specified, let's give it this model's parameters
else:
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
lossLog = []
for epoch in range(num_epochs):
loss = 0
for in_images, gt_images in aeDataloader:
in_images = in_images.to(device)
gt_images = gt_images.to(device)
optimizer.zero_grad()
output = model(in_images)
train_loss = criterion(output,
gt_images) # The goal is to denoise, so making this look like a noisy image is maybe not the best. To be continued...
train_loss.backward()
optimizer.step()
loss += train_loss.item()
loss = loss / (len(aeDataloader))
lossLog.append((epoch, loss))
print(f"epoch [{epoch + 1}/{num_epochs}], loss:{loss:.4f}")
# We save the progress every save_frequency epoch
if (epoch + 1) % save_frequency == 0:
# Get French time stamp even if Colab's GPUs don't have consistent timezones
timeString = datetime.now(pytz.timezone("CET")).strftime("%b-%d-%Hh%M")
check_point_dict = {
"model_state_dict": model.state_dict(),
"epoch": epoch,
"optimizer_state_dict": optimizer.state_dict()
}
torch.save(check_point_dict, f"results/{run_name}_check_point_{timeString}_EPOCH_{epoch + 1}")
if "log_file_name" in kwargs:
f = open(kwargs["log_file_name"], "w")
f.write(f"Epoch,"
f"Loss,"
f"Criterion,"
f"LearningRate\n")
for epochX, lossY in lossLog:
f.write(f"{epochX}"
f",{lossY},"
f"{criterion.__class__.__name__},"
f"{learning_rate}"
f"\n")
f.close()
return model, loss # the trained model and the final epoch's average training loss, as promised in the docstring
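# Minimal usage sketch (illustrative only: `MyNoisyDataset` is a hypothetical
# Dataset whose __getitem__ returns an (input, ground_truth) tensor pair as
# described in the docstring above):
#
# dataset = MyNoisyDataset("path/to/data")
# model, final_loss = createAndTrainModel(dataset=dataset, batch_size=4,
# num_epochs=10, log_file_name="training_log.csv")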
|
def inc(lis):
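"""Return the length of the longest contiguous non-decreasing run in lis."""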
ini = lis[0]
c = 1
arr = []
for i in range(len(lis)):
if i == 0:
pass
elif lis[i] >= ini:
c += 1
ini = lis[i]
# print(lis[i], ini, c)
else:
ini = lis[i]
arr.append(c)
c = 1
arr.append(c)
return max(arr)
if __name__ == "__main__":
n = input().strip()
lis = list(map(int, input().strip().split(' ')))
print(inc(lis)) |
"""
helper functions for scraping
"""
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
def get_url(url):
"""
Fetch the given url with automatic retries
:param url: given url
:return: the requests.Response object, or None on HTTP/connection errors
"""
session = requests.Session()
retries = Retry(total=10, backoff_factor=.1)
session.mount('http://', HTTPAdapter(max_retries=retries))
try:
response = session.get(url, timeout=5)
response.raise_for_status()
except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError):
return None
return response
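# Example: resp = get_url("https://example.com"); the raw HTML is in resp.text when resp is not None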
|
# Generated by Django 2.1.3 on 2018-12-10 10:16
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('dashboard', '0005_auto_20181209_0916'),
]
operations = [
migrations.AlterModelOptions(
name='backup',
options={'ordering': ['-id']},
),
]
|
import json
import requests
from django.conf import settings
from django.http import HttpResponse
from django.utils.translation import ugettext as _
from .models import FormConfig, FormLog
from .forms import FormConfigForm
def send(request, slug):
context = {}
form_config = FormConfig.objects.get(slug=slug)
data = request.POST or None
form_config_form = FormConfigForm(data=data, form_config=form_config)
captcha_is_valid = False
if 'g-recaptcha-response' in request.POST and request.POST['g-recaptcha-response']:
url = 'https://www.google.com/recaptcha/api/siteverify'
data = {
'secret': settings.RECAPTCHA_SECRET,
'response': request.POST['g-recaptcha-response'],
}
response = requests.post(url, data=data) # the siteverify API expects a POST request
if response.status_code == 200:
raw_results = response.json()
print(raw_results)
if raw_results['success']:
captcha_is_valid = True
context['RECAPTCHA_SITEKEY'] = settings.RECAPTCHA_SITEKEY
context['captcha_is_valid'] = captcha_is_valid
if request.POST and form_config_form.is_valid():
form_log = FormLog(
form_config=form_config,
ip=request.META.get('REMOTE_ADDR', None),
referrer=request.META.get('HTTP_REFERER', None),
data=json.dumps(form_config_form.cleaned_data)
)
form_log.save()
context['status'] = 'sent'
else:
context['status'] = 'error'
context['errors'] = form_config_form.errors
return HttpResponse(json.dumps(context), content_type='application/json')
def validate(request, slug):
context = {}
form_config = FormConfig.objects.get(slug=slug)
data = request.POST or None
form_config_form = FormConfigForm(data=data, form_config=form_config)
if request.POST and form_config_form.is_valid():
context['errors'] = False
else:
context['errors'] = form_config_form.errors
return HttpResponse(json.dumps(context), content_type='application/json')
|
# Generated by Django 3.0.7 on 2020-07-10 10:18
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('assignment_project', '0003_auto_20200709_1654'),
]
operations = [
migrations.AddField(
model_name='userprofile',
name='year',
field=models.CharField(choices=[('p1', 'P1'), ('p2', 'P2'), ('e1', 'E1'), ('e2', 'E2'), ('e3', 'E3'), ('e4', 'E4')], default='e1', max_length=2),
),
migrations.AlterField(
model_name='userprofile',
name='branch',
field=models.CharField(choices=[('cse', 'CSE'), ('mech', 'MECH'), ('chem', 'CHEM'), ('ece', 'ECE'), ('mme', 'MME'), ('civil', 'CIVIL'), ('puc', 'PUC')], default='cse', max_length=5),
),
]
|
import snap
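# Closeness centrality for every node of a random G(n, m) graph; the same
# snap.GetClosenessCentr() call works for directed graphs (PNGraph),
# undirected graphs (PUNGraph) and networks (PNEANet).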
Graph = snap.GenRndGnm(snap.PNGraph, 100, 1000)
for NI in Graph.Nodes():
CloseCentr = snap.GetClosenessCentr(Graph, NI.GetId())
print ("node: %d centrality: %f" % (NI.GetId(), CloseCentr))
UGraph = snap.GenRndGnm(snap.PUNGraph, 100, 1000)
for NI in UGraph.Nodes():
CloseCentr = snap.GetClosenessCentr(UGraph, NI.GetId())
print ("node: %d centrality: %f" % (NI.GetId(), CloseCentr))
Network = snap.GenRndGnm(snap.PNEANet, 100, 1000)
for NI in Network.Nodes():
CloseCentr = snap.GetClosenessCentr(Network, NI.GetId())
print ("node: %d centrality: %f" % (NI.GetId(), CloseCentr))
|
"""
Each file that starts with test... in this directory is scanned for subclasses of unittest.TestCase or testLib.RestTestCase
"""
import unittest
import os
import testLib
class TestAddUser(testLib.RestTestCase):
"""Test adding users"""
def assertResponse(self, respData, count = 1, errCode = testLib.RestTestCase.SUCCESS):
"""
Check that the response data dictionary matches the expected values
"""
expected = { 'errCode' : errCode }
if count is not None:
expected['count'] = count
self.assertDictEqual(expected, respData)
def testNoPassword(self):
respData = self.makeRequest("/users/add", method="POST", data = { 'user' : 'user2', 'password' : ''} )
self.assertResponse(respData, count = 1)
def testNoName(self):
respData = self.makeRequest("/users/add", method="POST", data = { 'user' : '', 'password' : 'password'} )
self.assertResponse(respData, count = 1)
def testLongName(self):
respData = self.makeRequest("/users/add", method="POST", data = { 'user' : 'u'*129 , 'password' : 'password'} )
self.assertResponse(respData, count = 1, errCode = testLib.RestTestCase.ERR_BAD_USERNAME)
def testLongPass(self):
respData = self.makeRequest("/users/add", method="POST", data = { 'user' : 'user3' , 'password' : 'p' *129} )
self.assertResponse(respData, count = 1, errCode = testLib.RestTestCase.ERR_BAD_PASSWORD)
def testDupName(self):
self.makeRequest("/users/add", method="POST", data = { 'user' : 'user4' , 'password' : 'password'} )
respData = self.makeRequest("/users/add", method="POST", data = { 'user' : 'user4' , 'password' : 'password'} )
self.assertResponse(respData, count = 1, errCode = testLib.RestTestCase.ERR_USER_EXISTS)
class TestLoginUser(testLib.RestTestCase):
"""Test logging users"""
def assertResponse(self, respData, count = 1, errCode = testLib.RestTestCase.SUCCESS):
"""
Check that the response data dictionary matches the expected values
"""
expected = { 'errCode' : errCode }
if count is not None:
expected['count'] = count
self.assertDictEqual(expected, respData)
def testWrongPass(self):
self.makeRequest("/users/add", method="POST", data = { 'user' : 'user5' , 'password' : 'password'} )
respData = self.makeRequest("/users/login", method="POST", data = { 'user' : 'user5' , 'password' : 'pass'} )
self.assertResponse(respData, count = 1, errCode = testLib.RestTestCase.ERR_BAD_CREDENTIALS)
def testNotAdded(self):
respData = self.makeRequest("/users/login", method="POST", data = { 'user' : 'user6' , 'password' : 'password'} )
self.assertResponse(respData, count = 1, errCode = testLib.RestTestCase.ERR_BAD_CREDENTIALS)
|
import time
from boto import kinesis
from settings import KINESIS_REGION, KINESIS_STREAM_NAME
FLUSH_INTERVAL = 5
BATCH_SIZE = 20
running = True
def process_messages(batch_msgs):
print('messages processed: {}'.format(len(batch_msgs['Records'])))
for msg in batch_msgs['Records']:
print('message: "{}" offset: {}'.format(msg['Data'], msg['SequenceNumber']))
print('Connect to Kinesis Streams')
conn = kinesis.connect_to_region(region_name=KINESIS_REGION)
stream = conn.describe_stream(KINESIS_STREAM_NAME)
shardId = stream['StreamDescription']['Shards'][0]['ShardId']
shardIterator = conn.get_shard_iterator(KINESIS_STREAM_NAME, shardId, 'TRIM_HORIZON')
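# TRIM_HORIZON starts reading from the oldest record still stored in the shard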
print('Kinesis consumer started!')
while running:
msgs = conn.get_records(shardIterator['ShardIterator'], limit=BATCH_SIZE)
process_messages(msgs)
shardIterator['ShardIterator'] = msgs['NextShardIterator']
print('\nnext batch in {} seconds...'.format(FLUSH_INTERVAL))
time.sleep(FLUSH_INTERVAL)
|
import simpy
import random
import statistics
import numpy as np
wait_times = []
class Restaurant(object):
"""Restaurant with cashier (server) and cook resources."""
def __init__(self, env, num_servers, num_cooks):
self.env = env
self.server = simpy.Resource(env, num_servers)
self.cook = simpy.Resource(env, num_cooks)
def buy_food(self,customer):
yield self.env.timeout(np.random.exponential(1))
def cook_food(self,customer):
yield self.env.timeout(random.uniform(5,7))
def go_to_eat(env,customer,restaurant):
#Customer arrive at the restaurant
arrival_time = env.now
with restaurant.server.request() as request:
yield request
yield env.process(restaurant.buy_food(customer))
with restaurant.cook.request() as request:
yield request
yield env.process(restaurant.cook_food(customer))
queue = env.now - arrival_time
minutes_queue, frac_minutes = divmod(queue, 1)
seconds_queue = frac_minutes * 60
print(f"The customer wait time is {minutes_queue} minutes and {seconds_queue} seconds.")
wait_times.append(env.now - arrival_time)
def run_restaurant(env, num_servers, num_cooks):
restaurant = Restaurant(env, num_servers, num_cooks)
for customer in range(10):
env.process(go_to_eat(env, customer, restaurant))
while True:
yield env.timeout(np.random.exponential(5)) # interarrival times: exponential with mean 5 minutes (rate 0.2 per minute)
customer += 1
env.process(go_to_eat(env, customer, restaurant))
def get_user_input():
num_servers = input("Input # of cashiers: ")
num_cooks = input("Input # of cooks: ")
params = [num_servers, num_cooks]
params = [int(x) for x in params]
return params
def main():
# Setup
random.seed(69420)
num_servers, num_cooks = get_user_input()
# Run the simulation
env = simpy.Environment()
env.process(run_restaurant(env, num_servers, num_cooks))
env.run(until=100)
# View the results
average_wait = statistics.mean(wait_times)
minutes, frac_minutes = divmod(average_wait, 1)
seconds = frac_minutes * 60
print("-----------")
print(f"\nThe average wait time is {minutes} minutes and {seconds} seconds.")
#Running Simulation:
main()
|
import torch
import numpy as np
import os
import re
from torch.utils.data import Dataset
class HMDB(Dataset):
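"""HMDB51 frame dataset. Assumed layout: per-video directories of .npy frame
files named <video>-<index>..., listed in <load_type>_split<n>.txt files."""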
def __init__(self, _data_root, _txt_root, _load_type, _split_num, transform=None):
self._root_dir = _data_root
self._load_type = _load_type
self._txt_path = os.path.join(_txt_root, self._load_type+'_split%d.txt' %_split_num)
self._data_list = []
self._transform = transform
self.set_data_list()
def __len__(self):
return len(self._data_list)
def __getitem__(self, _idx):
data_name, label = self._data_list[_idx]
split_data_name = re.split(r"[-]+", data_name)
dir_name = split_data_name[0] + '-' + split_data_name[1]
file_path = os.path.join(self._root_dir, dir_name, data_name)
return self._transform(np.load(file_path)), label, dir_name
@staticmethod
def data_dir_reader(_txt_path):
tmp = []
f = open(_txt_path, 'r')
for line in f.readlines():
line = line.replace('\n', '')
split_line = re.split(r"[\s+,/]+", line)
file_info = split_line[0] + "-%05d" % int(split_line[1])
tmp.append([file_info, int(split_line[-1])])
return tmp
def set_data_list(self):
cv_lists = self.data_dir_reader(self._txt_path)
if self._load_type == 'train':
final_list = []
for cv_list in cv_lists:
video_name = cv_list[0]
label = cv_list[1]
dir_path = os.path.join(self._root_dir, video_name)
for file_list in os.listdir(dir_path):
final_list.append([file_list, label])
self._data_list = final_list
elif self._load_type == 'test':
data_list = []
for cv_list in cv_lists:
video_name = cv_list[0]
label = cv_list[1]
dir_path = os.path.join(self._root_dir, video_name)
for file_list in os.listdir(dir_path):
if not(re.split('[-.]+', file_list)[-2] == 'original'):
continue
data_list.append([file_list, label])
self._data_list = data_list
else:
raise ValueError("_load_type must be 'train' or 'test'")
if __name__ == "__main__":
data_root = "/home/jm/Two-stream_data/HMDB51/preprocess/frames"
txt_path = "/home/jm/Two-stream_data/HMDB51"
train_set = HMDB(data_root, txt_path, 'train', 1)
test_set = HMDB(data_root, txt_path, 'test', 1)
# A A
# (‘ㅅ‘=)
# J.M.Seo |
# Copyright 2016-2017 Andreas Riegg - t-h-i-n-x.net
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ----------------------------------------------------------------------------
#
# Changelog
#
# 1.0 2016-07-29 Initial release.
# 1.1 2017-01-27 File rename and comments added.
#
# Config parameters
#
# - channels Integer Number of digital channels. Default is 8.
#
# Usage remarks
#
# - You can use this device just like ordinary devices by e.g. using it in the
# config file.
# - As pure artificial device it has no hardware dependency at all. For that reason
# no bus parameter is necessary.
# - You can use this device to do some testing prior to having the real physical chips
# available to e.g. create some user interface in advance.
# - You can use this device also to check the correctness of some REST API calls
# that use the digital I/O values.
#
# Implementation remarks
#
# - For simplicity, the simulated digital device name is just the upper case version of
# the underlying digital abstraction.
#
from webiopi.devices.digital import GPIOPort
from webiopi.utils.types import toint
class DIGITAL(GPIOPort):
VALUES = []
FUNCTIONS = []
def __init__(self, channels=8):
GPIOPort.__init__(self, toint(channels))
self.VALUES = [0 for i in range(self.digitalCount())]
self.FUNCTIONS = [self.IN for i in range(self.digitalCount())]
def __str__(self):
return "DIGITAL"
def __getFunction__(self, channel):
return self.FUNCTIONS[channel]
def __setFunction__(self, channel, func):
self.FUNCTIONS[channel] = func
def __digitalRead__(self, channel):
return self.VALUES[channel]
def __portRead__(self):
val = 0
for i in range(self.digitalCount()):
val |= self.VALUES[i] << i
return val
def __digitalWrite__(self, channel, value):
if self.FUNCTIONS[channel] == self.OUT:
self.VALUES[channel] = value
def __portWrite__(self, value):
for i in range(self.digitalCount()):
val = (value >> i) & 0x1
self.__digitalWrite__(i, val)
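# Minimal usage sketch (assumes the GPIOPort base class exposes the usual
# setFunction/digitalWrite/portRead wrappers around the __...__ hooks; illustrative only):
#
# dev = DIGITAL(channels=4)
# dev.setFunction(0, dev.OUT)
# dev.digitalWrite(0, 1)
# print(dev.portRead()) # -> 1 (bit 0 set)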
|
wildcard_constraints:
xx = "x+"
rule recruit_round_0:
input:
seq = "initial_seq.fa",
reads = "hifi.fa"
output:
"reads_round_x.fa"
run:
shell("mummer -maxmatch -l 500 -b -threads 40 {input.seq} {input.reads} | scripts/pick_readnames_with_mums.py > picked_hifi_round0.txt"),
shell("cp picked_hifi_round0.txt picked.txt"),
shell("scripts/pick_reads_stdin.py < hifi.fa > {output}")
rule recruit_round_n:
input:
previous = "reads_round_{xx}.fa",
reads = "hifi.fa"
output:
"reads_round_{xx}x.fa"
run:
shell("MBG -i {input.previous} -o graph-hpc-rec-round{wildcards.xx}.gfa -k 501 -w 10 -a 1 -u 3"),
shell("grep -P '^S' < graph-hpc-rec-round{wildcards.xx}.gfa | awk '{{print \">\" $2; print $3;}}' > contigs_round{wildcards.xx}.fa"),
shell("mummer -maxmatch -l 500 -b -threads 40 contigs_round{wildcards.xx}.fa {input.reads} | scripts/pick_readnames_with_mums.py > picked_hifi_round{wildcards.xx}.txt"),
shell("cp picked_hifi_round{wildcards.xx}.txt picked.txt"),
shell("scripts/pick_reads_stdin.py < hifi.fa > {output}")
|
"""
ex.
[1,2,3,4,5,6,7,8]
double each item and get this
[2,4,6,8,10,12,14,16]
"""
list2 = [1,2,3,4,5,6,7,8]
result = [2*i for i in list2]
print(result)
result = [2*i for i in [1,2,3,4,5,6,7,8]]
print(result) |
color_start = '\033[34m' # ANSI escape code for blue text
color_end = '\033[0m' # ANSI reset code
def print_title(text):
print(color_start + text + color_end)
|
import torch
import torch.nn as nn
import torchvision.models as models
from torch.nn import functional as F
from torchvision.models.segmentation.deeplabv3 import DeepLabHead
def set_parameter_requires_grad(model, feature_extracting):
if feature_extracting:
for param in model.parameters():
param.requires_grad = False
class Deeplabv3(nn.Module):
def __init__(self,num_classes,backbone="resnet50"):
super(Deeplabv3, self).__init__()
if backbone.find("resnet50")>=0:
self.dl = models.segmentation.deeplabv3_resnet50(pretrained=False, progress=True)
elif backbone.find("resnet101")>=0:
self.dl = models.segmentation.deeplabv3_resnet101(pretrained=False, progress=True)
else:
print("backbone for Deeplap not recognized ...\n")
exit(-1)
self.dl.classifier[4] = torch.nn.Conv2d(256, num_classes, 1)
# self.dl.classifier = DeepLabHead(2048, num_classes)
self.dl.classifier[0].project[3]=nn.Dropout(p=0, inplace=False)
def forward(self, x):
x = self.dl(x)['out']
# x_softmax = F.softmax(x, dim=1)
return x#, x_softmax
class Deeplabv3_GRU_ASPP(nn.Module):
def __init__(self,num_classes,backbone="resnet50"):
super(Deeplabv3_GRU_ASPP, self).__init__()
if backbone.find("resnet50")>=0:
self.dl = models.segmentation.deeplabv3_resnet50(pretrained=True, progress=True)
elif backbone.find("resnet101")>=0:
self.dl = models.segmentation.deeplabv3_resnet101(pretrained=False, progress=True)
else:
print("backbone for Deeplap not recognized ...\n")
exit(-1)
self.dl.classifier[4] = torch.nn.Conv2d(256, num_classes, 1)
self.in_channels = self.dl.classifier[0].convs[4][1].in_channels
self.out_channels = self.dl.classifier[0].convs[4][1].out_channels
self.dl.classifier[0].convs[4]=ASPPPooling_GRU(self.in_channels, self.out_channels) # this is where the ASPP pooling happens
def forward(self, x):
x = self.dl(x)['out']
# x_softmax = F.softmax(x, dim=1)
return x#, x_softmax
class ASPPPooling(nn.Sequential):
def __init__(self, in_channels, out_channels):
super(ASPPPooling, self).__init__(
nn.AdaptiveAvgPool2d(1),
nn.Conv2d(in_channels, out_channels, 1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU())
def forward(self, x):
size = x.shape[-2:]
for index,mod in enumerate(self):
x = mod(x)
return F.interpolate(x, size=size, mode='bilinear', align_corners=False)
class ASPPPooling_GRU(nn.Sequential):
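"""ASPP pooling variant: the global average pool is replaced by a GRU that
scans each channel's pixels and keeps the final hidden state as the pooled value."""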
def __init__(self, in_channels, out_channels):
super(ASPPPooling_GRU, self).__init__(
# nn.AdaptiveAvgPool2d(1),
nn.GRU(input_size=1,hidden_size=1,batch_first=True),
nn.Conv2d(in_channels, out_channels, 1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU())
def forward(self, x):
size = x.shape[-2:]
for index,mod in enumerate(self):
if index==0:#GRU model do the following
x_gru = []
for batch in x: # for each image do
channel_size = batch.shape[0]
batch = batch.view(channel_size,-1,1)
gru_output, h = mod(batch)
# print(h.view(-1).equal(gru_output[:,-1].view(-1)))
x_gru.append(h.squeeze(0))
x = torch.stack(x_gru,dim=0).unsqueeze(-1)
else:
x = mod(x)
return F.interpolate(x, size=size, mode='bilinear', align_corners=False)
class Deeplabv3_GRU_CombineChannels(nn.Module):
def __init__(self,num_classes,backbone="resnet50"):
super(Deeplabv3_GRU_CombineChannels, self).__init__()
if backbone.find("resnet50")>=0:
self.dl = models.segmentation.deeplabv3_resnet50(pretrained=False, progress=True)
elif backbone.find("resnet101")>=0:
self.dl = models.segmentation.deeplabv3_resnet101(pretrained=False, progress=True)
else:
print("backbone for Deeplap not recognized ...\n")
exit(-1)
self.dl.classifier[4] = torch.nn.Conv2d(256, num_classes, 1)
out_channels = self.dl.classifier[0].project[0].out_channels
self.project_gru = nn.Sequential(
# nn.Conv2d(5 * out_channels, out_channels, 1, bias=False),
Project_GRU(out_channels),
nn.BatchNorm2d(out_channels),
nn.ReLU(),
nn.Dropout(0.5))
self.dl.classifier[0].project=self.project_gru
def forward(self, x):
x = self.dl(x)['out']
# x_softmax = F.softmax(x, dim=1)
return x#, x_softmax
class Project_GRU(nn.Module):
def __init__(self,out_channels):
super(Project_GRU, self).__init__()
self.out_channels = out_channels
self.gru = nn.GRU(input_size=out_channels, hidden_size = out_channels) # sequence = the 5 concatenated ASPP branches, batch = the HW pixels, input = the channel vector
def forward(self, x):
feature_maps= [] #feature map for each image (batch,C,H,W)
for image in x:#for each image in the batch do
image_shape = image.shape # (C,H,W)
image = image.view(5, self.out_channels, *image_shape[1:]) # (5,256, H, W)
image = image.view(5, self.out_channels, -1) # (5,256, HW)
image = image.permute(0, 2, 1) # (5,HW,256) (seq_len=C, batch=HW, input_size=256)
# image = image.view(image_shape[0],-1).unsqueeze(dim=-1) #image =(C,HW,1)=>(seq_len=C, batch=HW, input_size=1) one pixel for each seq
ouput, h = self.gru(image) # h =(1, batch=HW, hidden_size=out_channels)
# (1,HW,out_channels)=>(HW,out_channels)=>(out_channels,HW)=>(out_channels,H, W)
h = h.squeeze().transpose(1,0).view(-1,*image_shape[1:])
feature_maps.append(h)
x = torch.stack(feature_maps)
return x
class Deeplabv3_GRU_ASPP_CombineChannels(nn.Module):
def __init__(self,num_classes,backbone="resnet50"):
super(Deeplabv3_GRU_ASPP_CombineChannels, self).__init__()
if backbone.find("resnet50")>=0:
self.dl = models.segmentation.deeplabv3_resnet50(pretrained=False, progress=True)
elif backbone.find("resnet101")>=0:
self.dl = models.segmentation.deeplabv3_resnet101(pretrained=False, progress=True)
else:
print("backbone for Deeplap not recognized ...\n")
exit(-1)
self.dl.classifier[4] = torch.nn.Conv2d(256, num_classes, 1)
self.in_channels = self.dl.classifier[0].convs[4][1].in_channels
self.out_channels = self.dl.classifier[0].convs[4][1].out_channels
self.dl.classifier[0].convs[4]=ASPPPooling_GRU(self.in_channels, self.out_channels) # this is where the ASPP pooling happens
out_channels = self.dl.classifier[0].project[0].out_channels
self.project_gru = nn.Sequential(
# nn.Conv2d(5 * out_channels, out_channels, 1, bias=False),
Project_GRU(out_channels),
nn.BatchNorm2d(out_channels),
nn.ReLU(),
nn.Dropout(0.5))
self.dl.classifier[0].project = self.project_gru
def forward(self, x):
x = self.dl(x)['out']
# x_softmax = F.softmax(x, dim=1)
return x#, x_softmax
class Deeplabv3_LSTM(nn.Module):
def conv_layer(self,in_channels):
return nn.Sequential(nn.Conv2d(in_channels=in_channels,out_channels=64,kernel_size=3,padding=1), nn.BatchNorm2d(64), nn.AdaptiveMaxPool2d(1))
def __init__(self,num_classes,backbone="resnet50"):
super(Deeplabv3_LSTM, self).__init__()
if backbone.find("resnet50")>=0:
self.dl = models.segmentation.deeplabv3_resnet50(pretrained=True, progress=True)
elif backbone.find("resnet101")>=0:
self.dl = models.segmentation.deeplabv3_resnet101(pretrained=False, progress=True)
else:
print("backbone for Deeplap not recognized ...\n")
exit(-1)
set_parameter_requires_grad(self.dl,True)
self.num_classes = num_classes
# self.dl.classifier[4] = torch.nn.Conv2d(256, num_classes, 1)
self.segmentation_LSTM = nn.LSTM(input_size=64,hidden_size=3456) #input(64*36*48) given input image size of cvc-clinicDB (288,384)
self.layers = list(self.dl.children()) # => len(layers) = 2 before ASPP and After ASPP
self.intermediatelayers_layers = list(self.layers[0].children()) # we want the feature map after index 8 and before index 8
self.convSeq1 = self.conv_layer(1024)
self.convSeq2 = self.conv_layer(2048)
self.afterASPP = self.layers[1][0]
self.convSeq3 = self.conv_layer(256)
# print(len(self.intermediatelayers_layers))
# print(self.layers,len(self.layers))
# exit(0)
def forward(self, x):
image_shape = x.shape[-2:]
batch_size = x.shape[0]
input_for_lstm = []
for i in range(len(self.intermediatelayers_layers)):
x = self.intermediatelayers_layers[i](x)
if i >= 6:#if we reached the last intermediate layer
input_for_lstm.append(x)
x = self.afterASPP(x)
intermediate_hight_width = x.shape[-2:]
# print("self.convSeq1(input_for_lstm[0])=",self.convSeq1(input_for_lstm[0]).shape)
# image_shape = input_for_lstm[0].shape
input_for_lstm[0] = self.convSeq1(input_for_lstm[0]).flatten(start_dim=1)
input_for_lstm[1] = self.convSeq2(input_for_lstm[1]).flatten(start_dim=1)
input_for_lstm.append(self.convSeq3(x).flatten(start_dim=1))
# [print(j.shape) for j in input_for_lstm]
# exit(0)
# print(x.shape)
stacked_input = torch.stack(input_for_lstm) # ==> the output is (seq=3,batch, input)
# print("stacked_input.shape=",stacked_input.shape)
# exit(0)
output, (hidden, cell) = self.segmentation_LSTM(stacked_input) # ==> hidden.shape = (1, batch size, hidden_size)
# print(hidden.squeeze().shape)
# exit(0)
x=hidden.squeeze().view(batch_size,self.num_classes,*intermediate_hight_width)
x = nn.functional.interpolate(x,size=image_shape,mode='bilinear', align_corners=False)
return x#, x_softmax |
from flask import Flask
import folium
import pandas as pd
from flask import render_template
from flask import request
from flask import redirect
import daten
from folium import plugins
from folium.plugins import MeasureControl
from folium.plugins import FloatImage
from natsort import natsorted, ns
from operator import attrgetter
import numpy as np
import matplotlib.pyplot as plt
from plotly.offline import plot
import plotly.graph_objects as go
import plotly.express as px
app = Flask(__name__)
@app.route("/", methods=['GET', 'POST']) #Formular erstellen
def kilometer_speichern(): #Einträge im Formularfeld Name und Kilometer werden im Json gespeichert
if request.method == 'POST':
kilometer = request.form['kilometer']
nachname = request.form['nachname']
nachname, kilometer = daten.kilometer_speichern(kilometer, nachname)
return redirect('/ranking') #Wenn man auf den Button "Speichern" klickt, gelangt man auf die Seite mit dem Ranking
return render_template("start1.html")
def data(): # Datenherkunft Barplot
data = daten.kilometer_laden() #Daten aus dem Json fürs Ranking werden für den Barplot verwendet
data_df = pd.DataFrame.from_dict(data, orient="index")
data_df=data_df.reset_index() #Index entfernen, damit 0 nicht der erste Eintrag ist
data_df.columns=["Kilometer", "Name"]
data_df=data_df.sort_values(by=["Kilometer"], ascending = False) #Erzeugung aufsteigender Reihenfolge der Balken
return data_df
def viz(): # build the chart
data_df = data()
fig = px.bar(
data_df,
x=data_df.Kilometer, y=data_df.Name,
orientation='h',
height=400,
width=800
)
fig.layout.template = 'plotly_white'
div = plot(fig, output_type="div") # returns the chart as an HTML string, which is needed to embed it
return div
@app.route('/ranking') # build the ranking table
def ranking():
kilometer = daten.kilometer_laden()
sortiert = natsorted(kilometer.items()) # natural sort of the "Kilometer" values
nummeriert = enumerate(sortiert, start=1) # numbering for the "Rang" (rank) column
div = viz()
return render_template("ranking1.html", nummeriert=nummeriert, viz_div=div) # render the ranking and the chart in ranking1.html
@app.route('/karte') # display the map
def index():
folium_map = folium.Map(
location=[46.8667, 8.2333], # center the map on the midpoint of Switzerland, the village of Sachseln
zoom_start=8,
tiles= "https://server.arcgisonline.com/ArcGIS/rest/services/Canvas/World_Light_Gray_Base/MapServer/tile/{z}/{y}/{x}", # map tile design
attr="Tiles © Esri — Esri, DeLorme, NAVTEQ",
)
# store the coordinates in pandas
data = pd.DataFrame({
'lat':[47.390434, 47.330769, 47.385849, 47.486614, 47.559601, 46.947922, 46.806403, 46.20222, 47.04057, 46.84986, 47.36493, 47.05048, 46.99179, 46.95805, 46.89611, 47.42391, 47.69732, 47.02076, 47.20791, 47.55776, 46.19278, 46.88042, 46.22739, 46.51600, 47.17242, 47.36667],
'lon':[8.045701, 9.41104, 9.27884, 7.733427, 7.588576, 7.444608, 7.153656, 6.14569, 9.06804, 9.53287, 7.34453, 8.30635, 6.931000, 8.36609, 8.24531, 9.37477, 8.63493, 8.65414, 7.53714, 8.89893, 9.01703, 8.64441, 7.35559, 6.63282, 8.51745, 8.55000],
'name':["Aarau", "Appenzell", "Herisau", "Liestal", "Basel", "Bern", "Fribourg", "Genf", "Glarus", "Chur", "Delsberg", "Luzern", "Neuenburg", "Stans", "Sarnen", "Sankt Gallen", "Schaffhausen", "Schwyz", "Solothurn", "Frauenfeld", "Bellinzona", "Altdorf", "Sion", "Lausanne", "Zug", "Zurich"]
}) # set the coordinates for the markers
# place the markers
for i in range(0,len(data)):
folium.Marker([data.iloc[i]['lat'], data.iloc[i]['lon']], popup=folium.Popup(data.iloc[i]['name'], show=True, sticky=True), tooltip=data.iloc[i]["name"]).add_to(folium_map) # hover effect that shows the place name, plus a permanently shown popup marker
folium_map.add_child(MeasureControl()) # add the plugin that lets you measure distances
url = ('https://media.licdn.com/mpr/mpr/shrinknp_100_100/AAEAAQAAAAAAAAlgAAAAJGE3OTA4YTdlLTkzZjUtNDFjYy1iZThlLWQ5OTNkYzlhNzM4OQ.jpg')
FloatImage(url, bottom=5, left=85).add_to(folium_map)
folium_map.save('templates/map.html') # save the map as an HTML file so it can be embedded into the page with an iframe
return render_template('start1.html') # show the map on the start page
@app.route('/map') # route that serves the map as HTML
def map():
return render_template('map.html')
@app.route("/creators") #Erstellen approute für Seite Creators, damit man sie mit Jinja verlinken kann
def creators():
return render_template("creators.html")
if __name__ == "__main__":
app.run(debug=True, port=5000)
|
#Write a program that prints the numbers from 1 to 100
#But for multiples of three it will print “Fizz” instead of the number.
#For the multiples of five it will print “Buzz” and For multiples
#of both three and five it will print “FizzBuzz” .
def print_number(n):
'''Print 'Fizz' for multiples of three, 'Buzz' for multiples of five,
'FizzBuzz' for multiples of both three and five, and the number itself otherwise.'''
for i in range(1,n):
if i%3==0 and i%5==0:
print("FizzBuzz")
elif i%3==0:
print("Fizz")
elif i%5==0 :
print("Buzz")
else:
print(i)
print_number(101)
|
#! /usr/bin/python3
import requests
from bs4 import BeautifulSoup
import os
import sys
import csv
import re
from collections import defaultdict
from urlWordsExtractor import dataExtractor
import nltk
from nltk import pos_tag
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
def getUrlFtrVector(wordList, ftrWords):
ftrVector = [0]*len(ftrWords)
for word in wordList:
if word != "":
word = word.lower()
tag = pos_tag(word_tokenize(word))[0][1][0].lower()
if tag in ['a','v','r']:
lemmatizedWord = (lemmatizer.lemmatize(word,tag))
else:
lemmatizedWord = lemmatizer.lemmatize(word)
if lemmatizedWord in ftrWords:
ftrVector[ftrWords.index(lemmatizedWord)] = 1
return ftrVector
def readFtrWords(fileCount, thMin, thMax):
ftrWords = []
with open(fileCount, "r") as csvfileIn:
reader = csv.reader(csvfileIn)
for row in reader:
if thMin <= float(row[1]) <= thMax and row[0] not in ftrWords:
ftrWords.append(row[0])
return ftrWords
classDict = {
"Adult":0,
"Arts":1,
"Computers":2,
"Health":3,
"otpothr":4,
"tickets":5,
"prsonal":6,
}
wordFile = "wordsPerCategory/words_dmoz.txt"
regexp = r'\W+'
wordCount = defaultdict(int)
lemmatizer = WordNetLemmatizer()
stopwords = set(nltk.corpus.stopwords.words('english'))
ftrWords = readFtrWords(wordFile, 0.30, 1.0)
def featureExtractor(category):
urls = dataExtractor(category)
with open("../Features/ftr_"+category+".txt", "w+") as fileOut:
classNo = classDict[category]
print("Extracting url data...")
for url in urls:
urlWords = []
response = requests.get(url)
soup = BeautifulSoup(response.text, "html.parser")
metas = soup.find_all('meta')
title = soup.find_all('title')
for meta in metas:
if 'name' in meta.attrs:
if meta.attrs['name'] == 'description':
urlWords += re.split(regexp,meta.attrs['content'])
if meta.attrs['name'] == 'keywords':
urlWords += re.split(regexp,meta.attrs['content'])
if 'property' in meta.attrs:
if meta.attrs['property'] == 'og:description':
urlWords += re.split(regexp,meta.attrs['content'])
if meta.attrs['property'] == 'og:keywords':
urlWords += re.split(regexp,meta.attrs['content'])
urlWords += re.split(regexp,title[0].string)
print("Creating Feature Vector...")
urlFtrVector = getUrlFtrVector(urlWords,ftrWords)
fileOut.write(str([urlFtrVector,classNo]))
fileOut.write('\n')
featureExtractor("Health") |
#Analyze the proportion of features I have in my data (G and S)
import sys
import statistics as stat
f = open("../data/70aadata.txt","r")
f = f.read().splitlines()
structureset = []
for i in range(2, len(f), 3):
structureset.append(f[i])
total = len(structureset)
countG = 0
numberofS = []
for eachstruc in structureset:
theset = set([i for i in eachstruc])
if 'S' not in theset:
countG += 1
if 'S' in theset:
countS = 0
for i in eachstruc:
if i == 'S':
countS += 1
numberofS.append(countS)
print ("Proteins containing only G:", countG)
print ("Proteins containing G and S:", (total-countG))
print ("Signal peptide average lenght: ", stat.mean(numberofS))
#print (numberofS)
with open ('//home/u2195/Desktop/Dropbox/Bioinformatics_projects/results'+ '/' + 'ProtAnalysis_averageSignalPep' + '.txt', 'w')as b:
b.write("Proteins containing only G:"+ str(countG)+'\n')
b.write("Proteins containing G and S:"+ str(total-countG)+'\n')
b.write("Signal peptide average length: " + str(stat.mean(numberofS)))
|
#coding:utf-8
from django.http import HttpResponse
from django.shortcuts import render
from django.http import JsonResponse
#from .forms import AddForm
import nltk
import re
import CRFPP
def index(request):
return render(request, 'home.html')
def process(request):
NERtext = request.GET['NERtext2']
input_text = nltk.sent_tokenize(NERtext)
# segWord holds the tokenization result, e.g.: ["Number", "of", "glucocorticoid", "receptors", "in", "lymphocytes", "and", "their", "sensitivity", "to", "hormone", "action"]
segWord = []
for line in input_text:
segWord += nltk.word_tokenize(line)
# exFeature holds the result of feature extraction, e.g.: [["Number", "NNP", 0, 0, 1, 0, 0, 0, 0, 0, 0, "Aaaa", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ["of", ...] ...]
exFeature = featureExtract(segWord)
# feature selection is performed three times, yielding selRNAFeature, selDNAFeature and selcellFeature
selRNAFeature = featureSelect(exFeature,'RNA')
selDNAFeature = featureSelect(exFeature,'DNA')
selcellFeature = featureSelect(exFeature,'cell')
# the three results come from running the CRF++ taggers
rnaresult = crfTest(selRNAFeature,'RNA')
dnaresult = crfTest(selDNAFeature,'DNA')
cellresult = crfTest(selcellFeature,'cell')
mergelst = zip(rnaresult,dnaresult,cellresult)
totalresult = mergeClassifier(mergelst)
output = {
'segWord':segWord,
'exFeature':exFeature,
'selRNAFeature':selRNAFeature,
'selDNAFeature':selDNAFeature,
'selcellFeature':selcellFeature,
'rnaresult':rnaresult,
'dnaresult':dnaresult,
'cellresult':cellresult,
'totalresult':totalresult
}
return JsonResponse(output)
def featureExtract(lst):
POSTagsList = getPOSTags(lst)
outlst = []
i = 0
for token in lst:
tempList =[]
tempList.append(token)
tempList.append(POSTagsList[i])
tempList.append(getRegExBool ("\-", token)) #Hyphen in token
tempList.append(getRegExBool (",", token)) #Comma in token
tempList.append(getRegExBool ("[A-Z]", token)) #Cap letter in token
tempList.append(getRegExBool ("[0-9]", token)) #Number in token
tempList.append(getRegExBool ('\\\\', token)) #Backslash in token
tempList.append(getRegExBool (":", token)) #Colon in token
tempList.append(getRegExBool (";", token)) #Semicolon in token
tempList.append(getRegExBool ('\[', token)) #Bracket in token (note I assume that if left bracket occurs then right bracket also occurs)
tempList.append(getRegExBool ('\(', token)) #Parenthese in token (note I assume that if left Paren occurs then right Paren also occurs)
tempList.append(getWordShape(token))
tempList.append(getRegExNoCaseBool ('alpha|beta|gamma|delta|epsilon|zeta|theta|kappa|lambda', token)) #GreekLetter in token
tempList.append(getRegExNoCaseBool ('rna', token)) #RNA in token
tempList.append(getRegExNoCaseBool ('cell', token)) #Cell letter in token
tempList.append(getRegExNoCaseBool ('gene', token)) #Gene in token
tempList.append(getRegExNoCaseBool ('jurkat', token)) #Jurkat in token
tempList.append(getRegExNoCaseBool ('transcript', token)) #Transcript in token
tempList.append(getRegExNoCaseBool ('factor', token)) #Factor in token
tempList.append(getRegExNoCaseBool ('prot|mono|nucle|integr|macro|il\-', token)) #Common string associated with RNA, DNA, etc
tempList.append(getRegExNoCaseBool ('alpha|beta|gamma|delta|epsilon|zeta|theta|kappa|lambda|rna|cell|gene|jurkat|transcript|factor|prot|mono|nucle|integr|macro|il\-' , token)) #Any above mentioned Lexical features
tempList.append(getCapLetterByselfBool(token))
outlst.append(tempList)
return outlst
def getPOSTags(token):
POSTagsTuple = nltk.pos_tag(token)
POSTagsList = []
for item in POSTagsTuple:
POSTagsList.append(item[1])
return POSTagsList
###### Generic RegEx test and return 0 or 1 to print to file ####
def getRegExBool (regex, token):
return int(bool(re.search(regex,token)))
###### Generic "Ignore case" of letters RegEx test and return 0 or 1 to print to file ####
def getRegExNoCaseBool (regex, token):
return int(bool(re.search(regex, token, flags = re.IGNORECASE)))
def getWordShape(token):
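# maps a token to its "shape": capitals -> 'A', lowercase -> 'a' (runs of 4 or
# more collapsed to 'aaa'), digits -> 'd', any other character -> '_';
# e.g. "IL-2" -> "AA_d"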
wordShape = re.sub('[A-Z]', 'A', token, flags=0)
wordShape = re.sub('[a-z]', 'a', wordShape, flags=0)
wordShape = re.sub('aaaa+', 'aaa', wordShape, flags=0)
wordShape = re.sub('[0-9]', 'd', wordShape, flags=0)
wordShape = re.sub('\W', '_', wordShape, flags=0)
return wordShape
##### IS Capital Letter by itself? #####
def getCapLetterByselfBool(token):
#returns bool 1 for true if token is an individual capital letter
if len(token) == 1 and re.match( '[A-Z]' , token , flags = 0):
return 1
else:
return 0
def loadFeatures(file_name):
dct = {}
with open(file_name) as f:
for line in f.readlines():
cur_line = line.strip().split('\t')
dct[cur_line[0]] = cur_line[1]
return dct
def featureSelect(lst,feature):
featuresdct = loadFeatures('bioNER/static/totalfeatures.txt')
# expand each token's feature vector in lst to 1653 dimensions
new_lst = changePOSandWS(lst,featuresdct)
if feature == "DNA":
dnafeaturedct = loadFeatures('bioNER/static/selectedfeatures-20-DNA-protein.txt')
dna_lst =[]
for itm in new_lst:
tmp =[]
tmp.append(itm[0])
for index in dnafeaturedct.itervalues():
tmp.append(itm[int(index)])
dna_lst.append(tmp)
# return the top 20 features ranked by chi-square score
return dna_lst
elif feature == "RNA":
rnafeaturedct = loadFeatures('bioNER/static/selectedfeatures-20-RNA-protein.txt')
rna_lst =[]
for itm in new_lst:
tmp =[]
tmp.append(itm[0])
for index in rnafeaturedct.itervalues():
tmp.append(itm[int(index)])
rna_lst.append(tmp)
return rna_lst
elif feature == "cell":
cellfeaturedct = loadFeatures('bioNER/static/selectedfeatures-20-cell_type-cell_line.txt')
cell_lst =[]
for itm in new_lst:
tmp =[]
tmp.append(itm[0])
for index in cellfeaturedct.itervalues():
tmp.append(itm[int(index)])
cell_lst.append(tmp)
return cell_lst
def changePOSandWS(lst,featuredct):
new_lst = []
for x in lst:
pos = x[1]
ws = x[11]
x = x[:1] + [0 for i in range(44)] + x[2:11] + [0 for j in range(1589)] + x[12:]
try:
x[int(featuredct[pos])] = '1' # set the indicator feature for this POS tag
except KeyError:
pass
try:
x[int(featuredct[ws])] = '1' # set the indicator feature for this word shape
except KeyError:
pass
new_lst.append(x)
return new_lst
def crfTest(lst,model_name):
if model_name == "DNA":
tagger = CRFPP.Tagger("-m bioNER/static/model-5-20-DNA-protein")
elif model_name == "RNA":
tagger = CRFPP.Tagger("-m bioNER/static/model-5-20-RNA-protein")
elif model_name == "cell":
tagger = CRFPP.Tagger("-m bioNER/static/model-5-20-cell-cell")
for line in lst:
tagger.add('\t'.join(map(str,line)))
tagger.parse()
ysize = tagger.ysize()
size = tagger.size()
xsize = tagger.xsize()
taglst = [tagger.y2(i) for i in range(len(lst))]
result = []
for i in range(len(lst)):
tmp = [ lst[i][0],taglst[i] ]
result.append(tmp)
return result
def mergeClassifier(lst):
newlst = []
for itm in lst:
tmp = []
if itm[0][-1] == '0' and itm[1][-1] == '0' and itm[2][-1] == '0':
tmp= itm[0]
elif itm[0][-1] != '0':
tmp = itm[0]
elif itm[1][-1] != '0':
tmp = itm[1]
elif itm[2][-1] != '0':
tmp = itm[2]
newlst.append(tmp)
return newlst
|
from unittest.mock import Mock
import pytest
from django.contrib.auth.models import AbstractUser
from ..models import CustomUser
class TestCustomUser:
def test_custom_user_inherits_from_abstract(self):
assert issubclass(CustomUser, AbstractUser)
|
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 23 17:28:53 2020
@author: shkim
"""
"""
## 차원 감소(Dimensionality Reduction)
## SVD(Singular Value Decomposition, 특잇값분해)
* np.linalg.svd() 사용
"""
#%%
import numpy as np
import sys
sys.path.append('../../')
from myutils.util import preprocess, create_co_matrix, ppmi
text = 'You say goodbye and I say hello.'
corpus, word_to_id, id_to_word = preprocess(text)
vocab_size = len(word_to_id)
C = create_co_matrix(corpus, vocab_size)
W = ppmi(C, verbose=False)
U, S, V = np.linalg.svd(W) # (7, 7) (7,) (7, 7)
print(U.shape, S.shape, V.shape)
np.set_printoptions(precision=3)
print(C[0]) # [0 1 0 0 0 0 0]
print(W[0]) # [0. 1.8073549 0. 0. 0. 0. 0. ]
print(U[0]) # [-3.4094876e-01 -1.1102230e-16 -3.8857806e-16 -1.2051624e-01 0.0000000e+00 9.3232495e-01 2.2259700e-16]
# the sparse vectors of W are converted into dense vectors in U
print(U[0, :2]) # [-3.4094876e-01 -1.1102230e-16]
# to reduce the dense vectors' dimensionality, e.g. down to 2-D, simply take the first two elements
#%%
print(S)
print(U[:])
print(V[:])
#%%
"""
* 각 단어를 2차원벡터로 표현한 후 그래프로 그려보자.
"""
import matplotlib.pyplot as plt
plt.scatter(U[:,0], U[:,1], alpha=0.5)
for word, word_id in word_to_id.items():
print(word)
plt.annotate(word, (U[word_id, 0], U[word_id, 1]))
plt.show()
#%%
"""
* 각 단어를 2차원벡터로 표현한 후 그래프로 그려보자.
"""
import matplotlib.pyplot as plt
for word, word_id in word_to_id.items():
plt.scatter(U[:,0], U[:,1], alpha=0.5)
print(word)
plt.annotate(word, (U[word_id, 0], U[word_id, 1]))
plt.show()
input('Enter to continue..')
#%%
|
class Dog():
def __del__(self):
print("the object has been destroyed")
dog1 = Dog()
del dog1
dog2 = Dog()
dog3 = Dog()
del dog2
del dog3
print("the program has ended")
|
import numpy as np
from ..util import _is_na
from anndata import AnnData
import pandas as pd
from typing import Union
from ..io._util import _check_upgrade_schema
@_check_upgrade_schema()
def alpha_diversity(
adata: AnnData,
groupby: str,
*,
target_col: str = "clone_id",
inplace: bool = True,
key_added: Union[None, str] = None
) -> pd.DataFrame:
"""Computes the alpha diversity of clonotypes within a group.
Uses the `Shannon Entropy <https://mathworld.wolfram.com/Entropy.html>`__ as
diversity measure. The entropy gets
`normalized to group size <https://math.stackexchange.com/a/945172>`__.
Ignores NaN values.
Parameters
----------
adata
Annotated data matrix
groupby
Column of `obs` by which the grouping will be performed.
target_col
Column on which to compute the alpha diversity
inplace
If `True`, add a column to `obs`. Otherwise return a DataFrame
with the alpha diversities.
key_added
Key under which the alpha diversity will be stored if inplace is `True`.
Defaults to `alpha_diversity_{target_col}`.
Returns
-------
Depending on the value of inplace returns a DataFrame with the alpha diversity
for each group or adds a column to `adata.obs`.
"""
# Could rely on skbio.math if more variants are required.
def _shannon_entropy(freq):
"""Normalized shannon entropy according to
https://math.stackexchange.com/a/945172
"""
np.testing.assert_almost_equal(np.sum(freq), 1)
if len(freq) == 1:
# the formula below is not defined for n==1
return 0
else:
return -np.sum((freq * np.log(freq)) / np.log(len(freq)))
ir_obs = adata.obs.loc[~_is_na(adata.obs[target_col]), :]
clono_counts = (
ir_obs.groupby([groupby, target_col], observed=True)
.size()
.reset_index(name="count")
)
diversity = dict()
for k in sorted(ir_obs[groupby].unique()):
tmp_counts = clono_counts.loc[clono_counts[groupby] == k, "count"].values
tmp_freqs = tmp_counts / np.sum(tmp_counts)
diversity[k] = _shannon_entropy(tmp_freqs)
if inplace:
key_added = "alpha_diversity_" + target_col if key_added is None else key_added
adata.obs[key_added] = adata.obs[groupby].map(diversity)
else:
return pd.DataFrame().from_dict(diversity, orient="index")
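# Worked example of the normalized entropy used above (illustrative only):
# for freq = [0.5, 0.25, 0.25], n = 3,
#   H = -(0.5*ln(0.5) + 2 * 0.25*ln(0.25)) / ln(3) ≈ 1.0397 / 1.0986 ≈ 0.946,
# while a uniform distribution [1/3, 1/3, 1/3] gives exactly 1.0.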
|
import socket
import sys
import time
total_bytes_sent = 0
total_messages_sent = 0
protocol = sys.argv[1]
stop_and_wait = int(sys.argv[2])
print('protocol ', protocol)
large_buffer = 'large_buffer'
large_buffer *= 100
large_buffer_size = len(large_buffer.encode('utf-8'))
mb_10 = 10485760
mb_500 = mb_10 * 50
mb_1000 = mb_500 * 2
mb_50 = mb_10 * 5
mb_100 = mb_10 * 10
if protocol == 'tcp':
print('using tcp')
# Create a TCP/IP socket
print('creating socket')
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Connect the socket to the port where the server is listening
server_address = ('172.19.90.35', 10000)
# server_address = ('localhost', 10000)
print('connecting to server')
sock.connect(server_address)
print('connected to server')
transmission_start_time = time.time()
nr_messages = int(mb_1000 / large_buffer_size)
for message in range(nr_messages):
if stop_and_wait == 1:
print(str(message) + '/' + str(nr_messages))
bytes_sent = sock.send(large_buffer.encode('utf-8'))
total_bytes_sent += bytes_sent
total_messages_sent += 1
ack = False
while not ack:
response = sock.recv(9999)
ack = True
else:
print(str(message) + '/' + str(nr_messages))
total_messages_sent += 1
bytes_sent = sock.send(large_buffer.encode('utf-8'))
total_bytes_sent += bytes_sent
else:
print('using udp')
transmission_start_time = time.time()
UDPClientSocket = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)
nr_messages = int(mb_1000 / large_buffer_size)
for message in range(nr_messages):
print(str(message) + '/' + str(nr_messages))
bytes_sent = UDPClientSocket.sendto(large_buffer.encode('utf-8'), ('172.19.90.35', 10000))
# bytes_sent = UDPClientSocket.sendto(large_buffer.encode('utf-8'), ('localhost', 10000))
total_messages_sent += 1
total_bytes_sent += bytes_sent
# UDPClientSocket.sendto(b"stop", ('localhost', 10000))
UDPClientSocket.sendto(b"stop", ('172.19.90.35', 10000))
print('used ' + protocol)
print('total messages sent ' + str(total_messages_sent))
print('total bytes sent ' + str(total_bytes_sent))
print("transmission time %.2f seconds " % (time.time() - transmission_start_time))
print("Buffer size " + str(large_buffer_size))
|
import boto3
dynamodb = boto3.resource('dynamodb', region_name='eu-west-1')
table = dynamodb.Table('product-active')
table.meta.client.get_waiter('table_exists').wait(TableName='product-active')
aliasesResponse = table.scan()
f = open('sample_outputs/product-active.txt', 'w')
names = []
for item in aliasesResponse['Items']:
active_substances = item['active']
product_name = item['product'].lower()
    active_substances = [actvsub.lower() for actvsub in active_substances]
    names.append([product_name, active_substances])
f.write(str(names))
f.close()
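# Note (added): Table.scan() returns at most 1 MB of data per call; to cover a
# larger table, follow LastEvaluatedKey, e.g.:
#
# items = []
# response = table.scan()
# items.extend(response['Items'])
# while 'LastEvaluatedKey' in response:
#     response = table.scan(ExclusiveStartKey=response['LastEvaluatedKey'])
#     items.extend(response['Items'])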
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Marco Torreggiani'
'''
Credits: Programming Challenges by Steven S. Skiena and Miguel A. Revilla
The problem is as follows: choose a number, reverse its digits and add it to the original.
If the sum is not a palindrome (which means, it is not the same number from left to right and right to left), repeat this procedure.
For example:
195 (initial number) + 591 (reverse of initial number) = 786
786 + 687 = 1473
1473 + 3741 = 5214
5214 + 4125 = 9339 (palindrome)
In this particular case the palindrome 9339 appeared after the 4th addition. This method leads to palindromes in a
few step for almost all of the integers. But there are interesting exceptions.
196 is the first number for which no palindrome has been found.
It is not proven though, that there is no such a palindrome.
INPUT SAMPLE:
Your program should accept as its first argument a path to a filename.
Each line in this file is one test case. Each test case will contain an integer n < 10,000.
Assume each test case will always have an answer and that it is computable with less than 100 iterations (additions).
OUTPUT SAMPLE:
For each line of input, generate a line of output which is the number of iterations (additions)
to compute the palindrome and the resulting palindrome. (they should be on one line and separated by a single space character).
'''
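# A minimal, self-contained sketch (added for illustration) of the
# reverse-and-add procedure described above, for a single number:
def reverse_and_add_count(n, max_iters=100):
    count = 0
    while str(n) != str(n)[::-1] and count < max_iters:
        n += int(str(n)[::-1])
        count += 1
    return count, n
# reverse_and_add_count(195) == (4, 9339), matching the worked example above.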
def palindromo(x):
    return x == x[::-1]
import sys
fil = open(sys.argv[1], 'r')
test_cases = fil.read().strip().splitlines()
for test in test_cases:
    snum = test
    c = 0
    while True:
        num = int(snum)
        snum = snum[::-1]
        snum = str(num + int(snum))
        c = c + 1
        if palindromo(snum):
            break
    print(c, snum) |
import flask
from flask_wtf import FlaskForm
from wtforms import BooleanField
from wtforms.validators import DataRequired
from costreport.services.admin_services import (
create_costcode,
check_if_project_has_costcodes,
add_default_costcodes_to_project,
)
from costreport.services.costcode_services import (
check_if_costcode_exists,
get_costcodes,
)
from costreport.services.projects_service import check_if_project_exists
blueprint = flask.Blueprint(
"add_default_costcodes_to_project",
__name__,
template_folder="templates",
url_prefix="/admin",
)
class AddDefaultCostcodesForm(FlaskForm):
tick_box = BooleanField("Apply default costcodes")
@blueprint.route("/add_default_costcodes_to_project", methods=["GET"])
def add_default_costcodes_to_project_get():
project_code = flask.request.args.get("project")
# check if the project exists
if check_if_project_exists(project_code) is False:
flask.abort(404)
form = AddDefaultCostcodesForm()
# check if there are existing costcodes
if check_if_project_has_costcodes(project_code):
flask.flash(
"Costcodes already exist, defaults cannot be imported", "alert-danger"
)
costcodes_exists = True
else:
costcodes_exists = False
return flask.render_template(
"admin/add_default_costcodes_to_project.html",
form=form,
project=project_code,
costcodes_exists=costcodes_exists,
)
@blueprint.route("/add_default_costcodes_to_project", methods=["POST"])
def add_default_costcodes_to_project_post():
project_code = flask.request.args.get("project")
if check_if_project_exists(project_code) is False:
flask.abort(404)
form = AddDefaultCostcodesForm()
if form.validate_on_submit():
print("Tick box is", form.tick_box.data)
if check_if_project_has_costcodes(project_code):
flask.flash("Costcodes already exist, defaults cannot be imported")
        elif form.tick_box.data is True:
            print("adding default costcodes to project")
            add_default_costcodes_to_project(project_code)
            return flask.redirect(flask.url_for("projects.projects"))
return flask.render_template(
"admin/add_default_costcodes_to_project.html", form=form, project=project_code,
)
|
import matplotlib
matplotlib.use('Qt4Agg')
# matplotlib.use('TKAgg')
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import matplotlib as mpl
from util.transformations import apply_matrix_to_vertices
mpl.rcParams['figure.figsize'] = 10, 8
class Painter:
def __init__(self, azimuth=5, elevation=45):
self.fig, self.ax = self.configure_plot(azimuth, elevation)
@staticmethod
def configure_plot(azimuth, elevation):
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
plt.ion()
ax.view_init(azim=azimuth, elev=elevation) # default: 30, 60 nice: 5, 45
## tried to make x, y, z axis have the same unit, but not working...
# ax = fig.add_subplot(111, projection='3d', aspect='equal')
# ax.axis('equal')
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
return fig,ax
def draw(self):
# NOTE the following 2 lines of code to set aspect "equal" are from
# http://stackoverflow.com/questions/8130823/set-matplotlib-3d-plot-aspect-ratio
scaling = np.array([getattr(self.ax, 'get_{}lim'.format(dim))() for dim in 'xyz'])
self.ax.auto_scale_xyz(*[[np.min(scaling), np.max(scaling)]]*3)
plt.draw()
def show(self, block=True):
# NOTE the following 2 lines of code to set aspect "equal" are from
# http://stackoverflow.com/questions/8130823/set-matplotlib-3d-plot-aspect-ratio
scaling = np.array([getattr(self.ax, 'get_{}lim'.format(dim))() for dim in 'xyz'])
self.ax.auto_scale_xyz(*[[np.min(scaling), np.max(scaling)]]*3)
plt.show(block=block)
def draw_point(self, p, color='bo', line_width=1):
return self.ax.plot([p[0]], [p[1]], [p[2]], color, linewidth=line_width)
def draw_line(self, a, b, line_width=1, color='c-'):
link = np.array([a, b])
return self.draw_with_points(link, color=color, line_width=line_width)
def draw_with_points(self, verts, color='c-', line_width=1):
x_values = verts[:,0]
y_values = verts[:,1]
z_values = verts[:,2]
return self.ax.plot(x_values, y_values, z_values, color, linewidth=line_width)[0]
def draw_global_coordinates(self, unit=2):
self.draw_coordinates(unit=unit)
def draw_coordinates(self, transformation_matrix=None, unit=1):
x_axis = np.array([[0,0,0], [unit,0,0]])
y_axis = np.array([[0,0,0], [0,unit,0]])
z_axis = np.array([[0,0,0], [0,0,unit]])
if transformation_matrix is not None:
x_axis = apply_matrix_to_vertices(x_axis, transformation_matrix)
y_axis = apply_matrix_to_vertices(y_axis, transformation_matrix)
z_axis = apply_matrix_to_vertices(z_axis, transformation_matrix)
self.draw_with_points(x_axis, color='r-')
self.draw_with_points(y_axis, color='g-')
self.draw_with_points(z_axis, color='b-') |
import cocos
import pyglet
from cocos.director import director
class Background(cocos.layer.Layer):
def __init__(self):
super().__init__()
self.setBlock()
def setBlock(self, path_img=""):
self.__set(0, path_img)
def setPosible(self, path_img=""):
self.__set(1, path_img)
def setSelected(self, path_img=""):
self.__set(2, path_img)
def setCorupted(self, path_img=""):
self.__set(3, path_img)
def setImage(self, path_img):
if (path_img == ""):
path_img = "assets/sprites/scenes/center/center.png"
self.backgorund_imagen = pyglet.image.ImageGrid(pyglet.image.load(path_img),1,4)
def __set(self, bkg_img, path_img):
try:
self.remove(self.spr)
except AttributeError:
pass
self.setImage(path_img)
self.spr = cocos.sprite.Sprite(self.backgorund_imagen[bkg_img], scale=0.8)
self.spr.position = (450, 450)
self.add(self.spr) |
# class suibian(object):
# def __init__(self,value):
# self.value=value
# def __call__(self,arg1,arg2):
# return arg1-arg2
# class Sample:
# def __enter__(self):
# return self
# def __exit__(self, type,
# value, trace):
# print ("type:", type)
# print ("value:",value)
# print ("trace:",trace)
# def do_something(self):
# bar = 1/0
# return bar + 10
# if __name__=='__main__':
# with Sample() as sample:
# sample.do_something()
# def line_conf(a,b):
# # v={}#use dict to store
# # v['0']=a*b
# v=[0]
# v[0]=a*b
# def line(x):
#         # nonlocal i  # Python 3 only: declares that the variable belongs to the enclosing scope, not this local function
# # v['0']=v['0']+x
# v[0]=v[0]+x
# return v[0]*x+b
# return line
# if __name__=='__main__':
# l1=line_conf(4,5)
# # l2=line_conf(10,4)
# print(l1(5))
# # print(l2(3))
# Example for the recursion:
# Recursion in the generator,
# def rerc_generator(l):
# for s in l:
# if isinstance(s,list):
#         # yield rerc(s)  # this doesn't work, since each yield would produce a new generator object
# for item in rerc(s):
# yield item
# else:
# yield s
# # if we recurse by the value instead of generator:
# def rerc(l):
# for s in l:
# if isinstance(s,list):
# rerc(s)
# #         yield rerc(s)  # this doesn't work, since each yield would produce a new generator object
# # for item in rerc(s):
# # yield item
# else:
# print s
# def facto(n):
# if n>=1:
# return n * facto(n-1)
# else:
# return 1
# #Pickle module
# import pickle
# class Bird(object):
# def __init__(self,n):
# self.n=n
# if __name__=='__main__':
# a=Bird(3)
# # b=pickle.dumps(a)
# fn='a.pkl'
# with open(fn,'w') as f:
# pickle.dumps(a,f)
# with open('test.data','r') as f:
# b=pickle.load(f)
# print b.n
# Experiment about the attribute in class
# class Circle(object):
# def __init__(self,name):
# self.name=name
# def __getattr__(self,name):
# if name=='area':
# return 4
# elif name=='length':
# return 8
# else:
# object.__getattr__(self,name)
# def __setattr__(self,name,value):
# if name in ['area','length']:
# raise TypeError('not acceptable')
# object.__setattr__(self,name,value)
# Overload __repr__(self) and __str__(self)
# class complex(object):
# def __init__(self,real,img):
# self.real=real
# self.img=img
# def __repr__(self):
# return 'Complex(%s,%sjjj)' % (self.real,self.img)
# def __str__(self):
# return '(%g+%gjjjjj)' % (self.real,self.img)
# def __add__(self,other):
# return complex(self.real+other.real,self.img+other.img)
# if __name__=='__main__':
# a=complex(2,3)
#     a+complex(2,3)  # the result goes through __repr__(self) and is shown in the interactive shell
#     print a+complex(2,3)  # __str__ is the simpler form, used by the print statement; if we
#     # don't define __str__(self), print falls back to __repr__(self)
# __call__ method in the class:
# class A(object):
# def __init__(self,value):
# self.value = value
# def __call__(self,value):
# return self.value * value
"""
__getattr__ and __setattr__
Instance attributes are stored in the dictionary self.__dict__;
note that the keys of this dictionary must be strings.
"""
# class A(object):
# def __getattr__(self,value):
# if value=='name':
# return 40
# else:
# raise AttributeError, name
# def __setattr__(self,key,value):
# if key == 'age':
# self.__dict__[key]=value
# else:
# raise AttributeError,key
# if __name__=='__main__':
# a=A()
# print a.name
# # print a.age
# # a.name=1
# a.age=12
"""
an example of a decorator
"""
# from functools import wraps
def mu(x):
    # @wraps(x)
    def _mu(*args, **kwargs):
        a, b = args
        print("in decorator")
        return a * a
    return _mu
@mu
def test(x, y):
    print(x, y)
if __name__ == '__main__':
    print(test(2, 3))
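# Added note: without functools.wraps, the decorated test.__name__ is '_mu';
# uncommenting the @wraps(x) line above preserves the wrapped function's
# metadata, e.g.:
#
# from functools import wraps
# def mu(x):
#     @wraps(x)
#     def _mu(*args, **kwargs):
#         a, b = args
#         print("in decorator")
#         return a * a
#     return _mu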
|
guess = 'please make a rock, paper or scissors guess: '
play = True
def play_again():
    answer = input('Would you like to play again? ')
    return answer == 'y'
while play:
p1 = input(f'Player {1} {guess}')
p2 = input(f'Player {2} {guess}')
if p1 == p2:
print("It's a tie")
elif p1 == 'rock' and p2 == 'scissors':
print('p1 wins')
play = play_again()
elif p1 == 'scissors' and p2 == 'paper':
print('p1 wins')
play = play_again()
elif p1 == 'paper' and p2 == 'rock':
print('p1 wins')
play = play_again()
else:
print('p2 wins')
play = play_again()
|
#!/usr/bin/env python
# coding: utf-8
# In[9]:
# Build the functions needed for calendar work
# A function that takes a year and returns True if it is a leap year, False if it is a common year
# By convention, names of functions that return a boolean, or variables that store one, start with 'is'
def isLeapYear(year):
    # a leap year is divisible by 4 and not by 100, or divisible by 400
    return year % 4 == 0 and year % 100 != 0 or year % 400 == 0
# A function that takes a year and a month and returns the last day of that month
def lastDay(year, month):
    # build a list holding the last day of each of the 12 months
    m = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    # adjust February's last day for leap versus common years
    if isLeapYear(year):
        m[1] = 29
    # return the last day
    return m[month - 1]
# A function that takes a year, month, and day and returns the total days elapsed since January 1 of year 1
def totalDay(year, month, day):
    # count the days from January 1 of year 1 through December 31 of the previous year
    total = (year - 1) * 365 + (year - 1) // 4 - (year - 1) // 100 + (year - 1) // 400
    # add the days of the months already completed in the current year
    for i in range(1, month):
        total += lastDay(year, i)
    # add the day of the current month and return
    return total + day
# A function that takes a year, month, and day and returns the weekday as a number
# Sunday(0), Monday(1), Tuesday(2), Wednesday(3), Thursday(4), Friday(5), Saturday(6)
def weekDay(year, month, day):
    return totalDay(year, month, day) % 7
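# Cross-check sketch (added): datetime.date.weekday() uses Monday=0..Sunday=6,
# so shifting it by one maps onto the Sunday=0 convention used by weekDay().
# import datetime
# for y, m, d in [(1, 1, 1), (2020, 2, 29), (2023, 12, 25)]:
#     assert weekDay(y, m, d) == (datetime.date(y, m, d).weekday() + 1) % 7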
# In[4]:
if __name__ == '__main__':
print(isLeapYear(2020))
print(lastDay(2020, 3))
print(totalDay(2020, 3, 4))
# In[10]:
get_ipython().system(' jupyter nbconvert --to script calendarModule.ipynb')
|
from deeprobust.graph.data import Dataset
from deeprobust.graph.defense import DeepWalk, Node2Vec
from deeprobust.graph.global_attack import NodeEmbeddingAttack
import numpy as np
dataset_str = 'cora_ml'
data = Dataset(root='/tmp/', name=dataset_str, seed=15)
adj, features, labels = data.adj, data.features, data.labels
idx_train, idx_val, idx_test = data.idx_train, data.idx_val, data.idx_test
attacker = NodeEmbeddingAttack()
attacker.attack(adj, attack_type="remove", n_perturbations=1000)
modified_adj = attacker.modified_adj
# train defense model
print("Test DeepWalk on clean graph")
model = DeepWalk()
model.fit(adj)
model.evaluate_node_classification(labels, idx_train, idx_test)
# model.evaluate_node_classification(labels, idx_train, idx_test, lr_params={"max_iter": 1000})
print("Test DeepWalk on attacked graph")
model.fit(modified_adj)
model.evaluate_node_classification(labels, idx_train, idx_test)
print("Test DeepWalk on link prediciton...")
model.evaluate_link_prediction(modified_adj, np.array(adj.nonzero()).T)
print("Test DeepWalk SVD on attacked graph")
model = DeepWalk(type="svd")
model.fit(modified_adj)
model.evaluate_node_classification(labels, idx_train, idx_test)
print("Test Node2vec on attacked graph")
model = Node2Vec()
model.fit(modified_adj)
model.evaluate_node_classification(labels, idx_train, idx_test)
|
import tensorflow as tf
from layers.activation import relu
from layers.pooling import max_pool_2D
from layers.trainable import fc, conv_2D, residual_block
from layers.normalization import bn
from layers.regularization import weight_decay, var, shade, shade_conv
#import math
import numpy as np
REG_COEF = 0.8
FC_WEIGHT_STDDEV=0.01
CONV_WEIGHT_STDDEV=0.01
CONV_WEIGHT_DECAY = 0#0.0001
FC_WEIGHT_DECAY= 0#0.0005
#CONV_WEIGHT_DECAY = 0.000000005
#FC_WEIGHT_DECAY= 0.00000001
def optim_param_schedule(monitor):
epoch = monitor.epoch
momentum = 0.9
if epoch < 120:
lr = 0.01
elif epoch < 180:
lr = 0.001
else:
lr = 0.00001
return {"lr":lr, "momentum":momentum}
def layer_regularizer():
n_layers = 5
regs = []
for i in range(n_layers):
regs.append([tf.reduce_sum(tf.get_collection('layer_'+str(i+1)+'_reg')), tf.get_collection('layer_'+str(i+1)+'_variables')])
return regs
conv_init = tf.truncated_normal_initializer(stddev=CONV_WEIGHT_STDDEV)
fc_init = tf.truncated_normal_initializer(stddev=FC_WEIGHT_STDDEV)
def inference(inputs, training_mode):
x = inputs
with tf.variable_scope('layer_5'):
field = 'conv_1'
n_out = 16
x, params = conv_2D(x, 5, 1, n_out, conv_init, field, True)
weight_decay(x, params, tf.get_variable_scope().name, field, CONV_WEIGHT_DECAY)
#x = relu(x)
x = max_pool_2D(x, 3, 2)
with tf.variable_scope('layer_4'):
field = 'res1'
ksizes = [3,3]
strides = [1, 1]
filters_out = [32, 32]
x, params = residual_block(x, ksizes, strides, filters_out, conv_init, relu, field, training_mode)
#x, params = conv_2D(x, 5, 1, n_out, conv_init, field, True)
weight_decay(x, params, tf.get_variable_scope().name, field, CONV_WEIGHT_DECAY)
#tf.add_to_collection(collection+'_reg', tf.multiply(reg, CONV_WEIGHT_DECAY, name='reg'))
x = max_pool_2D(x, 3, 2)
with tf.variable_scope('layer_3'):
n_out = 64
field = 'conv_3'
x, params = conv_2D(x, 5, 1, n_out, conv_init, field)
weight_decay(x, params, tf.get_variable_scope().name, field, CONV_WEIGHT_DECAY)
x = bn(x, training_mode, field)
x = relu(x)
x = max_pool_2D(x, 3, 2)
x = tf.reshape(x, [-1, 4*4*n_out])
with tf.variable_scope('layer_2'):
field = 'fc1'
n_out = 1000
x, params = fc(x, n_out, fc_init, field)
weight_decay(x, params, tf.get_variable_scope().name, field, FC_WEIGHT_DECAY)
x = relu(x)
with tf.variable_scope('layer_1'):
field = 'fc2'
outputs, params = fc(x, 10, fc_init, field)
weight_decay(outputs, params, tf.get_variable_scope().name, field, FC_WEIGHT_DECAY)
return outputs, outputs
|
# from django.shortcuts import get_object_or_404
# from rest_framework.decorators import api_view, permission_classes
# from rest_framework import status, permissions
# from rest_framework.exceptions import PermissionDenied
# from django.http import HttpResponse, JsonResponse
# from django.core.paginator import Paginator
# from django.contrib.auth.decorators import permission_required
# from .serializer import PetSerializer
# from .models import Pet
# # Create your views here.
#
#
#
# # from rest_framework.views import APIView
# # from rest_framework.parsers import MultiPartParser, FormParser
# # from rest_framework.response import Response
# # from rest_framework import status
# # from apps.petstore.models import *
# # from apps.petstore.serializer import PetSerializer
# #
# #
# # class IndexPetView(APIView):
# # # MultiPartParser AND FormParser
# # # https://www.django-rest-framework.org/api-guide/parsers/#multipartparser
# # # "You will typically want to use both FormParser and MultiPartParser
# # # together in order to fully support HTML form data."
# # parser_classes = (MultiPartParser, FormParser)
# #
# # def post(self, request, *args, **kwargs):
# # file_serializer = PetSerializer(data=request.data)
# # if file_serializer.is_valid():
# # file_serializer.save()
# # return Response(file_serializer.data, status=status.HTTP_201_CREATED)
# # else:
# # return Response(file_serializer.errors, status=status.HTTP_400_BAD_REQUEST)
#
#
# # from django.shortcuts import render
# # from django.views import generic
# # from django.contrib.auth.mixins import LoginRequiredMixin
# # from apps.petstore.models import *
# # # Create your views here.
# #
# #
# #
# # def getPetById(petId):
# # """
# # Find pet by ID, Returns a pet when ID < 10. " + "ID > 10 or nonintegers will simulate API error conditions
# # ApiErrors
# # - 400, reason = "Invalid ID supplied"
# # - code = 404, reason = "Pet not found"
# # :param petId: required = true, ID of pet that needs to be fetched
# # :return:
# # """
# # print('Hello')
# #
# #
# # def addPet(pet):
# # """
# # Add a new pet to the store
# # API Errors
# # - 405, Invalid input
# # :param pet: required = true, Pet object that needs to be added to the store
# # :return:
# # """
# # print('Hello')
# #
# #
# # def updatePet(pet):
# # """
# # Update an existing pet
# # 400 - Invalid ID supplied
# # 404 - pet not found
# # 405 - Validation exception
# # :param pet: required = true, Pet object that needs to be added to the store
# # :return:
# # """
# # print('Hello')
# #
# #
# #
# # def findPetsByStatus(status):
# # """
# # Finds Pets by status, Multiple status values can be provided with comma separated strings.
# # Invalid status value if 400
# # status - Status values that need to be considered for filter
# # :param status: status required=true, defaultValue="available", allowableValues="available,pending,sold", allowMultiple=true
# # :return:
# # """
# # print('Hello')
# #
# #
# # def findPetsByTags(tags):
# # """
# # "Finds Pets by tags, Multiple tags can be provided with comma separated strings. Use tag1, tag2, tag3 for testing."
# # :param tags: tags String value = "Tags to filter by", required = true, allowMultiple = true
# # :return:
# # """
# # print('Hello')
# #
# #
# # #
# # # class Index(LoginRequiredMixin, generic.View):
# # # template_name = "index.html"
# # # login_url = 'login/'
# # #
# # # def __init__(self, **kwargs):
# # # pass
# # #
# # # def get(self, request):
# # # if request.user.is_superuser:
# # # organizations = Pet.objects.filter(owner=request.user)
# # # else:
# # # organizations = Pet.objects.filter()
# # #
# # # return render(request, self.template_name, {'organizations': organizations})
# # #
# # #
# # # class OrganizationProfile(LoginRequiredMixin, generic.DetailView):
# # # template_name = "profile.html"
# # # login_url = 'login/'
# # #
# # # def __init__(self, **kwargs):
# # # pass
# # #
# # # def get(self, request, petId, *args, **kwargs):
# # # organization = Pet.objects.get(pk=petId)
# # #
# # # if request.user.is_superuser:
# # # organization = Pet.objects.get(pk=petId)
# # #
# # # return render(request, self.template_name, {'organization': organization})
# # #
# # #
# # # class Browse(LoginRequiredMixin, generic.View):
# # # template_name = "browse.html"
# # # login_url = 'login/'
# # #
# # # def __init__(self, **kwargs):
# # # pass
# # #
# # # def get(self, request):
# # # categories = Category.objects.filter(active=True).order_by('name')
# # # cities = City.objects.filter(active=True).order_by('name')
# # # return render(request, self.template_name, {'categories': categories, 'cities': cities})
#
#
# # from apps.petstore.models import Pet
# # from apps.petstore.serializer import *
# # from apps.petstore.permissions import ReadOnly
# # from rest_framework.response import Response
# # from django.http import Http404
# # # Create your views here.
# #
# # ################################# Function Based APIView ###############################################################
# #
# #
# # from rest_framework.views import APIView
# # from apps.petstore.serializer import HelloWorldSerializer
# #
# #
# # # class HelloWorldView(APIView):
# # # def get(self, request):
# # # return Response({"message": "Hello World!"})
# # #
# # # def post(self, request):
# # # serializer = HelloWorldSerializer(data=request.data)
# # # if serializer.is_valid():
# # # valid_data = serializer.data # Cleaning same as we do in Django Forms
# # #
# # # name = valid_data.get("name")
# # # age = valid_data.get("age")
# # #
# # # return Response({"message": "Hello {}, you're {} years old".format(name, age)}, status=status.HTTP_201_CREATED)
# # # else:
# # # return Response({"errors": serializer.errors}, status=status.HTTP_400_BAD_REQUEST)
# #
# #
# # class RESTPetView(APIView):
# #
# # def get_object(self, pk):
# # try:
# # return Pet.objects.get(pk=pk)
# # except Pet.DoesNotExist:
# # raise Http404
# #
# # def get(self, request):
# # all_pets = Pet.objects.filter(status='available').all()
# # serialized_pets = PetSerializer(all_pets, many=True)
# # return Response(serialized_pets.data)
# #
# # # def get(self, request, pk, format=None):
# # # product = self.get_object(pk)
# # # serializer = PetSerializer(product)
# # # return Response(serializer.data)
# #
# # def post(self, request):
# # serializer = PetSerializer(data=request.data)
# # if serializer.is_valid():
# # subscriber_instance = Pet.objects.create(**serializer.data)
# # return Response({"message": "Created Pet {}".format(subscriber_instance.id)})
# # else:
# # return Response({"errors": serializer.errors})
# #
# # def put(self, request, pk, format=None):
# # product = self.get_object(pk)
# # serializer = PetSerializer(product, data=request.data)
# # if serializer.is_valid():
# # serializer.save()
# # return Response(serializer.data)
# # return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
# #
# # def delete(self, request, pk, format=None):
# # product = self.get_object(pk)
# # product.delete()
# # return Response(status=status.HTTP_204_NO_CONTENT)
# # #
# # # ################################# DRF: ViewSet, ModelViewSet and Router #################################
# # #
# # #
# # # from rest_framework import viewsets, permissions, status
# # #
# # #
# # # # class UserViewSet(viewsets.ModelViewSet):
# # # # """
# # # # Provides basic CRUD functions for the User model
# # # # """
# # # # queryset = User.objects.all()
# # # # serializer_class = UserSerializer
# # # # permission_classes = (ReadOnly, )
# # #
# # #
# # # class PetViewSet(viewsets.ModelViewSet):
# # # """
# # # Provides basic CRUD functions for the Pet model
# # # retrieve:
# # # Return a user instance.
# # #
# # # list:
# # # Return all users, ordered by most recently joined.
# # #
# # # create:
# # # Create a new user.
# # #
# # # delete:
# # # Remove an existing user.
# # #
# # # partial_update:
# # # Update one or more fields on an existing user.
# # #
# # # update:
# # # Update a user.
# # # """
# # # queryset = Pet.objects.all()
# # # serializer_class = PetSerializer
# # # # permission_classes = (permissions.IsAuthenticatedOrReadOnly, )
# # # # permission_classes = (permissions.IsAuthenticated,)
# # #
# # # def perform_create(self, serializer):
# # # serializer.save(user=self.request.user)
# # #
# # #
# # # class PetViewSet(viewsets.ModelViewSet):
# # # """
# # # Provides basic CRUD functions for the Pet model
# # # retrieve:
# # # Return a user instance.
# # #
# # # list:
# # # Return all users, ordered by most recently joined.
# # #
# # # create:
# # # Create a new user.
# # #
# # # delete:
# # # Remove an existing user.
# # #
# # # partial_update:
# # # Update one or more fields on an existing user.
# # #
# # # update:
# # # Update a user.
# # # """
# # # queryset = Pet.objects.all()
# # # serializer_class = PetSerializer
# # # # permission_classes = (permissions.IsAuthenticatedOrReadOnly, )
# # # # permission_classes = (permissions.IsAuthenticated,)
# # #
# # # def perform_create(self, serializer):
# # # serializer.save(user=self.request.user)
# # #
# # #
# # # class findByStatusView(APIView):
# # # """
# # # summary: "Finds Pets by status"
# # # description: "Multiple status values can be provided with comma seperated strings"
# # # operationId: "findPetsByStatus"
# # # parameters:
# # # - name: "status"
# # # description: "Status values that need to be considered for filter"
# # # required: true
# # # type: "array"
# # # items:
# # # type: "string"
# # # enum:
# # # - "available"
# # # - "pending"
# # # - "sold"
# # # default: "available"
# # # """
# # #
# # # def get(self, request):
# # # all_hashtags = Pet.objects.all()
# # # serialized_hashtags = PetSerializer(all_hashtags, many=True)
# # # return Response(serialized_hashtags.data) |
#!/usr/bin/env python
from absl import app
from grr_response_core.lib import rdfvalue
from grr_response_server.export_converters import rdf_primitives
from grr.test_lib import export_test_lib
from grr.test_lib import test_lib
class RDFBytesToExportedBytesConverterTest(export_test_lib.ExportTestBase):
def testRDFBytesConverter(self):
data = rdfvalue.RDFBytes(b"foobar")
converter = rdf_primitives.RDFBytesToExportedBytesConverter()
results = list(converter.Convert(self.metadata, data))
self.assertNotEmpty(results)
exported_bytes = [
r for r in results if r.__class__.__name__ == "ExportedBytes"
]
self.assertLen(exported_bytes, 1)
self.assertEqual(exported_bytes[0].data, data)
self.assertEqual(exported_bytes[0].length, 6)
class RDFStringToExportedStringConverterTest(export_test_lib.ExportTestBase):
def testRDFStringConverter(self):
data = rdfvalue.RDFString("foobar")
converter = rdf_primitives.RDFStringToExportedStringConverter()
results = list(converter.Convert(self.metadata, data))
self.assertLen(results, 1)
self.assertIsInstance(results[0], rdf_primitives.ExportedString)
self.assertEqual(results[0].data, str(data))
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
app.run(main)
|
from django.shortcuts import render
from django.db.models import Q
from django.shortcuts import render_to_response
from consolecommand import models
from datetime import datetime
import datetime
from django.db.models import Count
from django.db import connection
def top_100(request):
    # first 100 order items ordered by amount, each annotated with the number of rows sharing its product name
    # alternative: .values('product_name').annotate(product_name_=Count('product_name'))
    items = (models.OrderItems.objects.all()
             .annotate(product_name_=Count('product_name'))
             .order_by('amount')[:100])
    print(items.query)
    queries = connection.queries
    total_queries = len(queries)
    return render_to_response("reporttop100_template.html", {
        "items": items,
        "total_queries": total_queries,
    }) |
from django import forms
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm, UsernameField
from django.contrib.auth.models import User
from .models import Students, UserCollege, TShirt, CodingCompetition
# class CustomAuthenticationForm(AuthenticationForm):
# username = UsernameField(
# label = 'Team Name',
# widget = forms.TextInput(attrs={'autofocus':True})
# )
class UserRegisterForm(UserCreationForm):
email = forms.EmailField()
class Meta:
model = User
fields = ['username', 'email', 'password1', 'password2']
# def __init__(self, *args, **kwargs):
# super(UserRegisterForm, self).__init__(*args, **kwargs)
# self.fields['username'].label = "Team Name"
# def __init__(self, *args, **kwargs):
# super(UserRegisterForm, self).__init__(*args, **kwargs)
# self.fields['first_name'].required = True
class CollegeForm(forms.ModelForm):
class Meta:
model = UserCollege
exclude = ['user']
class ShirtForm(forms.ModelForm):
class Meta:
model = TShirt
fields = "__all__"
class CodingCompetitionForm(forms.ModelForm):
class Meta:
model = CodingCompetition
fields = "__all__"
|
#!/usr/bin/env python3
# coding=utf-8
import os
import sys
print("hello")
print("world")
print(2 ** 8)
jack = "okay"
print(jack)
for x in "spam":
    print(x)
print("done")
print(sys.platform)
# from imp import reload  # avoid this usage
# reload(hello)
import myfile
print(myfile.title)  # import the module first, then read its attribute
from myfile import title  # roughly copies the title attribute out of myfile, making it a direct variable of the importer
print(title)
exec(open("myfile.py").read())  # run a module file with exec; similar in effect to import, but no module object is created
# exec(open("myfile.py"))  # TypeError: exec() needs a string, not a file object
# L = [1,2]
# L.append(L)
# print (L)
print("-" * 50)
# Chapter 4
# programs -- modules -- statements -- expressions -- create and process objects
# numbers       1234
# strings       "spam"
# lists         [1,2]
# dictionaries  {"quality":4,"color":"red"}
# tuples        (1,'spam',4,'u')
# sets          set('abc'), {'a','b','c'}
# files         myfile = open('eggs','r')
# other types   types, None, booleans
# functions     def
# modules       import
# classes       class
# numbers
print("numbers ===")
length = len(str(2 ** 10))  # length of the decimal representation
print(length)
import math  # math module
print(math.pi, math.sqrt(85))
import random  # random-number module
print(random.random())
print(random.choice([1, 2, 3, 4]))  # random chooser
# strings:
print("strings ====")
s = "spam"
print(len(s), s[2], s[-1])  # negative offsets index from the right end
print(s[1:3])  # slice: offsets 1 up to but not including 3, two items
print(s[1:], s[0:3], s[:-1], s[:])  # the left bound defaults to 0, the right bound to the sequence length
print(s + "xyz")  # + concatenates strings
print(s * 8)  # * repeats a string
# among the core types, numbers, strings, and tuples are immutable
# dictionaries and lists can be changed freely
print(s.find("pa"))  # substring search: returns the offset if found, -1 otherwise
print(s.replace("pa", 'xyz'))
line = "aaa,bbb,ccc,ddd\n"
print(line.split(","))  # split on commas
print(line.upper())  # convert to uppercase
print(line.isalpha())  # content test: letters, digits, etc.
print(line.rstrip())  # strip trailing whitespace
# help(line.strip)
# lists:
print("lists ===")
# an ordered, positional collection of arbitrarily typed objects; like an array, but with no fixed type constraint
l = [123, "spam", 1.23]
l.append("ni")  # grow the list: insert an item at the end
print(l)
print(l.pop(2))  # remove the item at the given offset, shrinking the list
print(l)
# l.sort()  # sorting this mixed int/str list raises TypeError in Python 3
l.reverse()  # reverse in place
# col2 = [row[1] for row in M]  # collect row[1] of every row of matrix M into a new list
# dictionaries: key/value pairs, a mapping
print("dictionaries ===")
d = {"food": "spam", "quality": 4, "color": "red"}
print(d["food"])
d["quality"] += 1
print(d)
d = {}
d["name"] = "bob"
d["job"] = "it"
d["age"] = 30
print(d)
print(d["name"])
# introducing for
d = {"a": 1, "b": 2, "c": 3}
print(d)
ks = list(d.keys())
print(ks)
ks.sort()
print(ks)
for key in ks:
    print(key, "=>", d[key])
for c in "spam":
    print(c.upper())
x = 4
while x > 0:
    print("spam!" * x)
    x -= 1
squares = [x ** 2 for x in [1, 2, 3, 4, 5]]  # comprehension form
print(squares)
# equivalent to the following for loop
squares = []
for x in [1, 2, 3, 4, 5]:
    squares.append(x ** 2)
print(squares)
# if not "f" in d:
#     print ("missing")
# tuples:
print("tuples ========")
t = (1, 2, 3, 4)
print(t)
t + (5, 6)
print(t, t[0])
print(t.index(3))  # find the index of a value
print(t.count(4))  # count occurrences of a value
# files:
f = open("data.txt", "w")
f.write("hello")
f.write(",world")
f.close()
f = open("data.txt", "r")  # read mode; "r" is the default and can be omitted
text = f.read()
print(text, text.split())  # split slices the string on whitespace
text2 = open("data.txt", "rb").read()
print(text2[4:8])
# sets
print("sets =====")
x = set("spam")
print(x)
if type(l) == type([]):
    print("yes")
# user-defined classes
print("defining classes ====")
class Worker:
    def __init__(self, name, pay):
        self.name = name
        self.pay = pay
    def lastName(self):
        return self.name.split()[-1]
    def giveRaise(self, percent):
        self.pay *= (1.0 + percent)
bob = Worker("bob smith", 50000)
sue = Worker("sue jones", 60000)
print(bob.lastName())
sue.giveRaise(.10)
print(sue.pay)
# immutable: numbers, strings, tuples
# sequences: strings, lists, tuples -- ordered by position
# numeric literals
# octal:  0o1, 0o2
# hex:    0x01, 0x02
# binary: 0b1, 0b1001
# oct() converts a decimal number to its octal representation
# hex() converts a decimal number to its hexadecimal representation
# bin() converts a decimal number to its binary representation
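# added demo of the conversion helpers just described
print(oct(8), hex(255), bin(5))  # 0o10 0xff 0b101
print(0o10, 0xff, 0b101)         # 8 255 5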
x = set("abcde")
y = set("bdxyz")
print x
print x-y
print x|y
print x&y
print x^y
x.add("f")
print x
x.update("x")
print x
x.remove("b")
print x
for item in x:
print (item *3)
print ("-" * 50)
# 第6章:动态类型
# a = 3 表示变量a变成对象3的一个引用,在内部,变量事实上是到对象内存空间的一个指针
# python中变量名没有类型,对象才有类型
# a = 3
# a = "string"
# a = 1.23
# 上面只是把a修改为对不同的对象的的引用
# python中,每当一个变量名被赋予一个新的对象,之前的那个对象占用的空间就会被回收(如果它没有被其他的变量名或对象所引用的话),这种自动回收对象空间的技术叫垃圾收集
# 共享引用:多个变量名引用了同一个对象
# a = 3
# b = a
L1 = [2,3,4]
L2 = L1[:] # copy L1对象,从头到尾的分片
L1[0] = 24
print L1,L2
import copy #通用的复制任意对象类型的调用
L2 = copy.copy(L1)
L3 = copy.deepcopy(L1) #copy嵌套对象结构(如嵌套了列表的一个字典)
print L2,L3
# 检查是否相等
# ==操作符,测试两个被引用的对象是否有相同的值
# is操作符,检查对象的同一性,如果两个变量名精确地指向同一个对象,则返回true
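# added demo: == compares values, is compares identity
M = [1, 2, 3]
N = M           # shared reference: both names point to the same object
P = [1, 2, 3]   # equal value, distinct object
print(M == N, M is N)  # True True
print(M == P, M is P)  # True False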
print ("-" * 50)
# 第7章:字符串
# s = b'spam'
# s = u'spam' #使用的unicode字符串
# s1 + s2 #合并,重复
# s * 3
# s[i] #索引,分片,求长度
# s[i:j]
# len(s) #返回多少个字节
# s.find("a") #搜索
# s.rstrip() #移除空格
# s.replace("pa","xx")#替换
# s.split(",") #分隔
# s.isdigit()#内容测试
# s.endswith("spam")#结束测试
print s
# myfile = open(r"c:\new\text.dat","w") #r表示关闭转义机制
# myfile = open("c:\\new\\text.dat","w")
# 偏移和分片:位置偏移从左至右(偏移0为第一个元素),而负偏移是由末端右侧开始计算(偏移-1为最后一个元素)
# s[0]
# s[-2] #取倒数第二个元素
# s[i:j]
# s[1:3]#取偏移为1的元素,直到但不包括偏移为3的元素
# s[1:]#取偏移为1的元素,直到末尾
# s[:3]#取偏移为0的元素,直到但不包括偏移为3之间的元素
# s[:-1]#取偏移为0的元素,直到但不包括最后一个元素之间的元素
# s[:]#取偏移为0到末尾的元素
# s[1:10:2] #取偏移值1-9之间,间隔一个元素的元素
print (sys.argv)
print int("42") #int将字符串转换为数字
print str(42) #将数字转换为字符串
s = "42"
i = 1
print int(s)+1
print ord("s")
# 字符串是不可变序列
# 若要改变
s = "splot"
s = s.replace("pl","pamal")
print s
s = "spammy"
l = list(s) #以任意序列中的元素创立一个新的列表
print l
l[3] = "x"
print l
s = "".join(l) #join 将列表合成一个字符串
print s
line = "aaa bbb ccc"
cols = line.split()#将一个字符串侵害为一个子字符串的列表
print cols
line2 = "aaa,bbb,ccc"
cols2 = line2.split(",")
print cols2
list = list("spam")
print ("first = {0[0]},third = {0[2]}".format(list))
print ("the %s side %s %s" % ("bright","of","life"))
# 不可变性:数字,字符串,元组,不可变集合
# 可变性:列表,字典,可变集合
# 第8章: 列表与字典
# 列表:
# 任意对象的有序集合
# 通过偏移读取
l = []
l = ["a",["b","c"]]
# l = list("spam")
# l = list(range(-4,4))
# l1 + l2 #合并,重复
# l * 3
# for x in l: print (x) #迭代,成员关系
# 3 in l
# l.append(4) #末尾增加
# l.extend([5,6,7])
# l.insert(0,8)#插入
# l.count(6)#次数
# l.sort()#排序
# l.reverse()#反转
# del l[6] #删除index对应的值
# del l[2:4] #删除指定index范围内元素
# l.pop()#删除最后一个元素
# l.pop(1)#删除指定index元素
# l.remove("a")#删除某个元素
l = [x ** 2 for x in range(5)]
# l = list(map(ord,"spam"))
print l
for x in [1,2,3]:
print (x)
print ("后进先出LIFO last-in-first-out")
l = []
l.append(1)
l.append(2)
print l
l.pop()
print l
##############################
print ("先进先出FIFO first-in-first-out")
l = []
l.append(1)
l.append(2)
l.reverse()
print l
l.pop()
print l
# 字典
d = {}
d = {"spam":2,"eggs":3}
d = {"food":{"ham":1,"egg":2}}
print d
# d = dict(zip(keyslist,valsist))
# d = dict(name = "bob",age =2)
# d.keys()#键
# d.values()#值
# d.items()#键 + 值
# d.copy()#副本
# d.get(key,default)
# d.update(d2) #合并
# d.pop(key) #删除
# len(d)
# d[key] = 4 #新增/修改键
# del d[key]#根据键删除条目
# list(d.keys())#字典视图
d = {"spam":3,"eggs":5}
print d["eggs"]
d2 = {"toast":4,"muilk":5}
d.update(d2)
print d
####################################
table = {"python":"Guido van Rossum",
"perl":"Larry Wall",
"tcl":"John Ousterhout"}
language = "python"
creater = table[language]
print creater
for lang in table:
print (lang,table[lang])
###################################
L = [0]*100
L[99] = "SPAM"
print L
L = {}
L[99] = "SPAM"
print L
###################################
rec = {}
rec["name"] ="xin"
rec["age"] = "33"
rec["job"] = ["it","worker"]
print rec, rec["name"],rec["job"][1]
# 创建字典
dict(name = "mel",age =45)
dict([("name","mel"),("age",45)])
d = dict.fromkeys(["a","b"],0) #对所有键初始化
print d
k = d.keys()
print k
v = d.values()
print v
l = d.items()
print l
for k in d.keys():
print k
for key in d:
print key
####################################
# 第9章:元组,文件
|
# Use a for loop on a list
"""list1 = ["Shivam", "Rohilla", "Shubham", "Harry"]
for item in list1:
print(item)"""
# List in List
"""list2 = [["Shivam", 20], ["Rohilla", 21], ["Shubham", 25], ["Harry", 30]]
for item, age in list2:
print(item, age)
"""
# Use for loop in dictionary
list1 = [["Shivam", 20], ["Rohilla", 21], ["Shubham", 25], ["Harry", 30]]
dict1 = dict(list1)
for item, age in dict1.items():
print(item, "and age is", age) |
import torch
import utils
import gc
import pickle
from torch.utils.data import DataLoader
from ValuesDataset import ValuesDataset, CreateSubSet, TrainingSubset
from modelset import ModelSet
from tensorboardX import SummaryWriter
frame_file = 'gooddata0.pt'
state_file = "./state.pt"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
workers = 0 #(4 if device == "cpu" else 0)
writer = SummaryWriter()
DEBUG_QUICK = False
class PremotorCortex():
def __init__(self):
self.data = ValuesDataset(frame_file,device)
self.modelsets = []
self.primeModel = None
        self.batch_count = 20000 #(100 if DEBUG_QUICK else 20000) #10000 frames is approximately a full game's worth (including overtime at 30fps)
self.train_batchsize = (100 if DEBUG_QUICK else 500)
print(device)
return
def cacheData(self):
if not self.data.use_cache:
print("loading data to cache")
for i in range(len(self.data)):
o = self.data.__getitem__(i)
self.data.use_cache = True
def primeModels(self):
from modelset import ModelSet
iter = 0
self.primeModel = ModelSet(16, 8, device)
self.primeModel._name = " " + self.primeModel._name
self.loader = DataLoader(self.data, batch_size=self.train_batchsize, shuffle=True, num_workers=workers)
self.time_start = utils.TimestampMillisec64()
while iter < 500000:
for batch_iter, batch_sample in enumerate(self.loader):
batch_size = eval_size = len(batch_sample[0])
iter += batch_size
time_train_start = utils.TimestampMillisec64()
self.primeModel.TrainSelector(batch_sample)
self.primeModel.TrainSelector(batch_sample, train_for_invalid=True, train_with_invalid=True)
self.primeModel.TrainCreator(batch_sample)
time_train_stop = time_eval_start = utils.TimestampMillisec64()
pct, loss = self.primeModel.Evaluation(batch_sample)
time_cur = utils.TimestampMillisec64()
run_mins = (time_cur - self.time_start) / 1000 / 60
train_rate = batch_size / (time_train_stop - time_train_start) * 1000
eval_rate = (0 if (time_cur - time_eval_start) == 0 else eval_size / (time_cur - time_eval_start) * 1000)
selector_acc = pct * 100
action_loss = loss
print("Iter: {0:7d} Trains/s: {3:8.1f} Evals/s: {4:8.1f} run_mins: {5:5.1f} selector_acc: {1:7.3f} action_loss: {2:7.6f}".format(iter, selector_acc,action_loss, train_rate, eval_rate, run_mins))
if DEBUG_QUICK: break
if DEBUG_QUICK: break
gc.collect()
def mainloop(self):
self.cacheData()
self.hopper_col = []
self.hopper_max = 1#(5 if DEBUG_QUICK else 10)
self.hopper_mask = torch.zeros(len(self.data), device=device, dtype=torch.uint8)
# self.hopper_col.append(self.primeModel.clone())
# self.hopper_col.append(self.primeModel.clone())
self.MaskLessThan(0.01)
self.boarditer = 0
        while(True):  # TODO: exit the loop when nothing is left to categorize
while len(self.hopper_col) < 2:
self.hopper_col.append(self.primeModel.clone())
mask = torch.eq(self.hopper_mask, 0) #flip
self.hopper_subset = CreateSubSet(self.data, mask)
self.hopper_train()
self.hopper_evaluate()
self.hopper_promote()
gc.collect()
self.boarditer += 1
return
def MaskLessThan(self, loss_limit):
mask = torch.eq(self.hopper_mask, 0)
self.hopper_subset = CreateSubSet(self.data, mask)
evalLoader = DataLoader(self.hopper_subset, batch_size=self.batch_count, num_workers=workers)
losses = None
for batch_iter, batch_sample in enumerate(evalLoader):
_losses = self.primeModel.getCreatorLoss(batch_sample)
if losses is None:
losses = _losses
else:
losses = torch.cat((losses,_losses))
self.hopper_mask += losses.le(loss_limit).flatten()
def hopper_train(self):
_s = len(self.hopper_col)
s = "training hopper collection -- size: {0}"
print(s.format(_s))
evalLoader = DataLoader(self.hopper_subset, batch_size=self.batch_count, num_workers=workers)
all_losses = None
all_pcts = None
for m_index in range(_s):
losses = None
pcts = None
for batch_iter, batch_sample in enumerate(evalLoader):
_losses = self.hopper_col[m_index].getCreatorLoss(batch_sample)
#_pcts = self.hopper_col[m_index].getSelectorPct(batch_sample)
if losses is None:
losses = _losses
#pcts = _pcts
else:
losses = torch.cat((losses,_losses))
#cts = torch.cat((pcts,_pcts))
if all_losses is None:
all_losses = losses
eq_losses = torch.ones(len(self.hopper_subset), dtype=torch.uint8)
#all_pcts = pcts
else:
all_losses = torch.cat((all_losses, losses), dim=1)
#eq_losses = torch.eq(all_losses[:, m_index - 1], all_losses[:, m_index]) * eq_losses
#all_pcts = torch.cat((all_pcts, pcts), dim=1)
#rnd = torch.randint_like(eq_losses, low=0, high=_s)
ranks_loss = torch.argsort(all_losses, descending=True)
ranks_loss = torch.t(ranks_loss)
#ranks_pcts = torch.argsort(all_pcts)
#ranks_pcts = torch.t(ranks_pcts)
toRemove = []
toClone = []
for m_index in range(_s):
mset = self.hopper_col[m_index]
ignoreMask = all_losses[:, m_index].le(mset.UpdateLimiter())
goodMask = ranks_loss[m_index].ge(1)
badMask = torch.eq(goodMask, 0) #flip
# goodMask_loss = ranks_loss[m_index].ge(gr)
# badMask_loss = ranks_loss[m_index].le(br)
# goodMask_pct = ranks_pcts[m_index].ge(gr)
# badMask_pct = ranks_pcts[m_index].le(br)
# goodMask = (goodMask_loss + goodMask_pct).ge(1)
# badMask = badMask_loss * badMask_pct
#break up ties
#m_rnd = torch.eq(rnd, m_index)
#m_eq_losses = torch.eq(eq_losses * m_rnd, 0)
goodMask *= ignoreMask #* m_eq_losses
badMask *= ignoreMask #* m_eq_losses
ignoreIndic = torch.nonzero(ignoreMask)
suffex = ("_limit" if mset.LimiterNear() else "")
writer.add_histogram(mset.name() + "/losses" + suffex, all_losses[:, m_index].clone().cpu().data.numpy(), self.boarditer)
#writer.add_histogram(mset.name() + "/ignoreIndic", ignoreIndic.clone().cpu().data.numpy(), self.boarditer)
trainingSubset = TrainingSubset(self.hopper_subset, goodMask, badMask, min_repeat=5)
if len(trainingSubset.goodIndices) > 0:
s = "training hopper collection: name:{0:>27} good: {1} bad: {2}"
print(s.format(mset.name(), len(trainingSubset.goodIndices), len(trainingSubset.badIndices)))
writer.add_histogram(mset.name() + "/good" + suffex, trainingSubset.goodIndices.clone().cpu().data.numpy(), self.boarditer)
writer.add_scalar(mset.name() + "/Num_Good", len(trainingSubset.goodIndices), self.boarditer)
writer.add_scalar(mset.name() + "/Num_Bad", len(trainingSubset.badIndices), self.boarditer)
writer.add_scalar(mset.name() + "/Num_Ignore", len(ignoreIndic), self.boarditer)
toClone.append(mset)
if len(trainingSubset) > 0:
trainingLoader = DataLoader(trainingSubset, batch_size=self.train_batchsize, shuffle=True, num_workers=workers)
for batch_iter, batch_sample in enumerate(trainingLoader):
mset.TrainSelector_hopper(batch_sample)
mset.TrainCreator_hopper(batch_sample)
elif len(trainingSubset.badIndices) > 0:
s = "training hopper collection: name:{0:>27} good: {1} bad: {2} -removing"
print(s.format(mset.name(), len(trainingSubset.goodIndices), len(trainingSubset.badIndices)))
toRemove.append(self.hopper_col[m_index])
else:
s = "training hopper collection: name:{0:>27} good: {1} bad: {2} - Skipping"
print(s.format(mset.name(), len(trainingSubset.goodIndices), len(trainingSubset.badIndices)))
mset.coverageMask = None
# for mset in toRemove:
# self.hopper_col.remove(mset)
# for mset in toClone:
# self.hopper_col.append(mset.clone())
def hopper_evaluate(self):
print("eval hopper collection")
_s = len(self.hopper_col)
evalLoader = DataLoader(self.data, batch_size=self.batch_count, num_workers=workers) #using full data, not hopper
for m_index in range(_s):
coverage = None
for batch_iter, batch_sample in enumerate(evalLoader):
if coverage is None:
coverage = self.hopper_col[m_index].GetCoverage(batch_sample, max_loss=0.001, min_pct=0.99)
else:
coverage = torch.cat((coverage,self.hopper_col[m_index].GetCoverage(batch_sample,max_loss=0.001, min_pct=0.99)))
self.hopper_col[m_index].CoverageAppend(coverage)
def hopper_promote(self):
print("promoting hopper collection")
_s = len(self.hopper_col)
toRemove = []
for m_index in range(_s):
mset = self.hopper_col[m_index]
if mset.LimiterReached():
covMask = mset.GetCoverageMask()
self.hopper_mask = self.hopper_mask + covMask
self.modelsets.append(mset)
toRemove.append(mset)
for mset in toRemove:
self.hopper_col.remove(mset)
|
# Generated by Django 2.0.1 on 2018-03-31 16:51
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('Prevent', '0002_auto_20170831_0134'),
]
operations = [
migrations.AlterModelOptions(
name='prevent',
options={},
),
migrations.AlterModelTable(
name='prevent',
table='Prevent',
),
]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# C++ version Copyright (c) 2006-2007 Erin Catto http://www.box2d.org
# Python version Copyright (c) 2010 kne / sirkne at gmail dot com
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
from framework import *
class EdgeTest (Framework):
name="EdgeTest"
description = "Utilizes b2EdgeShape"
def __init__(self):
super(EdgeTest, self).__init__()
v1=(-10.0, 0.0)
v2=(-7.0, -1.0)
v3=(-4.0, 0.0)
v4=(0.0, 0.0)
v5=(4.0, 0.0)
v6=(7.0, 1.0)
v7=(10.0, 0.0)
ground=self.world.CreateStaticBody(shapes=
[b2EdgeShape(vertices=[None, v1, v2, v3]),
b2EdgeShape(vertices=[ v1, v2, v3, v4]),
b2EdgeShape(vertices=[ v2, v3, v4, v5]),
b2EdgeShape(vertices=[ v3, v4, v5, v6]),
b2EdgeShape(vertices=[ v4, v5, v6, v7]),
b2EdgeShape(vertices=[ v5, v6, v7 ]),
])
box=self.world.CreateDynamicBody(
position=(0.5, 0.6),
allowSleep=False,
shapes=b2PolygonShape(box=(0.5,0.5))
)
if __name__=="__main__":
main(EdgeTest)
|
# Learning scikit-learn
# following the approach of the Chainer Tutorials
from matplotlib import pyplot as plt
'''
Step 1: prepare the dataset
'''
from sklearn.datasets import load_digits
dataset = load_digits()
x = dataset.data
t = dataset.target
print("x_shape : ", x.shape)
print("t_shape : ", t.shape)
# load the function that splits a dataset
from sklearn.model_selection import train_test_split
# split into a training set and a test set
x_train, x_test, t_train, t_test = train_test_split(x, t, test_size=0.3, random_state=0)
'''
Preprocessing 1: standardization
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(x_train)
# mean
scaler.mean_
# variance
scaler.var_
x_train = scaler.transform(x_train)
x_test = scaler.transform(x_test)
# the results did not change
'''
'''
Preprocessing 2: power transform
from sklearn.preprocessing import PowerTransformer
scaler = PowerTransformer()
scaler.fit(x_train)
x_train = scaler.transform(x_train)
x_test = scaler.transform(x_test)
# the results got worse
'''
'''
Steps 2-4: choose the model, objective function, and optimization method
'''
from sklearn.linear_model import LinearRegression
# define the model
reg_model = LinearRegression()
'''
Step 5: train the model
'''
# train the model
reg_model.fit(x_train, t_train)
# learned parameters w
reg_model.coef_
# learned bias b
reg_model.intercept_
# check the accuracy
print("train score : ", reg_model.score(x_train, t_train))
# evaluate on the test set
print("test score : ", reg_model.score(x_test, t_test)) |
start = input("Введите начальное значение ")
end = input("Введите конечное значение ")
day = 1
if str(start).isdigit() and str(end).isdigit():
if end >= start:
result = start
while float(result) < float(end):
day = day + 1
result = float(result) + float(result)/10
print("Номер дня", day)
else:
print("Ошибка!Конечное значение больше начального")
else:
print("Введены некорректные начальные данные")
|
#!/usr/bin/env python2.7
import os
import urllib
import json
import re
import subprocess
from glob import glob
SCRIPTS_URL = "https://bot.lua.run/u/anders/scripts.lua?json"
MAP_URL = "https://bot.lua.run/u/anders/web_useremails.lua"
# git settings
GIT = "git"
AUTHOR_NAME = "L. Bot"
AUTHOR_EMAIL = "luabot@codebust.com"
BRANCH_NAME = "master"
REMOTE_NAME = "origin"
USER_MAP = {}
def extend_list(a, b):
a.extend(b)
return a
def main():
script_path = os.path.realpath(__file__)
root = os.path.dirname(script_path)
# retrieve user->name, email mapping
req = urllib.urlopen(MAP_URL)
resp = req.read()
req.close()
USER_MAP = json.loads(resp)
req = urllib.urlopen(SCRIPTS_URL)
resp = req.read()
req.close()
j = json.loads(resp)
# UNIX timestamp of the last sync commit.
lastrun = 0
os.environ["GIT_COMMITTER_NAME"] = AUTHOR_NAME
os.environ["GIT_COMMITTER_EMAIL"] = AUTHOR_EMAIL
try:
p = subprocess.Popen([GIT, "log", "-1", "master", "--format=%ct",
"--grep", "Sync."], stdout=subprocess.PIPE)
lastrun = int(p.communicate()[0])
    except Exception:
pass
fnull = open(os.devnull, "w")
scripts_by_user = {}
path_uid = {}
# Add new or modified scripts.
for mod in j:
assert re.match(r"^[a-z]*$", mod)
if not os.path.isdir(mod):
os.mkdir(mod)
for fun in j[mod]:
assert re.match(r"^[a-zA-Z0-9_]+$", fun)
path = os.path.join(root, mod, fun + ".lua")
new = not os.path.exists(path)
if not os.path.exists(path) or lastrun < j[mod][fun]["mtime"]:
req = urllib.urlopen(j[mod][fun]["url"].replace("http:", "https:"))
resp = req.read()
with open(path, "w") as f:
f.write(resp)
user = scripts_by_user.setdefault(j[mod][fun]["owner"], [])
user.append(path)
path_uid[path] = j[mod][fun]["uid"]
# Find out if a script was deleted or not.
# Done by finding Lua scripts that weren't in the list fetched earlier.
# If a deleted script is found, git rm it.
deleted_scripts = []
for mod in j:
for path in glob(os.path.join(root, mod) + "/*.lua"):
name = os.path.splitext(os.path.basename(path))[0]
if name not in j[mod]:
deleted_scripts.append(path)
for user in scripts_by_user:
for path in scripts_by_user[user]:
subprocess.check_call([GIT, "add", path], stdout=fnull)
# opt-in author, otherwise semi-anonymous
userinfo = USER_MAP.get(user, {})
os.environ["GIT_AUTHOR_NAME"] = userinfo.get("name", "user%d" % path_uid[path])
os.environ["GIT_AUTHOR_EMAIL"] = userinfo.get("email", "user%d@codebust.com" % path_uid[path])
subprocess.check_call(extend_list([GIT, "commit", "-m", "Sync."], scripts_by_user[user]), stdout=fnull)
os.environ["GIT_AUTHOR_NAME"] = AUTHOR_NAME
os.environ["GIT_AUTHOR_EMAIL"] = AUTHOR_EMAIL
if len(deleted_scripts) > 0:
subprocess.check_call(extend_list([GIT, "rm"], deleted_scripts), stdout=fnull)
subprocess.check_call(extend_list([GIT, "commit", "-m", "Delete old scripts."], deleted_scripts), stdout=fnull)
subprocess.call([GIT, "push", "-u", REMOTE_NAME, BRANCH_NAME],
stdout=fnull, stderr=fnull)
fnull.close()
if __name__ == "__main__":
main()
|
#!/usr/bin/env python
# coding: utf-8
# In[16]:
from matplotlib import pyplot as plt
ages_x = [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35]
salary_y = [38496, 42000, 46752, 49320, 53200,
56000, 62316, 64928, 67317, 68748, 73752]
plt.bar(ages_x, salary_y, label="All Devs")
plt.legend()
plt.title("Median Salary (USD) by Age")
plt.xlabel("Ages")
plt.ylabel("Median Salary (USD)")
plt.show()
# In[33]:
from matplotlib import pyplot as plt
ages_x = [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35]
salary_y = [38496, 42000, 46752, 49320, 53200,
56000, 62316, 64928, 67317, 68748, 73752]
plt.bar(ages_x, salary_y, color="#444444", label="All Devs")
py_salary_y = [45372, 48876, 53850, 57287, 63016,
65998, 70003, 70000, 71496, 75370, 83640]
plt.plot(ages_x, py_salary_y, color="#008fd5", label="Python")
js_salary_y = [37810, 43515, 46823, 49293, 53437,
56373, 62375, 66674, 68745, 68746, 74583]
plt.plot(ages_x, js_salary_y, color="#e5ae38", label="JavaScript")
plt.legend()
plt.title("Median Salary (USD) by Age")
plt.xlabel("Ages")
plt.ylabel("Median Salary (USD)")
plt.show()
# In[29]:
import numpy as np
from matplotlib import pyplot as plt
ages_x = [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35]
x_indexes=np.arange(len(ages_x))
x_indexes
# In[38]:
width=0.25
salary_y = [38496, 42000, 46752, 49320, 53200,
56000, 62316, 64928, 67317, 68748, 73752]
plt.bar(x_indexes-width, salary_y,width=width, color="#444444", label="All Devs")
py_salary_y = [45372, 48876, 53850, 57287, 63016,
65998, 70003, 70000, 71496, 75370, 83640]
plt.bar(x_indexes, py_salary_y, width=width,color="#008fd5", label="Python")
js_salary_y = [37810, 43515, 46823, 49293, 53437,
56373, 62375, 66674, 68745, 68746, 74583]
plt.bar(x_indexes+width, js_salary_y, width=width,color="#e5ae38", label="JavaScript")
plt.legend()
plt.title("Median Salary (USD) by Age")
plt.xlabel("Ages")
plt.ylabel("Median Salary (USD)")
plt.show()
# In[40]:
width=0.25
salary_y = [38496, 42000, 46752, 49320, 53200,
56000, 62316, 64928, 67317, 68748, 73752]
plt.bar(x_indexes-width, salary_y,width=width, color="#444444", label="All Devs")
py_salary_y = [45372, 48876, 53850, 57287, 63016,
65998, 70003, 70000, 71496, 75370, 83640]
plt.bar(x_indexes, py_salary_y, width=width,color="#008fd5", label="Python")
js_salary_y = [37810, 43515, 46823, 49293, 53437,
56373, 62375, 66674, 68745, 68746, 74583]
plt.bar(x_indexes+width, js_salary_y, width=width,color="#e5ae38", label="JavaScript")
plt.legend()
# Use the actual age labels instead of the numeric indexes on the x axis
plt.xticks(ticks=x_indexes,labels=ages_x)
plt.title("Median Salary (USD) by Age")
plt.xlabel("Ages")
plt.ylabel("Median Salary (USD)")
plt.show()
# In[8]:
import csv
import numpy as np
from matplotlib import pyplot as plt
#Grab data from csv file
with open('data.csv') as csv_file:
csv_reader= csv.DictReader(csv_file)
row=next(csv_reader)
print(row)
# In[9]:
import csv
import numpy as np
from matplotlib import pyplot as plt
#Grab data from csv file
with open('data.csv') as csv_file:
csv_reader= csv.DictReader(csv_file)
row=next(csv_reader)
print(row['LanguagesWorkedWith'].split(';'))
# In[10]:
import csv
import numpy as np
from collections import Counter
from matplotlib import pyplot as plt
#Grab data from csv file
with open('data.csv') as csv_file:
csv_reader= csv.DictReader(csv_file)
language_counter=Counter()
for row in csv_reader:
language_counter.update(row['LanguagesWorkedWith'].split(';'))
print(language_counter)
# In[11]:
import csv
import numpy as np
from collections import Counter
from matplotlib import pyplot as plt
#Grab data from csv file
with open('data.csv') as csv_file:
csv_reader= csv.DictReader(csv_file)
language_counter=Counter()
for row in csv_reader:
language_counter.update(row['LanguagesWorkedWith'].split(';'))
print(language_counter.most_common(15))
# In[12]:
import csv
import numpy as np
from collections import Counter
from matplotlib import pyplot as plt
#Grab data from csv file
with open('data.csv') as csv_file:
csv_reader= csv.DictReader(csv_file)
language_counter=Counter()
for row in csv_reader:
language_counter.update(row['LanguagesWorkedWith'].split(';'))
languages=[]
popularity=[]
for item in (language_counter.most_common(15)):
languages.append(item[0])
popularity.append(item[1])
print(languages)
print(popularity)
# In[13]:
import csv
import numpy as np
from collections import Counter
from matplotlib import pyplot as plt
#Grab data from csv file
with open('data.csv') as csv_file:
csv_reader= csv.DictReader(csv_file)
language_counter=Counter()
for row in csv_reader:
language_counter.update(row['LanguagesWorkedWith'].split(';'))
languages=[]
popularity=[]
for item in (language_counter.most_common(15)):
languages.append(item[0])
popularity.append(item[1])
plt.barh(languages,popularity)
plt.title("Most Popular Programming Languages")
plt.xlabel("No.of people who use")
plt.show()
# In[17]:
import csv
import numpy as np
from collections import Counter
from matplotlib import pyplot as plt
#Grab data from csv file
with open('data.csv') as csv_file:
csv_reader= csv.DictReader(csv_file)
language_counter=Counter()
for row in csv_reader:
language_counter.update(row['LanguagesWorkedWith'].split(';'))
languages=[]
popularity=[]
for item in (language_counter.most_common(15)):
languages.append(item[0])
popularity.append(item[1])
languages.reverse()
popularity.reverse()
plt.barh(languages,popularity)
plt.title("Most Popular Programming Languages")
plt.xlabel("No.of people who use")
plt.show()
# In[22]:
import csv
import numpy as np
import pandas as pd
from collections import Counter
from matplotlib import pyplot as plt
plt.style.use("fivethirtyeight")
data = pd.read_csv('data.csv')
ids = data['Responder_id']
lang_responses = data['LanguagesWorkedWith']
language_counter = Counter()
for response in lang_responses:
language_counter.update(response.split(';'))
languages = []
popularity = []
for item in language_counter.most_common(15):
languages.append(item[0])
popularity.append(item[1])
languages.reverse()
popularity.reverse()
plt.barh(languages, popularity)
plt.title("Most Popular Languages")
# plt.ylabel("Programming Languages")
plt.xlabel("Number of People Who Use")
plt.show()
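# Optional follow-up (not part of the original cells): tight_layout() prevents
# clipped labels and savefig() writes the chart to disk; both are standard
# pyplot calls and must run before plt.show().
# plt.tight_layout()
# plt.savefig("languages.png")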
|
import os
import numpy
import cPickle
from amuse.units import nbody_system, units
from amuse.io import write_set_to_file
from amuse.ic.kroupa import new_kroupa_mass_distribution
from amuse.ic.fractalcluster import new_fractal_cluster_model
from amuse.ic.plummer import new_plummer_model
from amuse.ext.spherical_model import new_gas_plummer_distribution
from amuse.ext.relax_sph import relax
from amuse.community.fi.interface import Fi
from amuse.community.gadget2.interface import Gadget2
from amuse.community.fastkick.interface import FastKick
def to_initial_conditions_directory():
initial_conditions_directory = os.path.join(os.getcwd(), "initial_conditions")
if not os.path.exists(initial_conditions_directory):
os.mkdir(initial_conditions_directory)
print "Created new initial conditions directory for output:", initial_conditions_directory
os.chdir(initial_conditions_directory)
def generate_initial_conditions(
number_of_stars = 10000,
number_of_gas_particles = 2*10**6,
star_formation_efficiency = 0.1,
virial_radius = 0.33 | units.parsec,
virial_ratio = 1.0,
use_fractal = False):
numpy.random.seed(12345678)
seed_fractal = 312357271
masses = new_kroupa_mass_distribution(number_of_stars)
total_stellar_mass = masses.sum()
total_mass = total_stellar_mass / star_formation_efficiency
converter = nbody_system.nbody_to_si(total_mass, virial_radius)
if use_fractal:
stars = new_fractal_cluster_model(number_of_stars, convert_nbody=converter, do_scale=False, fractal_dimension=1.6, random_seed=seed_fractal)
else:
stars = new_plummer_model(number_of_stars, convert_nbody=converter, do_scale=False)
stars.mass = masses
stars.move_to_center()
print "scaling positions to match virial_radius"
stars.position *= virial_radius / stars.virial_radius()
print "scaling velocities to match virial_ratio"
stars.velocity *= numpy.sqrt(virial_ratio * converter.to_si(0.5|nbody_system.energy) * star_formation_efficiency / stars.kinetic_energy())
print "new_gas_plummer_distribution"
gas = new_gas_plummer_distribution(
number_of_gas_particles,
total_mass = (total_mass - total_stellar_mass),
virial_radius = virial_radius,
type = "fcc")
gas.h_smooth = 0.0 | units.parsec
filename = "YSC_{0}_stars{1}_gas{2}k_".format("fractal" if use_fractal else "plummer",
number_of_stars, number_of_gas_particles/1000)
print "Writing initial conditions to", filename, "+ stars/gas.amuse"
write_set_to_file(stars, filename+"stars.amuse", "amuse", append_to_file=False)
write_set_to_file(gas, filename+"gas.amuse", "amuse", append_to_file=False)
with open(filename+"info.pkl", "wb") as outfile:
cPickle.dump([converter], outfile)
return stars, gas, filename
def new_hydro(gas, dynamical_timescale, converter):
    if False:  # switch to True to relax with Gadget2 instead of Fi
hydro = Gadget2(converter, number_of_workers=8, redirection="file", redirect_file="gadget.log")
hydro.parameters.time_max = 3 * dynamical_timescale
hydro.parameters.max_size_timestep = dynamical_timescale / 100
hydro.parameters.time_limit_cpu = 1.0 | units.Gyr
else:
hydro = Fi(converter, mode='openmp', redirection="file", redirect_file="fi.log")
hydro.parameters.timestep = dynamical_timescale / 100
hydro.parameters.eps_is_h_flag = True
return hydro
def relax_initial_conditions(stars, gas, filename):
dynamical_timescale = gas.dynamical_timescale()
converter = nbody_system.nbody_to_si(dynamical_timescale, 1|units.parsec)
hydro = new_hydro(gas, dynamical_timescale, converter)
gravity_field_code = FastKick(converter, mode="gpu", number_of_workers=2)
gravity_field_code.parameters.epsilon_squared = (0.01 | units.parsec)**2
gravity_field_code.particles.add_particles(stars)
relaxed_gas = relax(gas, hydro, gravity_field=gravity_field_code,
monitor_func="energy",
bridge_options=dict(verbose=True, use_threading=False))
gravity_field_code.stop()
hydro.stop()
write_set_to_file(relaxed_gas, filename+"gas_relaxed.amuse", "amuse")
if __name__ == "__main__":
to_initial_conditions_directory()
stars, gas, filename = generate_initial_conditions()
# relax_initial_conditions(stars, gas, filename)
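    # Hypothetical variation (same API as above): regenerate with a fractal
    # stellar distribution, then relax the gas around it.
    # stars, gas, filename = generate_initial_conditions(use_fractal=True)
    # relax_initial_conditions(stars, gas, filename)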
|
import contextlib
import uuid
from unittest import TestCase
from mock import patch, DEFAULT, MagicMock, Mock, ANY
from icommons_common.utils import Bunch
from icommons_ui.exceptions import RenderableException
from django.core.exceptions import ObjectDoesNotExist
from canvas_sdk.exceptions import CanvasAPIError
from canvas_course_site_wizard import controller
from canvas_course_site_wizard.models import (
BulkCanvasCourseCreationJob,
CanvasCourseGenerationJob,
SISCourseData,
)
from canvas_course_site_wizard.exceptions import (
CanvasCourseAlreadyExistsError,
CanvasCourseCreateError,
CanvasSectionCreateError,
CourseGenerationJobCreationError,
CourseGenerationJobNotFoundError,
SISCourseDoesNotExistError,
NoTemplateExistsForSchool
)
m_canvas_content_generation_job = Mock(
spec=CanvasCourseGenerationJob,
id=2,
canvas_course_id=9999,
sis_course_id=88323,
status_url='http://example.com/1234',
workflow_state='setup',
created_by_user_id='123'
)
@patch.multiple('canvas_course_site_wizard.controller',
get_course_data=DEFAULT, create_course_section=DEFAULT,
create_new_course=DEFAULT, get_default_template_for_school=DEFAULT)
class CreateCanvasCourseTest(TestCase):
longMessage = True
def setUp(self):
self.bulk_job_id = 10
self.bulk_job = BulkCanvasCourseCreationJob(id=self.bulk_job_id)
self.canvas_course_id = uuid.uuid4().hex
self.job_id = 1475
self.sis_course_id = "305841"
self.sis_user_id = "123456"
self.school_id = "colgsas"
def get_mock_of_get_course_data(self):
# mock the properties
course_model_mock = MagicMock(sis_account_id="school:gse",
course_code="GSE",
course_name="GSE test course",
sis_term_id="gse term")
# mock the methods
course_model_mock.primary_section_name.return_value = "Primary section"
return course_model_mock
# ------------------------------------------------------
# Tests for create_canvas_course()
# ------------------------------------------------------
@patch('canvas_course_site_wizard.controller.create_canvas_course')
def test_create_canvas_course_method_called_with_right_params(self, create_canvas_course, get_course_data,
create_course_section, create_new_course,
get_default_template_for_school):
"""
Test that controller makes create_canvas_course call with expected args
"""
get_default_template_for_school.side_effect = NoTemplateExistsForSchool(self.school_id)
result = create_canvas_course(self.sis_course_id, self.sis_user_id)
create_canvas_course.assert_called_with(self.sis_course_id, self.sis_user_id)
# ------------------------------------------------------
# Tests for create_canvas_course.get_course_data()
# ------------------------------------------------------
@patch('canvas_course_site_wizard.controller.update_course_generation_workflow_state')
@patch('canvas_course_site_wizard.controller.CanvasCourseGenerationJob.objects.create')
@patch('canvas_course_site_wizard.controller.CanvasCourseGenerationJob.objects.filter')
def test_get_course_data_method_called_with_right_params(self,
course_generation_job__objects__filter,
course_generation_job__objects__create,
update_course_generation_workflow_state,
get_course_data, create_course_section, create_new_course, get_default_template_for_school):
"""
Test that controller method makes a call to get_course_data api with expected args
"""
job = Mock(spec=CanvasCourseGenerationJob())
course_generation_job__objects__create.return_value = job
query_set = Mock(get=Mock(return_value=job))
course_generation_job__objects__filter.return_value = query_set
get_default_template_for_school.side_effect = NoTemplateExistsForSchool(self.school_id)
controller.create_canvas_course(self.sis_course_id, self.sis_user_id)
get_course_data.assert_called_with(self.sis_course_id)
@patch('canvas_course_site_wizard.controller.logger')
@patch('canvas_course_site_wizard.controller.send_failure_msg_to_support')
def test_object_not_found_exception_in_get_course_data_logs_error(
self, send_failure_msg_to_support, log_replacement,
get_course_data, create_course_section, create_new_course, get_default_template_for_school):
"""
        Test that logger.error logs an error when get_course_data throws
an ObjectDoesNotExist exception.
"""
get_course_data.side_effect = ObjectDoesNotExist
get_default_template_for_school.side_effect = NoTemplateExistsForSchool(self.school_id)
with self.assertRaises(SISCourseDoesNotExistError):
controller.create_canvas_course(self.sis_course_id, self.sis_user_id)
self.assertTrue(log_replacement.error.called)
"""
Tests for create_canvas_course.CanvasCourseGenerationJob.objects.create
"""
@patch('canvas_course_site_wizard.models.CanvasCourseGenerationJob.objects.create')
def test_create_canvas_course_method_invokes_create_generation_record(self, canvas_content_gen_create,
get_course_data, create_course_section,
create_new_course, get_default_template_for_school,
**kwargs):
"""
Test that create_canvas_course method invokes a creation of CanvasCourseGenerationJob record
with workflow_state to STATUS_SETUP
"""
get_default_template_for_school.side_effect = NoTemplateExistsForSchool(self.school_id)
controller.create_canvas_course(self.sis_course_id, self.sis_user_id)
self.assertTrue(canvas_content_gen_create.called)
canvas_content_gen_create.assert_called_with(sis_course_id=self.sis_course_id, created_by_user_id=self.sis_user_id,
workflow_state=CanvasCourseGenerationJob.STATUS_SETUP)
@patch('canvas_course_site_wizard.controller.update_course_generation_workflow_state')
@patch('canvas_course_site_wizard.controller.CanvasCourseGenerationJob.objects.filter')
@patch('canvas_course_site_wizard.models.CanvasCourseGenerationJob.objects.create')
def test_create_canvas_course_method_does_not_invoke_create_generation_record_for_bulk_job(
self, canvas_content_gen_create,
course_generation_job__objects__filter,
update_course_generation_workflow_state, get_course_data,
create_course_section, create_new_course, get_default_template_for_school, **kwargs):
"""
Test that create_canvas_course method does not try to create
CanvasCourseGenerationJob record for courses created by bulk job as well
"""
query_set = Mock(get=Mock(return_value=Mock(spec=CanvasCourseGenerationJob)))
course_generation_job__objects__filter.return_value = query_set
get_default_template_for_school.side_effect = NoTemplateExistsForSchool(self.school_id)
controller.create_canvas_course(self.sis_course_id, self.sis_user_id,
self.bulk_job)
self.assertFalse(canvas_content_gen_create.called)
@patch('canvas_course_site_wizard.controller.CanvasCourseGenerationJob')
def test_create_canvas_course_method_creates_generation_record(self, canvas_content_gen_db_mock, get_course_data,
create_course_section, create_new_course,
get_default_template_for_school, **kwargs):
"""
Test that create_canvas_course method creates a CanvasCourseGenerationJob record with right parameters
"""
workflow_mock = MagicMock(workflow_status=CanvasCourseGenerationJob.STATUS_SETUP)
get_default_template_for_school.side_effect = NoTemplateExistsForSchool(self.school_id)
controller.create_canvas_course(self.sis_course_id, self.sis_user_id)
args, kwargs = canvas_content_gen_db_mock.objects.create.call_args
canvas_content_gen_db_mock.objects.create.assert_called_with(sis_course_id=self.sis_course_id,
created_by_user_id=self.sis_user_id,
workflow_state=ANY)
@patch('canvas_course_site_wizard.controller.logger')
@patch('canvas_course_site_wizard.models.CanvasCourseGenerationJob.objects.create')
def test_create_canvas_course_method_logs_on_job_creation_exception(self, canvas_content_gen_db_mock, logger,
get_course_data,
create_course_section, create_new_course,
get_default_template_for_school, **kwargs):
"""
Test that create_canvas_course method logs an error when CanvasCourseGenerationJob creation has an exception
"""
canvas_content_gen_db_mock.side_effect= Exception
get_default_template_for_school.side_effect = NoTemplateExistsForSchool(self.school_id)
with self.assertRaises(Exception):
controller.create_canvas_course(self.sis_course_id, self.sis_user_id)
self.assertTrue(logger.exception.called)
@patch('canvas_course_site_wizard.models.CanvasCourseGenerationJob.objects.create')
    def test_custom_error_raised_when_job_creation_has_exception(self, canvas_content_gen_db_mock, get_course_data,
create_course_section, create_new_course,
get_default_template_for_school):
"""
Test to assert that a CourseGenerationJobCreationError is raised when CanvasCourseGenerationJob creation has an exception
"""
canvas_content_gen_db_mock.side_effect= Exception
get_default_template_for_school.side_effect = NoTemplateExistsForSchool(self.school_id)
with self.assertRaises(CourseGenerationJobCreationError):
controller.create_canvas_course(self.sis_course_id, self.sis_user_id)
@patch('canvas_course_site_wizard.controller.update_course_generation_workflow_state')
    def test_404_exception_in_create_new_course_method_invokes_update_workflow_state(self, update_mock,
get_course_data,
create_course_section,
create_new_course,
get_default_template_for_school):
"""
        A RenderableException should be raised and
        update_content_generation_workflow_state() is invoked
        when the create_new_course SDK call throws a CanvasAPIError
"""
        get_default_template_for_school.side_effect = NoTemplateExistsForSchool(self.school_id)
        create_new_course.side_effect = CanvasAPIError(status_code=404)
with self.assertRaises(CanvasCourseCreateError):
controller.create_canvas_course(
self.sis_course_id,
self.sis_user_id
)
update_mock.assert_called_with(
self.sis_course_id,
CanvasCourseGenerationJob.STATUS_SETUP_FAILED,
course_job_id=ANY,
bulk_job_id=None
)
@patch('canvas_course_site_wizard.controller.CanvasCourseGenerationJob.objects.filter')
@patch('canvas_course_site_wizard.controller.update_course_generation_workflow_state')
    def test_404_exception_in_create_new_course_method_invokes_update_workflow_state_with_bulk_job_id(
self, update_mock, course_generation_job__objects__filter,
get_course_data, create_course_section, create_new_course, get_default_template_for_school):
"""
Test to assert that a CanvasCourseCreateError is raised when the
create_new_course SDK call throws a CanvasAPIError,
and update_content_migration_workflow_state is invoked to update the
status to STATUS_SETUP_FAILED
"""
query_set = Mock(get=Mock(return_value=Mock(spec=CanvasCourseGenerationJob)))
course_generation_job__objects__filter.return_value = query_set
get_default_template_for_school.side_effect = NoTemplateExistsForSchool(self.school_id)
create_new_course.side_effect = CanvasAPIError(status_code=404)
with self.assertRaises(CanvasCourseCreateError):
controller.create_canvas_course(self.sis_course_id, self.sis_user_id,
bulk_job=self.bulk_job)
update_mock.assert_called_with(
self.sis_course_id, CanvasCourseGenerationJob.STATUS_SETUP_FAILED,
course_job_id=None, bulk_job_id=self.bulk_job_id)
# ------------------------------------------------------
    # Tests for create_canvas_course.create_new_course()
# ------------------------------------------------------
@patch('canvas_course_site_wizard.controller.update_course_generation_workflow_state')
@patch('canvas_course_site_wizard.controller.CanvasCourseGenerationJob.objects.create')
@patch('canvas_course_site_wizard.controller.CanvasCourseGenerationJob.objects.filter')
@patch('canvas_course_site_wizard.controller.logger')
@patch('canvas_course_site_wizard.controller.SDK_CONTEXT')
def test_create_new_course_method_is_called_with_proper_arguments(self,
SDK_CONTEXT, logger, course_generation_job__objects__filter,
course_generation_job__objects__create,
update_course_generation_workflow_state, get_course_data,
create_course_section, create_new_course, get_default_template_for_school):
"""
Test to assert that create_new_course method is called by
create_canvas_course controller method with appropriate arguments
(collapses a bunch of individual parameter tests)
"""
job = Mock(spec=CanvasCourseGenerationJob())
course_generation_job__objects__create.return_value = job
query_set = Mock(get=Mock(return_value=job))
course_generation_job__objects__filter.return_value = query_set
course_model_mock = self.get_mock_of_get_course_data()
get_course_data.return_value = course_model_mock
sis_account_id_argument = 'sis_account_id:' + course_model_mock.sis_account_id
course_code_argument = course_model_mock.course_code
course_name_argument = course_model_mock.course_name
course_term_id_argument = 'sis_term_id:' + course_model_mock.sis_term_id
course_sis_course_id_argument = self.sis_course_id
get_default_template_for_school.side_effect = NoTemplateExistsForSchool(self.school_id)
controller.create_canvas_course(self.sis_course_id, self.sis_user_id)
create_new_course.assert_called_with(
request_ctx=SDK_CONTEXT,
account_id=sis_account_id_argument,
course_name=course_name_argument,
course_course_code=course_code_argument,
course_term_id=course_term_id_argument,
course_sis_course_id=course_sis_course_id_argument,
)
def test_exception_when_create_new_course_method_raises_api_400(self, get_course_data, create_course_section,
create_new_course, get_default_template_for_school):
"""
Test to assert that a CanvasCourseAlreadyExistsError is raised when the create_new_course method
throws a CanvasAPIError
"""
get_default_template_for_school.side_effect = NoTemplateExistsForSchool(self.school_id)
create_new_course.side_effect = CanvasAPIError(status_code=400)
with self.assertRaises(CanvasCourseAlreadyExistsError):
controller.create_canvas_course(self.sis_course_id, self.sis_user_id)
@patch('canvas_course_site_wizard.controller.send_failure_msg_to_support')
def test_exception_when_create_new_course_method_raises_api_404(self, send_failure_msg_to_support, get_course_data,
create_course_section, create_new_course,
get_default_template_for_school):
"""
Test to assert that a RenderableException is raised when the create_new_course SDK call
        throws a CanvasAPIError
"""
create_new_course.side_effect = CanvasAPIError(status_code=404)
get_default_template_for_school.side_effect = NoTemplateExistsForSchool(self.school_id)
with self.assertRaises(CanvasCourseCreateError):
controller.create_canvas_course(self.sis_course_id, self.sis_user_id)
# ------------------------------------------------------
# Tests for create_canvas_course.create_course_section()
# ------------------------------------------------------
@patch('canvas_course_site_wizard.controller.SDK_CONTEXT')
def test_create_course_section_method_is_called(self, SDK_CONTEXT, get_course_data,
create_course_section, create_new_course, get_default_template_for_school):
"""
        Test to assert that create_course_section is called by the create_canvas_course controller method
"""
course_model_mock = self.get_mock_of_get_course_data()
get_course_data.return_value = course_model_mock
mock_canvas_course_id = '12345'
mock_primary_section_name = course_model_mock.primary_section_name.return_value
create_new_course.return_value.json.return_value = {'id': mock_canvas_course_id}
get_default_template_for_school.side_effect = NoTemplateExistsForSchool(self.school_id)
controller.create_canvas_course(self.sis_course_id, self.sis_user_id)
create_course_section.assert_called_with(request_ctx=SDK_CONTEXT, course_id=mock_canvas_course_id,
course_section_name=mock_primary_section_name,
course_section_sis_section_id=self.sis_course_id)
@patch('canvas_course_site_wizard.controller.update_course_generation_workflow_state')
@patch('canvas_course_site_wizard.controller.CanvasCourseGenerationJob.objects.create')
@patch('canvas_course_site_wizard.controller.send_failure_msg_to_support')
def test_when_create_course_section_method_raises_api_error(self,
            send_failure_msg_to_support,
            course_generation_job__objects__create, update_course_generation_workflow_state,
get_course_data, create_course_section, create_new_course, get_default_template_for_school):
"""
Test to assert that a RenderableException is raised when the create_course_section SDK call
        throws a CanvasAPIError
"""
job = Mock(spec=CanvasCourseGenerationJob())
course_generation_job__objects__create.return_value = job
create_course_section.side_effect = CanvasAPIError(status_code=400)
get_default_template_for_school.side_effect = NoTemplateExistsForSchool(self.school_id)
with self.assertRaises(RenderableException):
controller.create_canvas_course(self.sis_course_id, self.sis_user_id, None)
@patch('canvas_course_site_wizard.controller.send_failure_msg_to_support')
def test_object_not_found_exception_in_get_course_data_sends_support_email(self, send_failure_msg_to_support,
get_course_data, create_course_section,
create_new_course, get_default_template_for_school):
"""
Test to assert that a support email is sent when get_course_data raises an ObjectDoesNotExist
"""
get_course_data.side_effect = ObjectDoesNotExist
exception_data = SISCourseDoesNotExistError(self.sis_course_id)
get_default_template_for_school.side_effect = NoTemplateExistsForSchool(self.school_id)
with self.assertRaises(SISCourseDoesNotExistError):
controller.create_canvas_course(self.sis_course_id, self.sis_user_id)
send_failure_msg_to_support.assert_called_with(self.sis_course_id, self.sis_user_id, exception_data.display_text)
@patch('canvas_course_site_wizard.controller.CanvasCourseGenerationJob.objects.create')
@patch('canvas_course_site_wizard.controller.send_failure_msg_to_support')
def test_object_not_found_exception_in_get_course_data_doesnt_send_support_email_for_bulk_created_course(
            self, send_failure_msg_to_support,
            course_generation_job__objects__create,
get_course_data, create_course_section, create_new_course, get_default_template_for_school):
"""
Test to assert that for a course that is created as part of a bulk job,
the support email is not sent when get_course_data raises an
ObjectDoesNotExist
"""
job = Mock(spec=CanvasCourseGenerationJob())
course_generation_job__objects__create.return_value = job
get_course_data.side_effect = ObjectDoesNotExist
get_default_template_for_school.side_effect = NoTemplateExistsForSchool(self.school_id)
with self.assertRaises(CourseGenerationJobNotFoundError):
controller.create_canvas_course(self.sis_course_id, self.sis_user_id,
self.bulk_job)
self.assertFalse(send_failure_msg_to_support.called)
@patch('canvas_course_site_wizard.controller.send_failure_msg_to_support')
def test_canvas_course_create_error_sends_support_email(self, send_failure_msg_to_support, get_course_data,
create_course_section, create_new_course,
get_default_template_for_school):
"""
        Test to assert that a support email is sent when there is a CanvasAPIError resulting in CanvasCourseCreateError
and that the correct error message from the exception is sent as a param to the mail helper method
"""
create_new_course.side_effect = CanvasAPIError(status_code=404)
exception_data = CanvasCourseCreateError(msg_details=self.sis_course_id)
get_default_template_for_school.side_effect = NoTemplateExistsForSchool(self.school_id)
with self.assertRaises(CanvasCourseCreateError):
controller.create_canvas_course(self.sis_course_id, self.sis_user_id)
send_failure_msg_to_support.assert_called_with(self.sis_course_id, self.sis_user_id, exception_data.display_text)
self.assertTrue('Error: SIS ID not applied for CID' in exception_data.display_text)
@patch('canvas_course_site_wizard.controller.update_course_generation_workflow_state')
@patch('canvas_course_site_wizard.controller.CanvasCourseGenerationJob.objects.filter')
@patch('canvas_course_site_wizard.controller.send_failure_msg_to_support')
def test_canvas_course_create_error_doesnt_send_support_email_for_bulk_created_course(
self, send_failure_msg_to_support,
course_generation_job__objects__filter,
update_course_generation_workflow_state, get_course_data,
create_course_section, create_new_course, get_default_template_for_school):
"""
        Test to assert that for a course that is created as part of a bulk
        job, the support email is not sent when there is a CanvasAPIError
resulting in CanvasCourseCreateError.
"""
query_set = Mock(get=Mock(return_value=Mock(spec=CanvasCourseGenerationJob)))
course_generation_job__objects__filter.return_value = query_set
create_new_course.side_effect = CanvasAPIError(status_code=404)
get_default_template_for_school.side_effect = NoTemplateExistsForSchool(self.school_id)
with self.assertRaises(CanvasCourseCreateError):
controller.create_canvas_course(self.sis_course_id, self.sis_user_id,
self.bulk_job)
self.assertFalse(send_failure_msg_to_support.called)
@patch('canvas_course_site_wizard.controller.update_course_generation_workflow_state')
@patch('canvas_course_site_wizard.controller.CanvasCourseGenerationJob.objects.create')
@patch('canvas_course_site_wizard.controller.CanvasCourseGenerationJob.objects.filter')
@patch('canvas_course_site_wizard.controller.send_failure_msg_to_support')
def test_canvas_section_error_sends_support_email(self,
send_failure_msg_to_support,
course_generation_job__objects__filter,
course_generation_job__objects__create,
update_course_generation_workflow_state, get_course_data,
create_course_section, create_new_course, get_default_template_for_school):
"""
Test to assert that a support email is sent when there is a
CanvasAPIError resulting in CanvasSectionCreateError.
"""
job = Mock(spec=CanvasCourseGenerationJob())
course_generation_job__objects__create.return_value = job
query_set = Mock(get=Mock(return_value=job))
course_generation_job__objects__filter.return_value = query_set
create_course_section.side_effect = CanvasAPIError(status_code=400)
exception_data = CanvasSectionCreateError(self.sis_course_id)
get_default_template_for_school.side_effect = NoTemplateExistsForSchool(self.school_id)
with self.assertRaises(CanvasSectionCreateError):
controller.create_canvas_course(self.sis_course_id, self.sis_user_id)
send_failure_msg_to_support.assert_called_with(self.sis_course_id,
self.sis_user_id,
exception_data.display_text)
@patch('canvas_course_site_wizard.controller.update_course_generation_workflow_state')
@patch('canvas_course_site_wizard.controller.CanvasCourseGenerationJob.objects.filter')
@patch('canvas_course_site_wizard.controller.send_failure_msg_to_support')
def test_canvas_section_error_doesnt_send_support_email_for_bulk_created_course(
self, send_failure_msg_to_support,
course_generation_job__objects__filter,
update_course_generation_workflow_state, get_course_data,
create_course_section, create_new_course, get_default_template_for_school):
"""
        Test to assert that a support email is NOT sent for bulk created courses, when there is a CanvasAPIError
resulting in CanvasSectionCreateError
"""
query_set = Mock(get=Mock(return_value=Mock(spec=CanvasCourseGenerationJob)))
course_generation_job__objects__filter.return_value = query_set
create_course_section.side_effect = CanvasAPIError(status_code=400)
get_default_template_for_school.side_effect = NoTemplateExistsForSchool(self.school_id)
with self.assertRaises(CanvasSectionCreateError):
controller.create_canvas_course(self.sis_course_id, self.sis_user_id,
self.bulk_job)
self.assertFalse(send_failure_msg_to_support.called)
@patch('canvas_course_site_wizard.controller.send_failure_msg_to_support')
def test_canvas_course_exists_error_doesnt_send_support_email(self, send_failure_msg_to_support, get_course_data,
create_course_section, create_new_course,
get_default_template_for_school):
"""
Test to assert that a support email is NOT sent when canvas course already exists
(there is a CanvasCourseAlreadyExistsError)
"""
create_new_course.side_effect = CanvasAPIError(status_code=400)
get_default_template_for_school.side_effect = NoTemplateExistsForSchool(self.school_id)
with self.assertRaises(CanvasCourseAlreadyExistsError):
controller.create_canvas_course(self.sis_course_id, self.sis_user_id)
self.assertFalse(send_failure_msg_to_support.called)
@patch('canvas_course_site_wizard.controller.update_course_generation_workflow_state')
@patch('canvas_course_site_wizard.controller.CanvasCourseGenerationJob.objects.create')
@patch('canvas_course_site_wizard.controller.CanvasCourseGenerationJob.objects.filter')
@patch('canvas_course_site_wizard.controller.send_failure_msg_to_support')
def test_canvas_section_error_sets_support_notified(self,
send_failure_msg_to_support,
course_generation_job__objects__filter,
course_generation_job__objects__create,
update_course_generation_workflow_state, get_course_data,
create_course_section, create_new_course, get_default_template_for_school):
"""
Test to assert that support_notified is set on CanvasSectionCreateError
"""
job = Mock(spec=CanvasCourseGenerationJob())
course_generation_job__objects__create.return_value = job
query_set = Mock(get=Mock(return_value=job))
course_generation_job__objects__filter.return_value = query_set
create_course_section.side_effect = CanvasAPIError(status_code=400)
exception_data = CanvasSectionCreateError(self.sis_course_id)
get_default_template_for_school.side_effect = NoTemplateExistsForSchool(self.school_id)
with self.assertRaises(CanvasSectionCreateError):
controller.create_canvas_course(self.sis_course_id, self.sis_user_id)
self.assertTrue(exception_data.support_notified)
@patch('canvas_course_site_wizard.controller.send_failure_msg_to_support')
def test_canvas_course_create_error_sets_support_notified(self, send_failure_msg_to_support, get_course_data,
create_course_section, create_new_course,
get_default_template_for_school):
"""
Test to assert that support_notified is set on CanvasCourseCreateError
"""
create_new_course.side_effect = CanvasAPIError(status_code=404)
exception_data = CanvasCourseCreateError(msg_details=self.sis_course_id)
get_default_template_for_school.side_effect = NoTemplateExistsForSchool(self.school_id)
with self.assertRaises(CanvasCourseCreateError):
controller.create_canvas_course(self.sis_course_id, self.sis_user_id)
self.assertTrue(exception_data.support_notified)
@patch('canvas_course_site_wizard.controller.send_failure_msg_to_support')
def test_canvas_course_already_exists_error_doesnt_set_support_notified(self, send_failure_msg_to_support,
get_course_data, create_course_section,
create_new_course, get_default_template_for_school):
"""
Test to assert that support_notified is NOT set on CanvasCourseAlreadyExistsError
"""
create_new_course.side_effect = CanvasAPIError(status_code=400)
get_default_template_for_school.side_effect = NoTemplateExistsForSchool(self.school_id)
with self.assertRaises(CanvasCourseAlreadyExistsError) as cm:
controller.create_canvas_course(self.sis_course_id, self.sis_user_id)
        self.assertFalse(getattr(cm.exception, 'support_notified', False))
@patch('canvas_course_site_wizard.controller.update_course_generation_workflow_state')
@patch('canvas_course_site_wizard.controller.CanvasCourseGenerationJob.objects.create')
def test_canvas_course_id_saved_to_canvas_course_generation_job_single(self,
course_generation_job__objects__create,
update_course_generation_workflow_state, get_course_data,
create_course_section, create_new_course, get_default_template_for_school):
"""
Ensures that the canvas course id is saved to the
CanvasCourseGenerationJob
"""
job = Mock(spec=CanvasCourseGenerationJob())
course_generation_job__objects__create.return_value = job
get_default_template_for_school.side_effect = NoTemplateExistsForSchool(self.school_id)
# don't edit the class-wide create_new_course mock
with patch('canvas_course_site_wizard.controller.create_new_course') as create_new_course:
create_new_course().json.return_value = {'id': self.canvas_course_id}
controller.create_canvas_course(self.sis_course_id, self.sis_user_id)
self.assertEqual(job.canvas_course_id, self.canvas_course_id)
job.save.assert_called_with(update_fields=['canvas_course_id'])
@patch('canvas_course_site_wizard.controller.update_course_generation_workflow_state')
@patch('canvas_course_site_wizard.controller.CanvasCourseGenerationJob.objects.filter')
def test_canvas_course_id_saved_to_canvas_course_generation_job_bulk(self,
course_generation_job__objects__filter,
update_course_generation_workflow_state, get_course_data,
create_course_section, create_new_course, get_default_template_for_school):
"""
Ensures that the canvas course id is saved to the
CanvasCourseGenerationJob
"""
job = Mock(spec=CanvasCourseGenerationJob())
query_set = Mock(get=Mock(return_value=job))
course_generation_job__objects__filter.return_value = query_set
get_default_template_for_school.side_effect = NoTemplateExistsForSchool(self.school_id)
# don't edit the class-wide create_new_course mock
with patch('canvas_course_site_wizard.controller.create_new_course') as create_new_course:
create_new_course().json.return_value = {'id': self.canvas_course_id}
controller.create_canvas_course(self.sis_course_id, self.sis_user_id,
self.bulk_job)
self.assertEqual(job.canvas_course_id, self.canvas_course_id)
job.save.assert_called_with(update_fields=['canvas_course_id'])
@patch('canvas_course_site_wizard.controller.update_course_generation_workflow_state')
@patch('canvas_course_site_wizard.controller.CanvasCourseGenerationJob.objects.create')
def test_canvas_course_id_saved_to_course_instance_single(self,
course_generation_job__objects__create,
update_course_generation_workflow_state, get_course_data,
create_course_section, create_new_course, get_default_template_for_school):
"""
Ensures that the canvas course id is saved to the CourseInstance
"""
job = Mock(spec=CanvasCourseGenerationJob())
course_generation_job__objects__create.return_value = job
# don't edit the class-wide create_new_course mocks
with contextlib.nested(
patch('canvas_course_site_wizard.controller.get_course_data'),
patch('canvas_course_site_wizard.controller.create_new_course')
) as (get_course_data, create_new_course):
course_data = MagicMock(spec=SISCourseData())
get_course_data.return_value = course_data
create_new_course().json.return_value = {'id': self.canvas_course_id}
get_default_template_for_school.side_effect = NoTemplateExistsForSchool(self.school_id)
controller.create_canvas_course(self.sis_course_id, self.sis_user_id)
self.assertEqual(course_data.canvas_course_id, self.canvas_course_id)
course_data.save.assert_called_with(update_fields=['canvas_course_id'])
@patch('canvas_course_site_wizard.controller.update_course_generation_workflow_state')
@patch('canvas_course_site_wizard.controller.CanvasCourseGenerationJob.objects.filter')
def test_canvas_course_id_saved_to_course_instance_bulk(self,
course_generation_job__objects__filter,
update_course_generation_workflow_state, get_course_data,
create_course_section, create_new_course, get_default_template_for_school):
"""
Ensures that the canvas course id is saved to the CourseInstance
"""
job = Mock(spec=CanvasCourseGenerationJob())
query_set = Mock(get=Mock(return_value=job))
course_generation_job__objects__filter.return_value = query_set
# don't edit the class-wide create_new_course mocks
with contextlib.nested(
patch('canvas_course_site_wizard.controller.get_course_data'),
patch('canvas_course_site_wizard.controller.create_new_course')
) as (get_course_data, create_new_course):
course_data = MagicMock(spec=SISCourseData())
get_course_data.return_value = course_data
create_new_course().json.return_value = {'id': self.canvas_course_id}
get_default_template_for_school.side_effect = NoTemplateExistsForSchool(self.school_id)
controller.create_canvas_course(self.sis_course_id, self.sis_user_id,
self.bulk_job)
self.assertEqual(course_data.canvas_course_id, self.canvas_course_id)
course_data.save.assert_called_with(update_fields=['canvas_course_id'])
@patch('canvas_course_site_wizard.controller.get_single_course_courses')
@patch('canvas_course_site_wizard.controller.update_course_generation_workflow_state')
@patch('canvas_course_site_wizard.controller.CanvasCourseGenerationJob.objects.create')
@patch('canvas_course_site_wizard.controller.CanvasCourseGenerationJob.objects.filter')
@patch('canvas_course_site_wizard.controller.logger')
@patch('canvas_course_site_wizard.controller.SDK_CONTEXT')
def test_create_new_course_called_with_default_template_params(self,
SDK_CONTEXT, logger, course_generation_job__objects__filter,
course_generation_job__objects__create,
update_course_generation_workflow_state, get_template_course, get_course_data,
create_course_section, create_new_course, get_default_template_for_school):
"""
Test to assert that create_new_course method is called by
create_canvas_course controller method with appropriate arguments
(collapses a bunch of individual parameter tests)
"""
job = Mock(spec=CanvasCourseGenerationJob())
course_generation_job__objects__create.return_value = job
query_set = Mock(get=Mock(return_value=job))
course_generation_job__objects__filter.return_value = query_set
course_model_mock = self.get_mock_of_get_course_data()
get_template_course.return_value = Bunch(json=lambda: {
'is_public': True,
'public_syllabus': True,
'is_public_to_auth_users': True
})
get_course_data.return_value = course_model_mock
sis_account_id_argument = 'sis_account_id:' + course_model_mock.sis_account_id
course_code_argument = course_model_mock.course_code
course_name_argument = course_model_mock.course_name
course_term_id_argument = 'sis_term_id:' + course_model_mock.sis_term_id
course_sis_course_id_argument = self.sis_course_id
get_default_template_for_school.return_value = Bunch(template_id='12345')
controller.create_canvas_course(self.sis_course_id, self.sis_user_id)
create_new_course.assert_called_with(
request_ctx=SDK_CONTEXT,
account_id=sis_account_id_argument,
course_name=course_name_argument,
course_course_code=course_code_argument,
course_term_id=course_term_id_argument,
course_sis_course_id=course_sis_course_id_argument,
course_is_public_to_auth_users=True,
course_is_public=True,
course_public_syllabus=True
)
@patch('canvas_course_site_wizard.controller.update_course_generation_workflow_state')
@patch('canvas_course_site_wizard.controller.CanvasCourseGenerationJob.objects.create')
@patch('canvas_course_site_wizard.controller.CanvasCourseGenerationJob.objects.filter')
@patch('canvas_course_site_wizard.controller.logger')
@patch('canvas_course_site_wizard.controller.SDK_CONTEXT')
def test_create_new_course_called_with_no_default_template_params(self,
SDK_CONTEXT, logger, course_generation_job__objects__filter,
course_generation_job__objects__create,
update_course_generation_workflow_state, get_course_data,
create_course_section, create_new_course, get_default_template_for_school):
"""
Test to assert that create_new_course method is called by
create_canvas_course controller method with appropriate arguments
(collapses a bunch of individual parameter tests)
"""
job = Mock(spec=CanvasCourseGenerationJob())
course_generation_job__objects__create.return_value = job
query_set = Mock(get=Mock(return_value=job))
course_generation_job__objects__filter.return_value = query_set
course_model_mock = self.get_mock_of_get_course_data()
get_course_data.return_value = course_model_mock
sis_account_id_argument = 'sis_account_id:' + course_model_mock.sis_account_id
course_code_argument = course_model_mock.course_code
course_name_argument = course_model_mock.course_name
course_term_id_argument = 'sis_term_id:' + course_model_mock.sis_term_id
course_sis_course_id_argument = self.sis_course_id
get_default_template_for_school.side_effect = NoTemplateExistsForSchool(school_id=self.school_id)
controller.create_canvas_course(self.sis_course_id, self.sis_user_id)
create_new_course.assert_called_with(
request_ctx=SDK_CONTEXT,
account_id=sis_account_id_argument,
course_name=course_name_argument,
course_course_code=course_code_argument,
course_term_id=course_term_id_argument,
course_sis_course_id=course_sis_course_id_argument,
course_is_public_to_auth_users=False
)
@patch('canvas_course_site_wizard.controller.get_single_course_courses')
@patch('canvas_course_site_wizard.controller.update_course_generation_workflow_state')
@patch('canvas_course_site_wizard.controller.CanvasCourseGenerationJob.objects.create')
@patch('canvas_course_site_wizard.controller.CanvasCourseGenerationJob.objects.filter')
@patch('canvas_course_site_wizard.controller.logger')
@patch('canvas_course_site_wizard.controller.SDK_CONTEXT')
def test_create_new_course_called_with_bulk_template_params(self,
SDK_CONTEXT, logger, course_generation_job__objects__filter,
course_generation_job__objects__create,
update_course_generation_workflow_state, get_template_course, get_course_data,
create_course_section, create_new_course, get_default_template_for_school):
"""
Test to assert that create_new_course method is called by
create_canvas_course controller method with appropriate arguments
(collapses a bunch of individual parameter tests)
"""
job = Mock(spec=CanvasCourseGenerationJob())
course_generation_job__objects__create.return_value = job
query_set = Mock(get=Mock(return_value=job))
course_generation_job__objects__filter.return_value = query_set
course_model_mock = self.get_mock_of_get_course_data()
get_template_course.return_value = Bunch(json=lambda: {
'is_public': True,
'public_syllabus': True,
'is_public_to_auth_users': True
})
get_course_data.return_value = course_model_mock
sis_account_id_argument = 'sis_account_id:' + course_model_mock.sis_account_id
course_code_argument = course_model_mock.course_code
course_name_argument = course_model_mock.course_name
course_term_id_argument = 'sis_term_id:' + course_model_mock.sis_term_id
course_sis_course_id_argument = self.sis_course_id
bulk_job = BulkCanvasCourseCreationJob(id=self.bulk_job_id, template_canvas_course_id=12345)
controller.create_canvas_course(self.sis_course_id, self.sis_user_id, bulk_job)
assert not get_default_template_for_school.called
create_new_course.assert_called_with(
request_ctx=SDK_CONTEXT,
account_id=sis_account_id_argument,
course_name=course_name_argument,
course_course_code=course_code_argument,
course_term_id=course_term_id_argument,
course_sis_course_id=course_sis_course_id_argument,
course_is_public_to_auth_users=True,
course_is_public=True,
course_public_syllabus=True
)
@patch('canvas_course_site_wizard.controller.get_single_course_courses')
@patch('canvas_course_site_wizard.controller.update_course_generation_workflow_state')
@patch('canvas_course_site_wizard.controller.CanvasCourseGenerationJob.objects.create')
@patch('canvas_course_site_wizard.controller.CanvasCourseGenerationJob.objects.filter')
@patch('canvas_course_site_wizard.controller.logger')
@patch('canvas_course_site_wizard.controller.SDK_CONTEXT')
def test_create_new_course_called_with_no_bulk_template_params(self,
SDK_CONTEXT, logger, course_generation_job__objects__filter,
course_generation_job__objects__create,
update_course_generation_workflow_state, get_template_course, get_course_data,
create_course_section, create_new_course, get_default_template_for_school):
"""
Test to assert that create_new_course method is called by
create_canvas_course controller method with appropriate arguments
(collapses a bunch of individual parameter tests)
"""
job = Mock(spec=CanvasCourseGenerationJob())
course_generation_job__objects__create.return_value = job
query_set = Mock(get=Mock(return_value=job))
course_generation_job__objects__filter.return_value = query_set
course_model_mock = self.get_mock_of_get_course_data()
get_course_data.return_value = course_model_mock
sis_account_id_argument = 'sis_account_id:' + course_model_mock.sis_account_id
course_code_argument = course_model_mock.course_code
course_name_argument = course_model_mock.course_name
course_term_id_argument = 'sis_term_id:' + course_model_mock.sis_term_id
course_sis_course_id_argument = self.sis_course_id
bulk_job = BulkCanvasCourseCreationJob(id=self.bulk_job_id, template_canvas_course_id=None)
controller.create_canvas_course(self.sis_course_id, self.sis_user_id, bulk_job)
assert not get_default_template_for_school.called
assert not get_template_course.called
create_new_course.assert_called_with(
request_ctx=SDK_CONTEXT,
account_id=sis_account_id_argument,
course_name=course_name_argument,
course_course_code=course_code_argument,
course_term_id=course_term_id_argument,
course_sis_course_id=course_sis_course_id_argument,
course_is_public_to_auth_users=False
)
|
# GFF3 parser based on gffutils
# Extracts gene transcript information from a GFF3 file.
# In addition to the GFF3 annotation, the genomic sequence or the transcript sequences
# are needed to retrieve the transcript sequences.
# Some information is required to correctly parse the GFF3 file:
# Is the sequence format genome or transcripts?
# What is the transcript feature called (e.g. gene or mRNA)?
# Which subfeatures of the transcript should be included (e.g. CDS, UTR, exons)?
# Returns a list of gene annotations in this format:
# (gene_id, species_name, contig_name, start_index, stop_index, strand_orientation, gene_name, gene_description,
#  nt_sequence, prot_sequence)
import gffutils
import os
from pyfaidx import Fasta
from Bio.Seq import Seq
from Bio.Alphabet import IUPAC
class GFF3Parser_v2:
# Path to GFF3
# Next gene node ID
# Name of transcript feature (e.g. gene or mRNA)
# Names of transcript subfeatures to include (list), e.g. [CDS,UTR]
# Feature containing "Name" attribute plus name of attribute. e.g. gene:name
# Feature containing "Description" attribute plus name of attribute e.g. CDS:product
# Both attributes are NOT mandatory. If name attribute is not set, the transcript ID will be used as name
# If description is missing, this attribute will not be included in the database
def __init__(self, transcript_output, translate_output):
self.output_path_nt_transcript = transcript_output
self.output_path_prot_translation = translate_output
# Ensure that output files are empty
with open(self.output_path_nt_transcript, "w") as nt_out:
with open(self.output_path_prot_translation, "w") as prot_out:
pass
        # Each gene node gets a unique id, starting at zero
self.gene_node_id = 0
# Reverse complement a nucleotide sequence
def reverse_complement(self, sequence):
rev_complement = {"A": "T", "T": "A", "C": "G", "G": "C"}
try:
sequence = "".join([rev_complement[nt] for nt in sequence])[::-1]
# If non-nucleotide letters are found: return empty string
except KeyError:
sequence = ""
return sequence
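    # For example, reverse_complement("ATGC") returns "GCAT"; a sequence with a
    # non-ACGT letter such as "ATNG" returns "".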
# Translate a nucleotide sequence into protein sequence
def translate_nt(self, nt_sequence):
# Coding sequence should be in frame
        # If the nucleotide sequence does not start with ATG, repeatedly trim one codon until it does
while nt_sequence:
if nt_sequence[:3] == "ATG": break
else:
if len(nt_sequence) < 3: return ""
else:
nt_sequence = nt_sequence[3:]
# Biopython demands coding sequence lengths to be a multiple of three
# Add trailing N to those sequences that fail this requirement
while len(nt_sequence)%3 != 0:
nt_sequence += "N"
coding_seq = Seq(nt_sequence, IUPAC.ambiguous_dna)
return str(coding_seq.translate(to_stop=True))
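    # Worked example: "GGGATGAAACCC" is trimmed to "ATGAAACCC" and translates to
    # "MKP"; "ATGAA" is padded to "ATGAAN" and yields "MX" (the trailing N makes
    # the last codon ambiguous).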
def parse_gff3_file(self, gff3_file_path, sequence_file_path, seq_is_genome, parent_feature_type,
subfeatures, name_attribute, descr_attribute):
species_name = os.path.splitext(os.path.basename(gff3_file_path))[0]
# Load GFF3 file
gffutils.create_db(gff3_file_path, "gff3utils.db", merge_strategy="create_unique", force=True)
gff3_db = gffutils.FeatureDB('gff3utils.db', keep_order=False)
# Parse sequence file
# Is sequence the genome (true) or already spliced transcripts (false)
if seq_is_genome:
sequence = Fasta(sequence_file_path, sequence_always_upper=True)
else:
sequence = None
# Set the list of all subfeatures of parent_feature that can be included in the final transcript
# Input format: subfeat1,subfeat2,subfeat3
subfeatures = subfeatures.split(",")
# Attribute location is converted from feat:attr to a tuple
name_attribute = name_attribute.split(":")
descr_attribute = descr_attribute.split(":")
# Collect all gene annotations in a list
gene_annotation_list = []
# Iterate through all transcripts (identified by parent_feature_type)
for transcript in gff3_db.iter_by_parent_childs(parent_feature_type):
# Increase gene node ID by one
self.gene_node_id += 1
# Extract all "standard" attributes for this transcript:
# name and description may be changed by name_attribute and descr_attribute
gene_annotation = [self.gene_node_id, species_name, transcript[0].seqid, transcript[0].start,
transcript[0].stop, transcript[0].strand, transcript[0].id, ""]
if name_attribute[0] == parent_feature_type:
try:
gene_annotation[6]=transcript[0][name_attribute[1]][0]
except KeyError:
gene_annotation[6] = ""
if descr_attribute[0] == parent_feature_type:
try:
gene_annotation[7]=transcript[0][descr_attribute[1]][0]
except KeyError:
gene_annotation[7]= ""
# Collect gene annotation in list
gene_sequence = []
# Iterate through all subfeatures of this transcript
# Two tasks are performed here: Look for name or descr attributes
# Build the sequence of this transcript
for subfeature in transcript[1:]:
# Check if feature type is in the list of selected subfeatures
if subfeature.featuretype in subfeatures:
# Check if name or description attribute can be found in this subfeature
if name_attribute[0] == subfeature.featuretype:
try:
gene_annotation[6] = subfeature[name_attribute[1]][0]
except KeyError:
pass
if descr_attribute[0] == subfeature.featuretype:
try:
gene_annotation[7] = subfeature[descr_attribute[1]][0]
except KeyError:
pass
# Collect sequence of this subfeature if nucleotide sequence is the genome
# Important: Current version of GFFutils has a bug preventing the automatic reverse-complement
# of minus-strand features
# Strandedness is therefore evaluated manually here
if seq_is_genome:
# Is sequence from a minus-strand feature?
antisense = subfeature.strand == "-"
# If antisense, reverse complement the sequence
seq_fragment = subfeature.sequence(sequence, False) if not antisense \
else self.reverse_complement(subfeature.sequence(sequence, False))
# Include the coding phase, phase is zero if phase field is empty:
phase = int(subfeature.frame) if subfeature.frame.isdigit() else 0
# If a gene consists of multiple segments they need to be sorted by their start index
# For antisense strand features the negative of the start index is used
start_index = int(subfeature.start) if not antisense else int(subfeature.start)*(-1)
# Store each gene sequence fragment in a tuple together with its start index
gene_sequence.append((start_index, seq_fragment[phase:]))
# Join all sequence fragments together
# First, sort by start_index
gene_sequence = sorted(gene_sequence, key=lambda x: x[0])
# Now join fragments into a single string
gene_sequence = "".join([item[1] for item in gene_sequence])
# Translate gene sequence into protein sequence
if gene_sequence:
protein_sequence = self.translate_nt(gene_sequence)
else:
protein_sequence = ""
# Append gene and protein sequence to gene annotation list
gene_annotation.append(gene_sequence)
gene_annotation.append(protein_sequence)
gene_annotation_list.append(gene_annotation)
# Write sequence to file, except when no sequence could be retrieved
if not gene_sequence:
continue
# The fasta annotation line is '>lcl|' plus the gene node ID
# 'lcl|' is required by blast+ to ensure correct parsing of the identifier
with open(self.output_path_nt_transcript, "a") as output_nt:
output_nt.write(">lcl|"+str(gene_annotation[0])+"\n")
output_nt.write(gene_sequence+"\n")
if not protein_sequence:
continue
with open(self.output_path_prot_translation, "a") as output_prot:
output_prot.write(">lcl|"+str(gene_annotation[0])+"\n")
output_prot.write(protein_sequence + "\n")
# Sort the gene_list by contig, start and stop. Only one species per file, so no need to sort by species
gene_annotation_list = sorted(gene_annotation_list, key=lambda x: (x[2], int(x[3]), int(x[4])))
        # Delete the genome index file (pyfaidx only creates it when the sequence is a genome)
        if seq_is_genome:
            os.remove(sequence_file_path + ".fai")
return gene_annotation_list
# # Retrieve a single nt transcript by FASTA header ID
# def get_nt_sequence(self, id):
# try:
# nt_transcripts = Fasta(self.gff3_file_path + "_transcripts.fa", as_raw=True)
# nt_transcript = str(nt_transcripts["lcl|" + str(id)])
# except (KeyError,UnboundLocalError):
# nt_transcript = ""
# os.remove(self.gff3_file_path+"_transcripts.fa" + ".fai")
# return nt_transcript
#
# # Retrieve a single prot translation by FASTA header ID
# def get_prot_sequence(self, id):
# try:
# prot_transcripts = Fasta(self.gff3_file_path + "_translations.fa", as_raw=True)
# prot_transcript = str(prot_transcripts["lcl|" + str(id)])
# except (KeyError, UnboundLocalError):
# prot_transcript = ""
# os.remove(self.gff3_file_path + "_translations.fa" + ".fai")
# return prot_transcript
# Delete transcripts and translations
def delete_transcripts_translations(self):
os.remove(self.output_path_nt_transcript)
os.remove(self.output_path_prot_translation)
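# A minimal usage sketch: the file paths, feature names and attribute locations
# below are hypothetical; adjust them to the actual GFF3/FASTA input.
if __name__ == "__main__":
    parser = GFF3Parser_v2("transcripts.fa", "translations.fa")
    gene_annotations = parser.parse_gff3_file("annotation.gff3", "genome.fa", True,
                                              "mRNA", "CDS", "mRNA:Name", "CDS:product")
    print(len(gene_annotations), "transcripts parsed")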
|
from lxml import etree
text = '''
<div>
<ul>
<li class="item-0"><a href="link1.html">first item</a></li>
<li class="item-1"><a href="link2.html">second item</a></li>
<li class="item-inactive"><a href="link3.html">third item</a></li>
<li class="item-1"><a href="link4.html">fourth item</a></li>
<li class="item-0"><a href="link5.html">fifth item</a> # 注意,此处缺少一个 </li> 闭合标签
</ul>
</div>
'''
# Use etree.HTML to parse the string into an HTML document (the parser repairs the missing </li>)
html = etree.HTML(text)
# Serialize the HTML document back to a string
result = etree.tostring(html)
li = html.xpath("//li[@class='item-0']")
# xpath() returns a list of elements, so query each element with a relative path
hrefs = [item.xpath('./a/@href') for item in li]
print(li)
print(hrefs)
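# The same hrefs can also be collected in a single query (standard XPath; given
# the markup above, the expected result is ['link1.html', 'link5.html']):
# print(html.xpath("//li[@class='item-0']/a/@href"))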
# print(result)
|
records = []
for i in range(5):
    string_in = input("input costs\n")
    records.append(string_in)
print(records)
|
# coding: utf-8
from google.appengine.ext import ndb
import model
class Crumb(model.Base):
"""A class describing Crumbs."""
code = ndb.StringProperty()
lat = ndb.FloatProperty()
lng = ndb.FloatProperty()
  place = ndb.StringProperty(required=False)
PUBLIC_PROPERTIES = ['code', 'lat', 'lng', 'place']
PRIVATE_PROPERTIES = []
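# A minimal usage sketch (hypothetical values; assumes a normal App Engine ndb
# context and the model.Base wiring from this project):
# crumb = Crumb(code='abc123', lat=52.37, lng=4.9, place='Amsterdam')
# crumb_key = crumb.put()
# fetched = crumb_key.get()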
|