text stringlengths 38 1.54M |
|---|
from django.db import models
from jsonfield import JSONField
from django import forms
from django.core.validators import MinLengthValidator
# Create your models here.
class Book(models.Model):
    """A book keyed by its ISBN, with a free-form JSON memo."""
    isbn = models.BigIntegerField(primary_key=True)
    title = models.CharField(max_length=128)
    # Fix: `default={}` is a single shared dict reused by every unsaved
    # instance, so edits to one instance's memo leak into the others.
    # Passing the callable `dict` gives each instance a fresh dict.
    memo = JSONField(default=dict, dump_kwargs={'ensure_ascii': False})

    def __str__(self):
        return self.title
# moved here from forms.py
def min_length_3_validator(value):
    """Raise ValidationError unless *value* is at least three characters long."""
    if not len(value) >= 3:
        raise forms.ValidationError('3글자 이상 입력해주세요')
class Post(models.Model):
    """A user-submitted post, recording client metadata for auditing."""
    title = models.CharField(max_length=100, validators=[min_length_3_validator])
    content = models.TextField()
    # client metadata captured at submission time
    user_agent= models.CharField(max_length=200)
    ip = models.CharField(max_length=15)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
class GameUser(models.Model):
    """A game account: a user name unique within its server."""
    server_name = models.CharField(max_length=10,
        choices=(
            ('A','A서버'),
            ('B','B서버'),
            ('C','C서버'),
        ))
    user_name = models.CharField(max_length=20, validators=[MinLengthValidator(3)])
    # server_name and user_name are unique *together*: the same user name may
    # exist independently on different servers (e.g. 'd' on A and 'd' on B).
    class Meta:
        unique_together =[
            ('server_name','user_name')
        ]
import requests
import json
import os
import time
from push import push
# Instantiate the push helper and read the TT account credentials from env vars.
_push=push()
uid=os.environ["uid"]
token=os.environ["token"]
# Request body shared by every API call (the account credentials).
data={
    "uid" : uid,
    "token" : token,
}
TTbody=data
# Headers mimic the official TT iOS client so the API accepts the requests.
headers={
    'Host': 'node.52tt.com',
    'Content-Type': 'application/json',
    'Origin': 'http://appcdn.52tt.com',
    'Accept-Encoding': 'br, gzip, deflate',
    'Connection': 'keep-alive',
    'Accept':'*/*',
    'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 12_2 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 TT/5.5.6 NetType/Wifi',
    'Referer': 'http://appcdn.52tt.com/web/frontend-web-activity-new-user-clock-in-thirty-day/index.html',
    'Content-Length': '526',
    'Accept-Language': 'zh-cn',
}
# Accumulated log text later sent to the push service.
# NOTE: `global` at module level is a no-op; kept byte-identical.
global contents
contents = ''
def output(content):
    """Append *content* plus a newline to the global log buffer and echo it."""
    global contents
    contents = contents + content + '\n'
    print(content)
def userinfo():
    """Query and log the user's check-in status (name, accrued money, count).

    Side effect: appends to the global log via output(). Network I/O only;
    returns None.
    """
    url = 'https://node.52tt.com/activity-production/new-user-month-checkin/activity.Checkin/init'
    res = requests.post(url=url, headers=headers, json=TTbody)
    print(res.text)
    # Parse the response once instead of re-running json.loads per field.
    payload = json.loads(res.text)
    if payload['code'] == 0:
        info = payload['data']
        # user name
        nickname = info['userInfo']['nickname']
        # accumulated earnings so far
        curMoney = info['curMoney']
        # number of check-ins completed
        taskIndex = info['taskIndex']
        output(f'[+]用户名:{nickname}\n[+]当前累计收益:{curMoney}\n[+]已打卡次数:{taskIndex}')
def sign():
    """Perform the daily check-in and log the outcome via output()."""
    nowtime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
    url = 'https://node.52tt.com/activity-production/new-user-month-checkin/activity.Checkin/checkin'
    res = requests.post(url=url, headers=headers, json=TTbody)
    # Parse the response once instead of re-running json.loads per field.
    payload = json.loads(res.text)
    code = payload['code']
    msg = payload['msg']
    if code == 2:
        # rejected (e.g. already checked in today): log time and server message
        output(f'[+]当前时间:{nowtime}')
        output(f'[+]{msg}')
    if code == 0:
        # total money earned so far
        curMoney = payload['data']['curMoney']
        output(f'\n[+]打卡成功,累计获得:{curMoney}')
def main():
    """Run the full job: query user info, check in, then push the log."""
    # query the user's check-in status
    userinfo()
    # perform the check-in
    sign()
    # push notification via DingTalk; best-effort, so failures are only logged.
    # Fix: bare `except:` also swallowed SystemExit/KeyboardInterrupt.
    try:
        _push.dingtalk(contents)
    except Exception:
        print('[+]推送参数未填写或推送脚本出错')
    #print(contents)
def main_handler(event, context):
    """Cloud-function entry point; ignores the event payload and runs main()."""
    result = main()
    return result


if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 7 10:48:18 2020
@author: bryce
"""
import numpy as np
inp = np.random.rand(5, 2)


def sigmoid(inp):
    """Element-wise logistic function 1 / (1 + exp(-x)).

    Vectorized in a single NumPy expression instead of the original per-row
    Python comprehension; same values and shape for 2-D input, and now also
    works for scalars and arrays of any rank.
    """
    return (1.0 + np.exp(-inp)) ** -1


sigmoid(inp)
from functools import partial
def renameTable(table: str, table_new, conn, identifier='`'):
    """Rename *table* to *table_new* using ALTER TABLE on *conn*.

    identifier is the identifier-quote character for the dialect
    ('`' for MySQL, '"' for standard SQL / PostgreSQL / SQLite).
    Fix: quote characters embedded in a name are now escaped by doubling,
    so a malicious or unusual table name cannot break out of the quoting.
    """
    def _quote(name):
        # escape the quote char by doubling it, then wrap the name
        return identifier + str(name).replace(identifier, identifier * 2) + identifier

    cur = conn.cursor()
    sql = "ALTER TABLE {0} RENAME TO {1};".format(_quote(table), _quote(table_new))
    print(sql)
    cur.execute(sql)
    conn.commit()


renameTablePostgre = partial(renameTable, identifier='"')
"""
Twitter Bootstrap 4 - helper methods
"""
from textwrap import dedent
from uuid import uuid4
from django import template
from django.utils.safestring import mark_safe
from plotly.graph_objs._figure import Figure
register = template.Library()
@register.simple_tag()
def bs4_thead(columns: str) -> str:
    """Build a <thead> element from a comma-separated list of column titles."""
    cells = []
    for col in columns.split(","):
        cells.append(f"<th>{col}</th>")
    return mark_safe("<thead>" + "".join(cells) + "</thead>")
@register.simple_tag()
def bs4_colgroup(columns: str) -> str:
    """Generate a colgroup
    Args:
        columns (str): A comma separated list of integer widths; must sum to 100
    """
    widths = [int(w) for w in columns.split(",")]
    total = sum(widths)
    if total != 100:
        raise ValueError(
            f"colgroup width in `bs4_colgroup` does not sum to 100; got {total}."
        )
    cols = "".join(f'<col style="width: {w}%;">' for w in widths)
    return mark_safe(f"<colgroup>{cols}</colgroup>")
@register.simple_tag()
def bs4_fullrow(text: str, tr_attrs: str = "") -> str:
    """Generate a full-width row.
    Args:
        text (str): Text to be displayed
        tr_attrs (str, default ""): Attributes to add to the wrapping `tr`
    """
    html = (
        f'<tr {tr_attrs}><td colspan="100%">'
        f'<p class="text-center mb-0">{text}</p></td></tr>'
    )
    return mark_safe(html)
@register.tag(name="alert")
def bs4_alert(parser, token):
    """Block tag: {% alert [type] %}...{% endalert %} wraps content in a Bootstrap alert.

    The optional first argument is the alert type (defaults to "danger").
    """
    args = token.contents.split()
    alert_type = args[1] if len(args) > 1 else "danger"
    # consume everything up to {% endalert %}
    nodelist = parser.parse(("endalert",))
    parser.delete_first_token()
    return AlertWrapperNode(nodelist, alert_type)
class AlertWrapperNode(template.Node):
    """Template node that renders its children inside <div class="alert alert-TYPE">."""
    def __init__(self, nodelist, alert_type: str):
        self.nodelist = nodelist
        self.alert_type = alert_type
    def render(self, context):
        return f'<div class="alert alert-{self.alert_type}">{self.nodelist.render(context)}</div>'
# Map the `event` keyword to the DOM event that triggers figure rendering.
_plotly_events = {"dom": "DOMContentLoaded", "htmx": "htmx:afterSettle"}


@register.simple_tag()
def plotly(fig: Figure | None, **kw) -> str:
    """Render a plotly figure
    fig (Figure): the plotly figure to render
    event: (Literal["dom", "htmx"]): the event that should trigger loading plotly. Defaults to
        "dom" when dom is fully loaded. If set to "htmx", will render after htmx settles.
    resizable: (bool, default False). If true, the figure can be resized by the user.
    """
    if fig is None:
        return ""
    # Fix: local previously shadowed the builtin `id`.
    div_id = uuid4()
    config = fig.to_json()
    event = _plotly_events[kw.get("event", "dom")]
    resizable = str(bool(kw.get("resizable", False))).lower()
    func = f'()=>{{window.app.renderPlotlyFigure(document.getElementById("{div_id}"), {config}, {resizable});}}'
    return mark_safe(
        dedent(
            f"""
            <div id="{div_id}"><span class="text-muted">Loading...</span></div>
            <script>document.addEventListener("{event}", {func}, false);</script>"""
        )
    )
|
from exts import db
from datetime import datetime
'''
比如说,将 user 模型映射到数据库中:
1、在 models.py 中建好数据表模型,并在 manage.py 中导入模型
2、在根目录下:python manage.py db init (初始化数据库迁移环境,只在项目第一次使用时执行)
3、执行:python manage.py db migrate (将模型映射到数据库中,此刻数据库中增加数据库版本,模型还未真正映射成功) => 数据库升级
这里遇到第二行命令报错:
#return __import__('MySQLdb')
#ModuleNotFoundError: No module named 'MySQLdb'
这是没有安装 MySQLdb 执行: pip install MySQLdb
再次执行,继续报错:
#self.encoding = charset_by_name(self.charset).encoding
#AttributeError: 'NoneType' object has no attribute 'encoding'
这里使用 pymysql ,使用 mysqldb 将会报错
注意编码问题:utf8
DB_URI = 'mysql+pymysql://{}:{}@{}:{}/{}?charset=utf8'.format(USERNAME, PASSWORD, HOSTNAME, PORT, DATABASE)
4、将模型真正映射到数据库中:python manage.py db upgrade
'''
# 用户表模型
class User(db.Model):
__tablename__ = 'user'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
mobile = db.Column(db.String(11), nullable=False)
nickname = db.Column(db.String(24), nullable=False)
password = db.Column(db.String(100), nullable=False)
# Q&A (question) model
class Question(db.Model):
    __tablename__ = 'question'
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    title = db.Column(db.String(100), nullable=False)
    content = db.Column(db.Text, nullable=False)
    # Pass the callable `datetime.now`, NOT `now()`: calling it would freeze the
    # timestamp at import time; the callable is evaluated for each new row.
    create_time = db.Column(db.DateTime, default=datetime.now)
    # ForeignKey: author references user.id
    author_id = db.Column(db.Integer, db.ForeignKey('user.id'))
    # backref: lets a User reach all questions they posted via `.question`
    author = db.relationship('User', backref=db.backref('question'))
# Comment model
class Comment(db.Model):
    __tablename__ = 'comment'
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    content = db.Column(db.Text, nullable=False)
    comment_time = db.Column(db.DateTime, default=datetime.now)
    question_id = db.Column(db.Integer, db.ForeignKey('question.id'))
    author_id = db.Column(db.Integer, db.ForeignKey('user.id'))
    # `id` here resolves to the class-level Column above: newest comments first
    question = db.relationship('Question', backref=db.backref('comment', order_by=id.desc()))
    author = db.relationship('User', backref=db.backref('comment'))
|
""" Utils that are implemented in scikit-learn.
"""
import sklearn.utils.extmath as extmath
cartesian = extmath.cartesian |
from sensors.interfaces import GPIOs
from sensors.tachometer import Tachometer
# Wire a tachometer to GPIO pin 15 and print a single reading.
GPIO_ADDRESS_A = 15
gpios = GPIOs([GPIO_ADDRESS_A, ])
interfaces = {
    'gpios': gpios,
}
encoder = Tachometer(interfaces)
print(encoder.get_value())
|
#!/usr/bin python
from subprocess import call
from time import gmtime,strftime,sleep
import os
import picamera
import io
import sys
from PIL import Image
import math,operator
import tweepy
import urllib
import re
#import numpy as np
width = 1280
heigth = 960  # NOTE(review): typo of "height"; kept for compatibility
treshold = 20 # trigger for change detection (per-pixel green-channel delta)
res1 = 100 # x resolution for compare
res2 = 75 # y resolution for compare
trigger = 100  # minimum number of changed pixels to count as motion
tweepy_codes_path="tweepy_codes.txt"
how_often_detection_test=6 # seconds slept between motion-detection polls
def start():
    """Start the ServoBlaster daemon so /dev/servoblaster accepts commands."""
    print("starting servo")
    os.system("sudo /home/pi/PiBits/ServoBlaster/user/servod")
def call_command(servo, pulsewidth):
    """Write an absolute position command ("<servo>=<pulsewidth>") to servoblaster."""
    cmd = "echo " + str(servo) + "=" + pulsewidth + " > /dev/servoblaster"
    print(cmd)
    os.system(cmd)
def servo_adjust_plus(servo, adjust):  # +10,+20
    """Nudge a servo forward by *adjust* units (relative '+' command)."""
    cmd = "echo " + servo + "=+" + adjust + " > /dev/servoblaster"
    os.system(cmd)
    print("executing:" + cmd)
def servo_adjust_minus(servo, adjust):
    """Nudge a servo backward by *adjust* units (relative '-' command)."""
    cmd = "echo " + servo + "=-" + adjust + " > /dev/servoblaster"
    os.system(cmd)
    print("executing:" + cmd)
def kill_servos():
    """Stop the ServoBlaster daemon."""
    os.system("sudo killall servod")
def servo_adjust_pct(servo, adjust):
    """Set a servo position (percentage-style value) via servoblaster."""
    cmd = "echo " + servo + "=" + adjust + " > /dev/servoblaster"
    os.system(cmd)
def move_tilt_pct(command,command2):
    """Start the servo daemon, set both servos to percent positions, stop the daemon."""
    start()
    sleep(5)
    #command=(raw_input("how much to move the servo 0 in pc "))
    servo_adjust_pct("0",command)
    #command2=(raw_input("how much to move servo 1 in pct "))
    servo_adjust_pct("1",command2)
    kill_servos()
def move_tilt_value():
    """Interactively move both servos by absolute pulse values (Python 2: raw_input)."""
    start()
    sleep(5)
    command=(raw_input("how much to move the servo 0 in pts "))
    print(command)
    call_command(0,command)
    command=(raw_input("how much to move the servo 1 in pts "))
    call_command(1,command)
    kill_servos()
def take_a_photo(path):
    """Open the camera, warm up for 5 s with preview on, and save a capture to *path*."""
    with picamera.PiCamera() as camera:
        camera.start_preview()
        sleep(5)
        camera.capture(path)
        camera.stop_preview()
#loads old picture takes a new one, compares with trshold and b/e and sends a signal
#should be the same resolution
def compare_with_pattern(path1, path2):
    """Root-mean-square difference between the histograms of two same-size images."""
    h1 = Image.open(path1).histogram()
    h2 = Image.open(path2).histogram()
    squared = map(lambda a, b: (a - b) ** 2, h1, h2)
    # Python 2: `reduce` is a builtin here
    rms = math.sqrt(reduce(operator.add, squared) / len(h1))
    return rms
def detect_if_move(old_path):
    """Take a fresh shot at the pattern's resolution and return (rms, tmp_path).

    rms is the RMS histogram difference against the stored pattern image;
    tmp_path is the file the new capture was written to.
    """
    p1 = Image.open(old_path) #loading pattern
    tmp = "test.jpg"
    w,h = p1.size
    #picture.show()
    with picamera.PiCamera() as camera: #taking new shot
        camera.resolution = (w,h)
        sleep(2)
        camera.capture(tmp)
    p2 = Image.open(tmp)
    #print("w,h %i %i" % (w,h))
    #diff = np.subtract(p1,p2)
    #total = np.sum(diff)
    h1=p1.histogram()
    h2=p2.histogram()
    # Python 2: `reduce` is a builtin here
    rms = math.sqrt(reduce(operator.add,map(lambda a,b: (a-b)**2, h1,h2))/len(h1))
    return (rms,tmp)
def easy_shot(path,camera):
    """Capture a 1024x768 shot to *path* using an already-open camera."""
    camera.capture(path,resize=(1024,768))
    #print"taking a picture %s \n" % path
def shot_to_publish(path,w=1024,h=768):
    """Open the camera and save a w x h capture to *path* (Python 2 print statement)."""
    print "shooting to publish"
    with picamera.PiCamera() as camera: #taking new shot
        camera.resolution = (w,h)
        sleep(2)
        camera.capture(path)
def compare(camera):
    """Capture a low-resolution (res1 x res2) frame and return its pixel-access buffer."""
    camera.resolution=(res1,res2)
    stream = io.BytesIO()
    camera.capture(stream,format='bmp')
    stream.seek(0)
    im = Image.open(stream)
    # load() gives random-access pixels; safe to close the stream afterwards
    buffer = im.load()
    stream.close()
    return buffer
def count_diff(buffer):
    """Count pixels whose green-channel delta between the two buffered frames exceeds treshold."""
    diff = 0
    for x in xrange(0, res1):
        for y in xrange(0, res2):
            if abs(buffer[0][x, y][1] - buffer[1][x, y][1]) > treshold:
                diff += 1
    return diff
#detection of the move using stream
def detect_and_save(how_many=10):
    """Poll the camera; save a timestamped shot whenever enough pixels change.

    Stops after *how_many* captures. Uses count_diff/trigger for the decision.
    """
    camera = picamera.PiCamera()
    count = 0
    buffer=[]
    # seed both slots with the same frame so the first diff is zero
    buff = compare(camera)
    buffer.append(buff)
    buffer.append(buff)
    sleep(10)
    while count < how_many:
        buffer[1] = compare(camera)
        if count_diff(buffer) > trigger:
            # motion: promote the new frame to the reference and save a shot
            buffer[0] = buffer[1]
            shottime = strftime("%Y-%m-%d %H:%M:%S",gmtime())
            easy_shot(shottime+".jpg",camera)
            count += 1
            #break
        sleep(10)
def load_tweepy_codes(path):
    """Read the first four "key:value" lines of *path* into a dict.

    Values keep any trailing newline (callers .strip() them), matching the
    original behavior. Fixes: the file handle is now closed (it leaked), and
    splitting uses maxsplit=1 so values containing ':' are not truncated.
    """
    codes = {}
    with open(path) as fh:
        for _ in range(4):
            line = fh.readline()
            key, _, value = line.partition(":")
            codes[key] = value
    return codes
def update_twitter(photo_path,comment = 'detection'):
    """Tweet *photo_path* with *comment* using the module-level authenticated API."""
    api.update_with_media(photo_path,status = comment)
def load_servos_info_from_page(page):
    """Scrape the latest 'Servo1:<n>' / 'Servo2:<n>' values from *page*.

    Python 2 urllib. Returns the two value strings (may carry '+', '-' or '%').
    """
    sock = urllib.urlopen(page)
    html = sock.read()
    sock.close()
    serv1 = re.findall("Servo1:[-+%]?\d+",html)
    serv2 = re.findall("Servo2:[-+%]?\d+",html)
    # use the last (most recent) occurrence on the page
    s1 = serv1[-1].split(':')
    s2 = serv2[-1].split(':')
    return s1[1],s2[1]
# ---- main script: authenticate with Twitter, take a reference shot, then loop ----
codes = load_tweepy_codes(tweepy_codes_path)
servo_page = "http://ratingpedia.eu/parrots/"
api_key = codes['Consumer Key (API Key)'].strip()
api_secret = codes['Consumer Secret (API Secret)'].strip()
access_token = codes['Access Token'].strip()
token_secret = codes['Access Token Secret'].strip()
auth = tweepy.OAuthHandler(api_key,api_secret)
auth.set_access_token(access_token,token_secret)
api = tweepy.API(auth)
my_twitter = api.me()
servo1,servo2 = load_servos_info_from_page(servo_page) #loading servos settings from the page
print my_twitter.name, "is connected"
#tweet_text=['Test shot of birds station',
#            'Move detected with rPi']
#move_tilt_pct()
#move_tilt_value()
go=1
diff_threshold = 1550
sleep(60)
numberOfPictures = 0
# reference ("pattern") image the motion detector compares against
pattern = strftime("%Y-%m-%d %H:%M:%S",gmtime())+'.jpg'
shot_to_publish(pattern)
print "patter taken as " + pattern
# to_twitter = False
# input_read = raw_input("Sending to twitter y/n?")
# if input_read == 'y':
#     to_twitter = True
# print "\n %r" %to_twitter
to_twitter = True
while go==1:
    if numberOfPictures > 50:
        break
    diff, path = detect_if_move(pattern)
    sleep(how_often_detection_test)
    print "diff is " + str(diff)
    if diff > diff_threshold:
        for i in xrange(1,5): #takes 5 picstures if move is detected
            print "motion detected"
            time_event = strftime("%Y-%m-%d %H:%M:%S",gmtime())
            shot_name = strftime("%Y-%m-%d %H:%M:%S",gmtime())+'.jpg'
            shot_to_publish(shot_name)
            #api.update_with_media(shot_name,status = 'detection')
            diff_result = compare_with_pattern(pattern,shot_name)
            print "diff = %d" %diff_result
            if ( diff_result > diff_threshold) and (to_twitter):
                print "updating twitter with " + shot_name
                update_twitter(shot_name,"picture taken in the garden by RasPi")
                sleep(10)
            numberOfPictures += 1
            sleep(300)
    # poll the web page for new servo targets and move if they changed
    s1,s2 = load_servos_info_from_page(servo_page)
    if (s1 != servo1) or (s2 != servo2):
        servo1, servo2 = s1, s2
        print "moving servos"
        print s1,s2
        move_tilt_pct(s1,s2)
|
import os.path
import be.repository.access as dbaccess
import commonlib.messaging.messages
messagecust_store = dbaccess.stores.messagecust_store
def new(owner_id, name, content):
    """Create a message customization row for *owner_id*; returns the store's add() result."""
    return messagecust_store.add(owner=owner_id, name=name, content=content)
def get(owner_id, name):
    """Return the owner's customized content for *name*, or fall back to the
    default plain-text message declared in commonlib.messaging.messages."""
    mcust = messagecust_store.get_one_by_safe(crit=dict(owner=owner_id, name=name))
    return mcust.content if mcust else getattr(commonlib.messaging.messages, name).content_dict['plain']
def list(owner_id):  # TODO: support filtering by message name
    """Return all message customizations owned by *owner_id*.

    NOTE: shadows the builtin `list`; name kept for API compatibility.
    Fix: the previous body filtered on an undefined `name` variable, so
    every call raised NameError; it now returns all rows for the owner.
    """
    return messagecust_store.get_by(crit=dict(owner=owner_id))
def update(owner_id, name, content):
    """Upsert a message customization: update the row if it exists, else create it."""
    #msg = get(name, owner_id)
    # TODO send notification with old content
    mcust = messagecust_store.get_one_by_safe(crit=dict(owner=owner_id, name=name))
    if mcust:
        messagecust_store.update_by(crit=dict(owner=owner_id, name=name), content=content)
    else:
        new(owner_id, name, content)
|
# Volatility
# Copyright (C) 2008-2013 Volatility Foundation
# Copyright (c) 2008 Brendan Dolan-Gavitt <bdolangavitt@wesleyan.edu>
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License Version 2 as
# published by the Free Software Foundation. You may not use, modify or
# distribute this program under any other version of the GNU General
# Public License.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
This module implements the fast module scanning
@author: AAron Walters and Brendan Dolan-Gavitt
@license: GNU General Public License 2.0
@contact: awalters@4tphi.net,bdolangavitt@wesleyan.edu
@organization: Volatility Foundation
"""
#pylint: disable-msg=C0111
import common
import volatility.plugins.filescan as filescan
import volatility.scan as scan
import volatility.utils as utils
import volatility.obj as obj
import volatility.debug as debug #pylint: disable-msg=W0611
class PoolScanModuleFast(scan.PoolScanner):
    """Pool scanner for kernel module allocations (pool tag 'MmLd')."""
    def object_offset(self, found, address_space):
        # `found` points at PoolTag; the object starts right after _POOL_HEADER
        return found + (address_space.profile.get_obj_size("_POOL_HEADER") -
                        address_space.profile.get_obj_offset("_POOL_HEADER", "PoolTag"))
    checks = [ ('PoolTagCheck', dict(tag = 'MmLd')),
               ('CheckPoolSize', dict(condition = lambda x: x > 0x4c)),
               ('CheckPoolType', dict(paged = True, non_paged = True, free = True)),
               ('CheckPoolIndex', dict(value = 0)),
               ]
class ModScan(filescan.FileScan):
    """ Scan Physical memory for _LDR_DATA_TABLE_ENTRY objects
    """
    # Declare meta information associated with this plugin
    meta_info = dict(
        author = 'Brendan Dolan-Gavitt',
        copyright = 'Copyright (c) 2007,2008 Brendan Dolan-Gavitt',
        contact = 'bdolangavitt@wesleyan.edu',
        license = 'GNU General Public License 2.0',
        url = 'http://moyix.blogspot.com/',
        os = 'WIN_32_XP_SP2',
        version = '1.0',
        )
    def calculate(self):
        """Yield _LDR_DATA_TABLE_ENTRY objects carved from physical memory."""
        ## Here we scan the physical address space
        address_space = utils.load_as(self._config, astype = 'physical')
        ## We need the kernel_address_space later
        kernel_as = utils.load_as(self._config)
        scanner = PoolScanModuleFast()
        for offset in scanner.scan(address_space):
            # native_vm lets pointer members dereference through the kernel AS
            ldr_entry = obj.Object('_LDR_DATA_TABLE_ENTRY', vm = address_space,
                                   offset = offset, native_vm = kernel_as)
            yield ldr_entry
    def render_text(self, outfd, data):
        """Print one table row per carved module entry."""
        self.table_header(outfd,
                          [("Offset(P)", "[addrpad]"),
                           ('Name', "20"),
                           ('Base', "[addrpad]"),
                           ('Size', "[addr]"),
                           ('File', "")
                           ])
        for ldr_entry in data:
            self.table_row(outfd,
                           ldr_entry.obj_offset,
                           str(ldr_entry.BaseDllName or ''),
                           ldr_entry.DllBase,
                           ldr_entry.SizeOfImage,
                           str(ldr_entry.FullDllName or ''))
class CheckThreads(scan.ScannerCheck):
    """ Check sanity of _ETHREAD """
    # user/kernel split on 32-bit Windows (used by the commented-out check below)
    kernel = 0x80000000
    def check(self, found):
        """Return True if the pool allocation at *found* plausibly holds an _ETHREAD."""
        pool_base = found - self.address_space.profile.get_obj_offset(
            '_POOL_HEADER', 'PoolTag')
        pool_obj = obj.Object("_POOL_HEADER", vm = self.address_space,
                              offset = pool_base)
        ## We work out the _ETHREAD from the end of the
        ## allocation (bottom up).
        pool_alignment = obj.VolMagic(self.address_space).PoolAlignment.v()
        thread = obj.Object("_ETHREAD", vm = self.address_space,
                            offset = pool_base + pool_obj.BlockSize * pool_alignment -
                            common.pool_align(self.address_space, '_ETHREAD', pool_alignment))
        #if (thread.Cid.UniqueProcess.v() != 0 and
        #    thread.ThreadsProcess.v() <= self.kernel):
        #    return False
        ## check the start address
        if thread.Cid.UniqueProcess.v() != 0 and thread.StartAddress == 0:
            return False
        ## Check the Semaphores
        # NOTE(review): `and` means a candidate is rejected only when BOTH Size
        # and Type are wrong -- possibly intended as `or`; confirm upstream.
        if (thread.Tcb.SuspendSemaphore.Header.Size != 0x05 and
                thread.Tcb.SuspendSemaphore.Header.Type != 0x05):
            return False
        if (thread.KeyedWaitSemaphore.Header.Size != 0x05 and
                thread.KeyedWaitSemaphore.Header.Type != 0x05):
            return False
        return True
class PoolScanThreadFast(scan.PoolScanner):
    """ Carve out thread objects using the pool tag """
    def object_offset(self, found, address_space):
        """ This returns the offset of the object contained within
        this pool allocation.
        """
        ## The offset of the object is determined by subtracting the offset
        ## of the PoolTag member to get the start of Pool Object
        pool_base = found - self.buffer.profile.get_obj_offset('_POOL_HEADER', 'PoolTag')
        pool_obj = obj.Object("_POOL_HEADER", vm = address_space,
                              offset = pool_base)
        ## We work out the _ETHREAD from the end of the
        ## allocation (bottom up).
        pool_alignment = obj.VolMagic(address_space).PoolAlignment.v()
        object_base = (pool_base + pool_obj.BlockSize * pool_alignment -
                       common.pool_align(address_space, '_ETHREAD', pool_alignment))
        return object_base
    # tag is 'Thr' plus a protected-tag byte (0xe5 = 'e' | 0x80)
    checks = [ ('PoolTagCheck', dict(tag = '\x54\x68\x72\xe5')),
               ('CheckPoolSize', dict(condition = lambda x: x >= 0x278)),
               ('CheckPoolType', dict(paged = True, non_paged = True, free = True)),
               ('CheckPoolIndex', dict(value = 0)),
               ('CheckThreads', {}),
               ]
class ThrdScan(ModScan):
    """Scan physical memory for _ETHREAD objects"""
    def calculate(self):
        """Yield _ETHREAD objects carved from physical memory."""
        ## Here we scan the physical address space
        address_space = utils.load_as(self._config, astype = 'physical')
        kernel_as = utils.load_as(self._config)
        scanner = PoolScanThreadFast()
        for found in scanner.scan(address_space):
            thread = obj.Object('_ETHREAD', vm = address_space,
                                native_vm = kernel_as, offset = found)
            yield thread
    def render_text(self, outfd, data):
        """Print one table row per carved thread."""
        self.table_header(outfd,
                          [("Offset(P)", "[addrpad]"),
                           ("PID", ">6"),
                           ("TID", ">6"),
                           ("Start Address", "[addr]"),
                           ("Create Time", "30"),
                           ("Exit Time", "30"),
                           ])
        for thread in data:
            self.table_row(outfd, thread.obj_offset,
                           thread.Cid.UniqueProcess,
                           thread.Cid.UniqueThread,
                           thread.StartAddress,
                           thread.CreateTime or '',
                           thread.ExitTime or '',
                           )
|
import datetime
r"""
this creates an empty file
"""
# timestamp captured once at import; used to name the created file
filename = datetime.datetime.now()


def create_file():
    """Create an empty file named YY-MM-DD-HH-MM from the captured timestamp.

    Fix: the format used %h (abbreviated month name, a strftime extension),
    producing e.g. '25-01-02-Jan-30'; %H (24-hour clock) was clearly intended.
    """
    with open(filename.strftime("%y-%m-%d-%H-%M"), "w") as file:
        file.write("")


create_file()
|
from django.forms import ModelForm, widgets
from django import forms
from .models import Product
class ProductForm(ModelForm):
    """ModelForm exposing every Product field with Bootstrap-styled widgets."""
    class Meta:
        model = Product
        fields = '__all__'
        # apply Bootstrap's form-control class to each input
        widgets = {
            'name' : forms.Textarea(attrs={'class':'form-control'}),
            'weight' : forms.NumberInput(attrs={'class':'form-control'}),
            'price' : forms.NumberInput(attrs={'class' : 'form-control'})
        }
|
# instr_dec.py
# instruction decoder: combinatorial logic block
# input: MIPS 32-bit instruction
# sets up processor control blocks
import pyrtl
# instantiate a memory block storing sample instructions (32-bit each)
# reads one instruction per cycle
# ROM holding four sample 32-bit MIPS instructions; one is read per cycle.
sample_instructions = [201326592, 286326786, 4202528, 2366177284]
mem = pyrtl.RomBlock(bitwidth=32, addrwidth=2, romdata=sample_instructions, max_read_ports=1)
# variable counter will serve as an address in this example
counter = pyrtl.Register(bitwidth=2)
counter.next <<= counter + 1
# read data stored in rom
data = pyrtl.WireVector(bitwidth=32, name='data')
data <<= mem[counter]
# output data
op = pyrtl.Output(bitwidth=6, name='op')
rs = pyrtl.Output(bitwidth=5, name='rs')
rt = pyrtl.Output(bitwidth=5, name='rt')
rd = pyrtl.Output(bitwidth=5, name='rd')
sh = pyrtl.Output(bitwidth=5, name='sh')
func = pyrtl.Output(bitwidth=6, name='func')
imm = pyrtl.Output(bitwidth=16, name='imm') # for I-type
addr = pyrtl.Output(bitwidth=26, name='addr') # for J-type instruct
### ADD YOUR INSTRUCTION DECODE LOGIC HERE ###
# R-Type: op(6), rs(5), rt(5), rd(5), shamt(5), funct(6)
# I-Type: op(6), rs(5), rt(5), immed(16)
# J-Type: op(6), addr(26)
# Slice the fixed MIPS bit fields (slices index from the LSB upward);
# all field outputs are driven every cycle regardless of instruction type.
func <<= data[:6]
sh <<= data[6:11]
rd <<= data[11:16]
rt <<= data[16:21]
rs <<= data[21:26]
op <<= data[26:]
addr <<= data[:26]
imm <<= data[:16]
# simulate the decoder over all four ROM entries and dump the waveform
sim_trace = pyrtl.SimulationTrace()
sim = pyrtl.Simulation(tracer=sim_trace)
for cycle in range(4):
    sim.step({})
sim_trace.render_trace(symbol_len=20, segment_size=1)
|
# This Python file uses the following encoding: utf-8
import sys
def levenshtein_distance(statement, other_statement):
    """Return a 0..1 case-insensitive similarity ratio, rounded to 2 decimals.

    NOTE: despite the name, this is difflib's Ratcliff/Obershelp ratio, not a
    true Levenshtein edit distance; the name is kept for API compatibility.
    Returns 0 when either input is empty or None.
    Fix: removed an unused nested `import sys`.
    """
    from difflib import SequenceMatcher

    if not statement or not other_statement:
        return 0
    matcher = SequenceMatcher(None, statement.lower(), other_statement.lower())
    return round(matcher.ratio(), 2)
|
from .MetricDataLoader import MetricDataLoader, MetricGroups
from .GeneralDataLoader import GeneralDataLoader
from .loader import Loader |
from ElementoMapa import ElementoMapa
class Bomba(ElementoMapa):
    """Map element representing a bomb."""

    def __init__(self):
        # the bomb starts deactivated
        self.__activada = False

    # getter/setter for the activation flag
    def get_activada(self):
        return self.__activada

    def set_activada(self, activada):
        self.__activada = activada

    def entrar(self):
        """Detonate when something enters this cell.

        Fix: the original assigned a bare local `__activada = True`, so the
        instance attribute never changed and the bomb never recorded exploding.
        """
        self.__activada = True
        print("La bomba ha explotado.")

    def es_bomba(self):
        """This map element is a bomb."""
        return True
from abc import ABC
class Vehicle:
    """A parked vehicle: registration number, optional type, and its parking ticket."""
    def __init__(self, registration_number, ticket, vehicletype=None):
        self.registration_number = registration_number
        # name-mangled private attributes (_Vehicle__type / _Vehicle__ticket)
        self.__type = vehicletype
        self.__ticket = ticket
    def assign_ticket(self, ticket):
        """Replace the vehicle's current parking ticket."""
        self.__ticket = ticket
class Car(Vehicle):
    """Concrete vehicle whose type defaults to "Car"."""
    def __init__(self, registration_number, ticket, vehicletype="Car"):
        super().__init__(registration_number, ticket, vehicletype=vehicletype)
from distutils.core import setup
from distutils.extension import Extension
from Cython.Build import cythonize
from Cython.Distutils import build_ext
# Cython build configuration: compile script.py into an extension module "fonctC".
extensions = [
    Extension("fonctC", ["script.py"]) # rename to suit (original note in French)
]
setup(
    cmdclass = {'build_ext':build_ext},
    ext_modules = cythonize(extensions),
)
|
#coding:utf-8
import sys
import fasttext
import json
import logging
from sklearn.model_selection import train_test_split
from collections import defaultdict
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',level=logging.INFO)
class fasttext_trainer:
    """Thin wrapper around the `fasttext` package for train/test/load/predict."""
    def __init__(self,modelname):
        # path/name under which the trained model is saved and re-loaded
        self.modelname_ = modelname
    def train(self,traindata):
        """Train a supervised classifier from a fastText-format text file."""
        self.clf_ = fasttext.supervised(traindata,self.modelname_)
    def test(self,testdata):
        """Evaluate on a fastText-format test file; returns fasttext's result object."""
        return self.clf_.test(testdata)
    def load_model(self):
        """Load a previously trained model from self.modelname_."""
        self.clf_ = fasttext.load_model(self.modelname_)
    def predict(self,data):
        """Predict label(s) for the given text(s)."""
        return self.clf_.predict(data)
def train_fasttext(data_path, name):
    """Build fastText training data from a JSON corpus, train, and log metrics.

    data_path: JSON file with a top-level 'data' list of {'header','content'} samples.
    name: model name passed to fasttext_trainer.
    Fixes: file handles are now closed via `with` (they leaked), and the
    unused X/y accumulators were removed.
    """
    logging.info('Generating training data to pre-defined format')
    with open(data_path) as fh:
        data = json.loads(fh.read().strip())
    lines = []
    label_dict = defaultdict(int)
    for sample in data['data']:
        label = sample['header']
        content = sample['content']
        lines.append('__label__{:} {:}'.format(label, content))
        label_dict[label] += 1
    # log the per-label sample counts
    for label in sorted(label_dict.keys()):
        logging.info('{:}:{:}'.format(label, label_dict[label]))
    logging.info('split training dataset')
    train_lines, test_lines = train_test_split(lines, test_size=0.4, random_state=0)
    with open('../raw_data/train_data_for_fasttext.txt', 'w') as fh:
        fh.write('\n'.join(train_lines))
    with open('../raw_data/test_data_for_fasttext.txt', 'w') as fh:
        fh.write('\n'.join(test_lines))
    logging.info('training')
    ft_trainer = fasttext_trainer(name)
    ft_trainer.train('../raw_data/train_data_for_fasttext.txt')
    result = ft_trainer.test('../raw_data/test_data_for_fasttext.txt')
    logging.info('Precision:{:}'.format(result.precision))
    logging.info('Recall:{:}'.format(result.recall))
    logging.info('Number of examples:{:}'.format(result.nexamples))


if __name__ == "__main__":
    train_fasttext(sys.argv[1], sys.argv[2])
|
# Create your views here.
from django.http import HttpResponse
from django.conf import settings
from jinja2 import Environment, FileSystemLoader, Markup
# Jinja2 environment backed by Django's template dirs, with autoescaping on.
jinja = Environment(loader=FileSystemLoader(settings.TEMPLATE_DIRS), autoescape=True)
from applicati import model
# `base` is the blog application root inside the pickled data store.
base = model.createBase('./pickles').data.applications.blog
def publish(request, category=None):
    """Render the blog entry template for the named category (Python 2 print)."""
    print category
    categoryName = category
    # resolve the category object by attribute name on the blog base
    category = getattr(base.categories, categoryName)
    template = jinja.get_template('blog/entry.html')
    context = {
        'category': category,
    }
    return HttpResponse(template.render(**context))
|
#!/usr/bin/python
#-*- coding:utf-8 -*-
#**********************************************************
#Filename: 110_range_int_as_element.py
#Author: Andrew Wang - shuguang.wang1990@gmail.com
#Description: ---
#Create: 2016-10-04 15:45:14
#Last Modifieda: 2016-10-04 15:45:14
#*********************************************************
|
from pages.frames.framePage import frame_Page_fields
from utilities.BaseClass import BaseClass
from utilities.reusablemethods import CustomMethods
import pytest_check as check
import pytest
class Test_validate_mouse_hover(BaseClass):
    """UI test: hovering the mouse-over button reveals a menu that can be clicked."""
    @pytest.mark.skip
    def test_mousehover_click(self):
        log = self.getLogger()
        log.info("\n*** Test Case - test_mousehover_click ***\n")
        res = CustomMethods(self.driver)
        framePage = frame_Page_fields(self.driver)
        res.mouse_hover_element(framePage.mousehoverBtn_locator, framePage.mousehoverBtn_locatortype)
        # wait for the hover menu to appear before interacting with it
        ele = res.wait_visiblity_element(framePage.hover_menu_locator, framePage.hover_menu_locatortype)
        check.is_not_none(ele)
        res.mouse_hover_click(framePage.hover_topmenu_locator, framePage.hover_topmenu_locatortype)
#!/usr/bin/python
import hashlib,binascii
import sys
# Compute the NT hash (MD4 over the UTF-16LE password) of the first CLI argument.
input = sys.argv[1]  # NOTE: shadows the builtin input()
hash = hashlib.new('md4',input.encode('utf-16le')).digest()  # NOTE: shadows builtin hash()
print binascii.hexlify(hash)
|
# String built-in functions demo:
# min(value) returns the smallest character of the string (by code point).
a="hello welcome to python proggramming language"
res=min(a)
print(res)
# The space character has the lowest code point here, so it prints a space.
|
from __future__ import print_function
from das_client import get_data
import subprocess
#from pdb import set_trace
def query(query_str, verbose=False):
    'simple query function to interface with DAS, better than using Popen as everything is handled by python'
    if verbose:
        print('querying DAS with: "%s"' % query_str)
    data = get_data(
        'https://cmsweb.cern.ch',
        query_str,
        0, 0, False)
    # the entity being requested is the first token of the query (e.g. 'file', 'dataset')
    to_get = query_str.split()[0].strip(',')
    if data['status'] != 'ok':
        raise RuntimeError('Das query crashed')
    #-1 works both when getting dataset from files and files from datasets,
    #not checked on everything
    return [i[to_get][-1]['name'] for i in data['data']]
|
import datetime
from math import sqrt
import operator
import time
from xml.dom import minidom
import xml.parsers.expat
import psycopg2
import requests
import config
# Shared HTTP session with a custom User-Agent, used by all market lookups.
rs = requests.Session()
rs.headers.update({'User-Agent': 'sbot'})
# EVE static-data database connection, only when configured.
if config.bot.eve_dsn is not None:
    db = psycopg2.connect(config.bot.eve_dsn)
# CREST price cache; refreshed at most every 2 hours (see get_crest_price).
crest_price_cache = {'last_update': 0, 'items': {}}
def price_check(cmd):
    """Chat command: show Jita/Amarr/CREST prices for the item named in cmd.args."""
    def get_prices(typeid, system=None, region=None):
        """Query eve-central marketstat; return (bid, ask, volume) or None on bad XML."""
        url = 'http://api.eve-central.com/api/marketstat'
        params = {'typeid': typeid}
        if system:
            params['usesystem'] = system
        if region:
            params['regionlimit'] = region
        try:
            dom = minidom.parseString(rs.get(url, params=params).text)
        except xml.parsers.expat.ExpatError:
            return None
        # highest buy order
        buy = dom.getElementsByTagName('buy')[0]
        buy_max = buy.getElementsByTagName('max')[0]
        bid = float(buy_max.childNodes[0].data)
        # lowest sell order
        sell = dom.getElementsByTagName('sell')[0]
        sell_min = sell.getElementsByTagName('min')[0]
        ask = float(sell_min.childNodes[0].data)
        # volume across all orders
        all_orders = dom.getElementsByTagName('all')[0]
        all_volume = all_orders.getElementsByTagName('volume')[0]
        volume = int(all_volume.childNodes[0].data)
        return bid, ask, volume
    def __item_info(curs, query):
        """LIKE-match market items; return a single row, a list of candidates, or None."""
        curs.execute('''
        SELECT "typeID", "typeName" FROM "invTypes"
        WHERE LOWER("typeName") LIKE %s AND "marketGroupID" IS NOT NULL
        ''', (query.lower(),))
        results = curs.fetchmany(3)
        if len(results) == 1:
            return results[0]
        if len(results) == 2 and \
                results[0][1].endswith('Blueprint') ^ results[1][1].endswith('Blueprint'):
            # an item and its blueprint; show the item
            if results[0][1].endswith('Blueprint'):
                return results[1]
            else:
                return results[0]
        if len(results) >= 2:
            return results
        return None
    def item_info(item_name):
        """Resolve *item_name* to (typeID, typeName); replies with suggestions when ambiguous."""
        with db.cursor() as curs:
            # exact match
            curs.execute(
                'SELECT "typeID", "typeName" FROM "invTypes" WHERE LOWER("typeName") LIKE %s',
                (item_name.lower(),))
            result = curs.fetchone()
            if result:
                return result
            # start of string match
            results = __item_info(curs, item_name + '%')
            if isinstance(results, tuple):
                return results
            if results:
                names = map(lambda r: r[1], results)
                cmd.reply('Found items: ' + ', '.join(names))
                return None
            # substring match
            results = __item_info(curs, '%' + item_name + '%')
            if isinstance(results, tuple):
                return results
            if results:
                names = map(lambda r: r[1], results)
                cmd.reply('Found items: ' + ', '.join(names))
                return None
            cmd.reply('Item not found')
            return None
    def format_prices(prices):
        """Human-readable bid/ask/volume; thousands separators above 1000 ISK."""
        if prices is None:
            return 'n/a'
        if prices[1] < 1000.0:
            return 'bid {0:g} ask {1:g} vol {2:,d}'.format(*prices)
        prices = map(int, prices)
        return 'bid {0:,d} ask {1:,d} vol {2:,d}'.format(*prices)
    def get_crest_price(typeid):
        """CREST average/adjusted price for *typeid*, cached for two hours."""
        now = time.time()
        if crest_price_cache['last_update'] < now - 60 * 60 * 2:
            res = rs.get('https://crest-tq.eveonline.com/market/prices/')
            if res.status_code == 200:
                crest_price_cache['items'].clear()
                for item in res.json()['items']:
                    crest_price_cache['items'][item['type']['id']] = item
                    del item['type']
                crest_price_cache['last_update'] = now
        prices = crest_price_cache['items'].get(typeid)
        if prices and 'averagePrice' in prices:
            if prices['averagePrice'] < 1000.0:
                return 'avg {averagePrice:g} adj {adjustedPrice:g}'.format(**prices)
            for k, v in prices.items():
                prices[k] = int(v)
            return 'avg {averagePrice:,d} adj {adjustedPrice:,d}'.format(**prices)
        else:
            return 'n/a'
    args = cmd.args
    result = item_info(args)
    if not result:
        return
    typeid, item_name = result
    # major trade hub system IDs
    jita_system = 30000142
    amarr_system = 30002187
    jita_prices = get_prices(typeid, system=jita_system)
    amarr_prices = get_prices(typeid, system=amarr_system)
    jita = format_prices(jita_prices)
    amarr = format_prices(amarr_prices)
    crest = get_crest_price(typeid)
    cmd.reply('%s\nJita: %s\nAmarr: %s\nCREST: %s' % (item_name, jita, amarr, crest))
def jumps(cmd):
    """Reply with the gate-jump route between two solar systems.

    ``cmd.args`` holds two (possibly partial) system names; the route comes
    from eve-central's route API.  Relies on module-level ``db`` (SDE with
    the "mapSolarSystems" table) and ``rs`` (requests session).
    """
    split = cmd.args.split()
    if len(split) != 2:
        cmd.reply('usage: `!jumps [from] [to]`')
        return
    with db.cursor() as curs:
        # prefix-match both names in one query; only two rows are needed
        curs.execute('''
        SELECT "solarSystemName" FROM "mapSolarSystems"
        WHERE LOWER("solarSystemName") LIKE %s OR LOWER("solarSystemName") LIKE %s
        ''', (split[0].lower() + '%', split[1].lower() + '%')
        )
        results = list(map(operator.itemgetter(0), curs.fetchmany(2)))
    # pair each user-supplied prefix back up with its matching full name
    query = [None, None]
    for i, s in enumerate(split):
        s = s.lower()
        for r in results:
            if r.lower().startswith(s):
                query[i] = r
                break
        else:
            cmd.reply('could not find system starting with ' + s)
            break
    if None in query:
        return
    r = rs.get('http://api.eve-central.com/api/route/from/%s/to/%s' % (query[0], query[1]))
    try:
        data = r.json()
    except ValueError:
        cmd.reply('error getting jumps')
        return
    jumps_split = []
    for j in data:
        j_str = j['to']['name']
        from_sec = j['from']['security']
        to_sec = j['to']['security']
        if from_sec != to_sec:
            # annotate security transitions.  NOTE(review): %0.1g keeps one
            # significant digit (0.45 -> 0.5); %.1f may have been intended
            # -- confirm
            j_str += ' (%0.1g)' % to_sec
        jumps_split.append(j_str)
    cmd.reply('%d jumps: %s' % (len(data), ', '.join(jumps_split)))
def lightyears(cmd):
    """Reply with the light-year distance between two systems and the minimum
    jump-skill level per ship class that can cover it.

    ``cmd.args`` holds two (possibly partial) system names, resolved against
    the SDE "mapSolarSystems" table via the module-level ``db`` connection.
    """
    split = [n + '%' for n in cmd.args.lower().split()]
    if len(split) != 2:
        cmd.reply('usage: !ly [from] [to]')
        return
    with db.cursor() as curs:
        # prefix-match both names; fetch a few extra rows so ambiguity can
        # be detected and reported rather than silently picking one
        curs.execute('''
        SELECT "solarSystemName", x, y, z FROM "mapSolarSystems"
        WHERE LOWER("solarSystemName") LIKE %s OR LOWER("solarSystemName") LIKE %s
        ''', split)
        result = curs.fetchmany(6)
    if len(result) < 2:
        cmd.reply('error: one or both systems not found')
        return
    elif len(result) > 2:
        cmd.reply('error: found too many systems: ' + ' '.join(map(operator.itemgetter(0), result)))
        return
    # Euclidean distance between the two systems' (x, y, z) coordinates
    dist = 0
    for d1, d2 in zip(result[0][1:], result[1][1:]):
        dist += (d1 - d2)**2
    dist = sqrt(dist) / 9.4605284e15 # meters to lightyears
    # base jump range (ly) per ship class; each skill level adds 20%
    ship_ranges = [
        ('other:', 3.5),
        ('blackops:', 4.0),
        ('jump freighter:', 5.0),
        ('super:', 3.0),
    ]
    jdc = []
    for ship, jump_range in ship_ranges:
        # lowest skill level (0-5) whose boosted range covers the distance
        for level in range(0, 6):
            if dist <= jump_range * (1 + level * 0.2):
                jdc.append('%s %d' % (ship, level))
                break
        else:
            jdc.append(ship + ' N/A')
    cmd.reply('%s ⟷ %s: %.3f ly\n%s' % (result[0][0], result[1][0], dist, '\n'.join(jdc)))
def who(cmd):
    """Reply with a character/corporation/alliance summary for an EVE name.

    Resolves ``cmd.args`` to a character id via the legacy XML API, then
    fetches character, corporation, and (if any) alliance details from ESI,
    including zKillboard links.  Any HTTP error yields a not-found reply.
    """
    dt_format = '%Y-%m-%dT%H:%M:%SZ'  # timestamp format used by both APIs
    try:
        # name -> characterID via the legacy XML API
        r = rs.get('https://api.eveonline.com/eve/CharacterId.xml.aspx', params={'names': cmd.args})
        r.raise_for_status()
        dom = minidom.parseString(r.text)
        row = dom.getElementsByTagName('row')[0]
        char_id = int(row.attributes['characterID'].nodeValue)
        # character details from ESI
        r = rs.get('https://esi.tech.ccp.is/v4/characters/%d/' % char_id)
        r.raise_for_status()
        data = r.json()
        char_name = data['name']
        corp_id = int(data['corporation_id'])
        birthday = data['birthday']
        birthday = datetime.datetime.strptime(birthday, dt_format).date()
        security_status = data['security_status']
        output = '%s: born %s, security status %.2f ' % (char_name, birthday, security_status)
        output += 'https://zkillboard.com/character/%d/' % char_id
        # corporation details
        r = rs.get('https://esi.tech.ccp.is/v3/corporations/%d/' % corp_id)
        r.raise_for_status()
        data = r.json()
        corp_name = data['corporation_name']
        creation_date = data.get('creation_date')  # NPC corps have no creation_date
        if creation_date:
            creation_date = str(datetime.datetime.strptime(creation_date, dt_format).date())
        else:
            creation_date = '?'
        members = data['member_count']
        alliance_id = data.get('alliance_id')
        output += '\n%s: created %s, %s members ' % (corp_name, creation_date, members)
        output += 'https://zkillboard.com/corporation/%d/' % (corp_id)
        if alliance_id:
            # alliance details (absent for corps outside any alliance)
            alliance_id = int(alliance_id)
            r = rs.get('https://esi.tech.ccp.is/v2/alliances/%d/' % alliance_id)
            r.raise_for_status()
            data = r.json()
            alliance_name = data['alliance_name']
            founding_date = data['date_founded']
            founding_date = datetime.datetime.strptime(founding_date, dt_format).date()
            output += '\n%s: founded %s' % (alliance_name, founding_date)
        cmd.reply(output)
    except requests.exceptions.HTTPError:
        cmd.reply("%s: couldn't find your sleazebag" % cmd.sender['username'])
|
# Demonstrate three sorting strategies: default lexicographic order, a
# key-based sort, and the classic decorate-sort-undecorate pattern
# (the "Schwartzian transform").
animals = ['chicken', 'cow', 'snail', 'elephant']
print(sorted(animals))            # alphabetical
print(sorted(animals, key=len))   # shortest word first
# Decorate: pair each word with its length so tuples sort by length.
decorated = [(len(word), word) for word in animals]
print(decorated)
decorated.sort()
# Undecorate: discard the length keys again.
result = [word for _, word in decorated]
print(result)
# The same decorate-sort-undecorate pipeline as a single expression.
print([pair[1] for pair in sorted((len(word), word) for word in animals)])
|
import contextlib
import functools
import json
import os
import pytest
from dcicutils.env_base import LegacyController
from dcicutils.common import APP_CGAP, APP_FOURFRONT # , LEGACY_GLOBAL_ENV_BUCKET
from dcicutils.env_manager import EnvManager
from dcicutils.env_utils import (
is_stg_or_prd_env, is_cgap_env, is_fourfront_env, blue_green_mirror_env, env_equals,
get_mirror_env_from_context, is_test_env, is_hotseat_env, get_standard_mirror_env,
prod_bucket_env, prod_bucket_env_for_app, public_url_mappings, public_url_for_app, permit_load_data,
default_workflow_env, infer_foursight_url_from_env, foursight_env_name,
infer_repo_from_env, data_set_for_env, get_bucket_env, infer_foursight_from_env,
is_indexer_env, indexer_env_for_env, classify_server_url,
short_env_name, full_env_name, full_cgap_env_name, full_fourfront_env_name, is_cgap_server, is_fourfront_server,
# make_env_name_cfn_compatible,
get_foursight_bucket, get_foursight_bucket_prefix, ecr_repository_for_env,
# New support
EnvUtils, p, c, get_env_real_url,
_make_no_legacy, # noQA - yes, protected, but we want to test it
if_orchestrated, UseLegacy,
)
from dcicutils.env_utils_legacy import (
FF_PRODUCTION_ECR_REPOSITORY, blue_green_mirror_env as legacy_blue_green_mirror_env
)
from dcicutils.exceptions import (
BeanstalkOperationNotImplemented, # MissingFoursightBucketTable, IncompleteFoursightBucketTable,
EnvUtilsLoadError, LegacyDispatchDisabled,
)
from dcicutils.misc_utils import decorator, local_attrs, ignorable, override_environ
from dcicutils.qa_utils import raises_regexp
from typing import Optional
from unittest import mock
from urllib.parse import urlparse
from .helpers import using_fresh_cgap_state_for_testing, using_fresh_ff_state_for_testing
ignorable(BeanstalkOperationNotImplemented) # Stuff that does or doesn't use this might come and go
@contextlib.contextmanager
def stage_mirroring(*, enabled=True):
    """Temporarily force EnvUtils.STAGE_MIRRORING_ENABLED to *enabled*."""
    overrides = {'STAGE_MIRRORING_ENABLED': enabled}
    with local_attrs(EnvUtils, **overrides):
        yield
@contextlib.contextmanager
def orchestrated_behavior_for_testing(data: Optional[dict] = None):
    """
    Context manager that arranges for a dynamic execution context to use a specified ecosystem description.
    :param data: an ecosystem description (default EnvUtils.SAMPLE_TEMPLATE_FOR_CGAP_TESTING)
    """
    saved_state = EnvUtils.snapshot_envutils_state_for_testing()
    try:
        # An empty/None data argument falls back to the sample CGAP template.
        EnvUtils.set_declared_data(data or EnvUtils.SAMPLE_TEMPLATE_FOR_CGAP_TESTING)
        yield
    finally:
        # Always restore, even if the body raised.
        EnvUtils.restore_envutils_state_from_snapshot_for_testing(saved_state)
@decorator()
def using_orchestrated_behavior(data: Optional[dict] = None):
    """
    Decorator that arranges for the function it decorates to dynamically use a specified ecosystem description.
    :param data: an ecosystem description (default EnvUtils.SAMPLE_TEMPLATE_FOR_CGAP_TESTING)
    """
    def _wrap(fn):
        @functools.wraps(fn)
        def _call_with_ecosystem(*args, **kwargs):
            # Delegate ecosystem setup/teardown to the context-manager form.
            with orchestrated_behavior_for_testing(data=data):
                return fn(*args, **kwargs)
        return _call_with_ecosystem
    return _wrap
@using_orchestrated_behavior()
def test_orchestrated_ecr_repository_for_cgap_env():
    """In the sample CGAP ecosystem, every env maps to an ECR repo named after itself."""
    prod_like = ['acme-prd', 'acme-stg']
    ordinary = ['acme-mastertest', 'acme-foo']
    for env in prod_like + ordinary:
        repo = ecr_repository_for_env(env)
        print(f"env={env} val={repo}")
        if env in prod_like:
            # prod-class envs must not fall back to the legacy Fourfront production repo
            assert repo != FF_PRODUCTION_ECR_REPOSITORY
        assert repo == env
@using_orchestrated_behavior(data=EnvUtils.SAMPLE_TEMPLATE_FOR_FOURFRONT_TESTING)
def test_orchestrated_ecr_repository_for_ff_env():
    """In the sample Fourfront ecosystem, stg/prd envs share the declared production repo."""
    for env in ['acme-prd', 'acme-stg']:
        repo = ecr_repository_for_env(env)
        print(f"env={env} is_stg_or_prd={is_stg_or_prd_env(env)} val={repo}")
        # Production-class envs get the ecosystem-declared repo, not their own name,
        # and not the legacy wired constant.
        assert repo != FF_PRODUCTION_ECR_REPOSITORY
        assert repo != env
        assert repo == EnvUtils.PRODUCTION_ECR_REPOSITORY
    for env in ['acme-mastertest', 'acme-foo']:
        repo = ecr_repository_for_env(env)
        print(f"env={env} is_stg_or_prd={is_stg_or_prd_env(env)} val={repo}")
        # Ordinary envs just use their own name.
        assert repo == env
@using_orchestrated_behavior()
def test_orchestrated_get_bucket_env():
    """get_bucket_env is the identity for ordinary envs; prod (and, once
    mirroring is enabled, stg) collapse to the WEBPROD_PSEUDO_ENV bucket name."""
    assert EnvUtils.PRD_ENV_NAME == 'acme-prd'
    assert EnvUtils.WEBPROD_PSEUDO_ENV == 'production-data'
    assert get_bucket_env(EnvUtils.PRD_ENV_NAME) == EnvUtils.WEBPROD_PSEUDO_ENV
    assert EnvUtils.STG_ENV_NAME is None
    def test_the_usual_scenario():
        # Helper: expectations that hold whenever staging is NOT active.
        assert get_bucket_env('acme-prd') == 'production-data'  # PRD_ENV_NAME
        assert get_bucket_env('cgap') == 'production-data'  # mentioned in PUBLIC_URL_TABLE
        assert get_bucket_env('acme-wolf') == 'acme-wolf'  # normal env, uses bucket name exactly
        assert get_bucket_env('acme-foo') == 'acme-foo'  # normal env, uses bucket name exactly
        assert get_bucket_env('foo') == 'foo'  # normal env, uses bucket name exactly
        assert get_bucket_env('acme-stg') == 'acme-stg'  # NOTE: Just a normal env. Staging is not enabled.
        assert get_bucket_env('stg') == 'stg'  # NOTE: Alias for acme-stg, but that's no special env.
    test_the_usual_scenario()
    # The only way get_bucket_env differs from the identity function in the orchestrated environment
    # is the hypothetical situation where we were supporting mirroring. In that case, the staging environment
    # would be collapsed with the production environment so that they share buckets. Since we have no mirroring,
    # we can only test that by a mock of is_stg_or_prd_env. -kmp 24-Jul-2021
    with local_attrs(EnvUtils, STG_ENV_NAME='acme-stg'):
        test_the_usual_scenario()  # The STG_ENV_NAME is ignored if "stage_mirroring_enabled": false
        with stage_mirroring(enabled=True):
            assert EnvUtils.STG_ENV_NAME == 'acme-stg'
            assert get_bucket_env('acme-prd') == 'production-data'  # PRD_ENV_NAME
            assert get_bucket_env('cgap') == 'production-data'  # mentioned in PUBLIC_URL_TABLE
            assert get_bucket_env('acme-wolf') == 'acme-wolf'  # normal env, uses bucket name exactly
            assert get_bucket_env('acme-foo') == 'acme-foo'  # normal env, uses bucket name exactly
            assert get_bucket_env('foo') == 'foo'  # normal env, uses bucket name exactly
            # with mirroring enabled, this uses prod bucket
            assert get_bucket_env('acme-stg') == 'production-data'
            # with mirroring enabled, this alias for acme-stg uses prod bucket
            assert get_bucket_env('stg') == 'production-data'
@pytest.mark.skip(reason="Beanstalk functionality no longer supported.")
@using_orchestrated_behavior()
def test_orchestrated_prod_bucket_env():
    """prod_bucket_env maps prod (and, once mirroring is enabled, stg) to
    WEBPROD_PSEUDO_ENV and every other env to None.  (Skipped: beanstalk
    support was removed.)"""
    assert EnvUtils.PRD_ENV_NAME == 'acme-prd'
    assert EnvUtils.WEBPROD_PSEUDO_ENV == 'production-data'
    assert prod_bucket_env(EnvUtils.PRD_ENV_NAME) == EnvUtils.WEBPROD_PSEUDO_ENV
    assert EnvUtils.STG_ENV_NAME is None
    def test_the_usual_scenario():
        # Helper: expectations that hold whenever staging is NOT active.
        assert prod_bucket_env('acme-prd') == 'production-data'  # PRD_ENV_NAME
        assert prod_bucket_env('cgap') == 'production-data'  # Aliased to acme-prd in PUBLIC_URL_TABLE
        assert prod_bucket_env('acme-wolf') is None  # normal env, just returns None
        assert prod_bucket_env('acme-foo') is None  # normal env, just returns None
        assert prod_bucket_env('foo') is None  # normal env, just returns None
        assert prod_bucket_env('acme-stg') is None  # NOTE: Just a normal env. Staging is not enabled.
        assert prod_bucket_env('stg') is None  # NOTE: Just a normal env. Staging is not enabled.
    test_the_usual_scenario()
    # As with get_bucket_env above, the only departure from the simple behavior
    # is the hypothetical situation where we were supporting mirroring. In that case, the staging environment
    # would be collapsed with the production environment so that they share buckets. Since we have no mirroring,
    # we can only test that by a mock of is_stg_or_prd_env. -kmp 24-Jul-2021
    with local_attrs(EnvUtils, STG_ENV_NAME='acme-stg'):
        test_the_usual_scenario()  # The STG_ENV_NAME is ignored if "stage_mirroring_enabled": false
        with stage_mirroring(enabled=True):
            assert EnvUtils.STG_ENV_NAME == 'acme-stg'
            assert prod_bucket_env('acme-prd') == 'production-data'  # PRD_ENV_NAME
            assert prod_bucket_env('cgap') == 'production-data'  # in PUBLIC_URL_TABLE
            assert prod_bucket_env('acme-wolf') is None  # normal env, just returns None
            assert prod_bucket_env('acme-foo') is None  # normal env, just returns None
            assert prod_bucket_env('foo') is None  # normal env, just returns None
            assert prod_bucket_env('acme-stg') == 'production-data'  # WITH mirroring enabled, this uses prod bucket
            assert prod_bucket_env('stg') == 'production-data'  # WITH mirroring enabled, this uses prod bucket
@pytest.mark.skip(reason="Beanstalk functionality no longer supported.")
@using_orchestrated_behavior()
def test_orchestrated_prod_bucket_env_for_app():
    """prod_bucket_env_for_app returns WEBPROD_PSEUDO_ENV for the ecosystem's
    own app and raises for the app the ecosystem does not host.
    (CGAP_SETTINGS_FOR_TESTING / FOURFRONT_SETTINGS_FOR_TESTING are defined
    elsewhere in this module.)"""
    assert prod_bucket_env_for_app() == EnvUtils.WEBPROD_PSEUDO_ENV
    assert prod_bucket_env_for_app('cgap') == EnvUtils.WEBPROD_PSEUDO_ENV
    with pytest.raises(Exception):
        prod_bucket_env_for_app('fourfront')
    with local_attrs(EnvUtils, **CGAP_SETTINGS_FOR_TESTING):
        assert prod_bucket_env_for_app() == EnvUtils.WEBPROD_PSEUDO_ENV == 'fourfront-cgap'
        assert prod_bucket_env_for_app('cgap') == EnvUtils.WEBPROD_PSEUDO_ENV
        with pytest.raises(Exception):
            prod_bucket_env_for_app('fourfront')
    with local_attrs(EnvUtils, **FOURFRONT_SETTINGS_FOR_TESTING):
        assert prod_bucket_env_for_app() == EnvUtils.WEBPROD_PSEUDO_ENV == 'fourfront-webprod'
        assert prod_bucket_env_for_app('fourfront') == EnvUtils.WEBPROD_PSEUDO_ENV
        with pytest.raises(Exception):
            prod_bucket_env_for_app('cgap')
@using_orchestrated_behavior()
def test_orchestrated_infer_foursight_url_from_env():
    """Foursight URLs join the declared foursight base URL with the
    foursight-style env name (shortened or not depending on the ecosystem).
    The request argument is passed but plays no role in these expectations
    (note the 'ignored-request' placeholder)."""
    assert (infer_foursight_url_from_env(request='ignored-request', envname='demo')
            == 'https://foursight.genetics.example.com/api/view/demo')
    # In the sample ecosystem the 'acme-' prefix is stripped for foursight.
    actual = infer_foursight_url_from_env(request='ignored-request', envname='acme-foo')
    expected = 'https://foursight.genetics.example.com/api/view/foo'
    assert actual == expected
    assert (infer_foursight_url_from_env(request='ignored-request', envname='fourfront-cgapwolf')
            == 'https://foursight.genetics.example.com/api/view/fourfront-cgapwolf')
    with local_attrs(EnvUtils, **FOURFRONT_SETTINGS_FOR_TESTING):
        assert (infer_foursight_url_from_env(request='ignored-request', envname='data')
                == 'https://foursight.4dnucleome.org/api/view/data')
        assert (infer_foursight_url_from_env(request='ignored-request', envname='acme-foo')
                == 'https://foursight.4dnucleome.org/api/view/acme-foo')
        assert (infer_foursight_url_from_env(request='ignored-request', envname='fourfront-cgapwolf')
                == 'https://foursight.4dnucleome.org/api/view/cgapwolf')
    with local_attrs(EnvUtils, **CGAP_SETTINGS_FOR_TESTING):
        assert (infer_foursight_url_from_env(request='ignored-request', envname='data')
                == 'https://u9feld4va7.execute-api.us-east-1.amazonaws.com/api/view/data')
        assert (infer_foursight_url_from_env(request='ignored-request', envname='acme-foo')
                == 'https://u9feld4va7.execute-api.us-east-1.amazonaws.com/api/view/acme-foo')
        assert (infer_foursight_url_from_env(request='ignored-request', envname='fourfront-cgapwolf')
                == 'https://u9feld4va7.execute-api.us-east-1.amazonaws.com/api/view/cgapwolf')
@using_fresh_ff_state_for_testing()
def test_ff_default_workflow_env():
    """Fourfront's default workflow env is fourfront-webdev; other apps are rejected."""
    expected = 'fourfront-webdev'
    assert default_workflow_env('fourfront') == expected
    assert default_workflow_env(APP_FOURFRONT) == expected
    for bad_app in ('foo', APP_CGAP):
        with pytest.raises(Exception):
            default_workflow_env(bad_app)  # noQA - we expect this error
@using_fresh_cgap_state_for_testing()
def test_cgap_default_workflow_env():
    """CGAP's default workflow env is cgap-wolf; other apps are rejected."""
    expected = 'cgap-wolf'
    assert default_workflow_env('cgap') == expected
    assert default_workflow_env(APP_CGAP) == expected
    for bad_app in ('foo', APP_FOURFRONT):
        with pytest.raises(Exception):
            default_workflow_env(bad_app)  # noQA - we expect this error
@using_orchestrated_behavior()
def test_orchestrated_permit_load_data():
    """The orchestrated permit_load_data simply echoes allow_prod, regardless
    of env name or orchestrated app."""
    def check_all_combinations():
        for env in (EnvUtils.PRD_ENV_NAME, 'anything'):
            for app in ('cgap', 'fourfront'):
                assert permit_load_data(envname=env, allow_prod=True, orchestrated_app=app) is True
                assert permit_load_data(envname=env, allow_prod=False, orchestrated_app=app) is False
    # The orchestrated definition is not dependent on the environment name or
    # orchestrated app, so the result is constant across ecosystem settings.
    check_all_combinations()
    with local_attrs(EnvUtils, **CGAP_SETTINGS_FOR_TESTING):
        check_all_combinations()
    with local_attrs(EnvUtils, **FOURFRONT_SETTINGS_FOR_TESTING):
        check_all_combinations()
@using_orchestrated_behavior()
def test_orchestrated_data_set_for_env():
    """data_set_for_env: prod envs (and their aliases) are always 'prod';
    other envs come from DEV_DATA_SET_TABLE or fall back to the supplied
    default; stg only counts as prod when mirroring is enabled."""
    assert EnvUtils.DEV_DATA_SET_TABLE == {'acme-hotseat': 'prod', 'acme-test': 'test'}
    # Production environments are always prod
    assert data_set_for_env('acme-prd') == 'prod'
    assert data_set_for_env('cgap') == 'prod'
    # These are declared in the data sets table
    assert data_set_for_env('acme-hotseat') == 'prod'
    assert data_set_for_env('acme-test') == 'test'
    # These are not declared in the data sets
    assert data_set_for_env('acme-mastertest') is None
    assert data_set_for_env('acme-dev') is None
    assert data_set_for_env('acme-foo') is None
    # Production environments are always prod
    assert data_set_for_env('acme-prd', 'test') == 'prod'
    assert data_set_for_env('cgap', 'test') == 'prod'
    # These are declared in the data sets table
    assert data_set_for_env('acme-hotseat', 'test') == 'prod'
    assert data_set_for_env('acme-test', 'test') == 'test'
    # These are not declared in the data sets
    assert data_set_for_env('acme-mastertest', 'test') == 'test'
    assert data_set_for_env('acme-dev', 'test') == 'test'
    assert data_set_for_env('acme-foo', 'test') == 'test'
    # With staging inactive, acme-stg/stg are just ordinary undeclared envs.
    assert data_set_for_env('acme-stg') is None
    assert data_set_for_env('stg') is None
    assert data_set_for_env('stg', 'test') == 'test'
    assert data_set_for_env('acme-stg', 'test') == 'test'
    with local_attrs(EnvUtils, STG_ENV_NAME='acme-stg'):
        # Setting EnvUtils.STG_ENV_NAME doesn't work unless "stage_mirroring_enabled": true at top-level.
        assert data_set_for_env('acme-stg') is None
        assert data_set_for_env('stg') is None
        assert data_set_for_env('acme-stg', 'test') == 'test'
        assert data_set_for_env('stg', 'test') == 'test'
        with stage_mirroring(enabled=True):
            # Now stg counts as production-class, so the default is ignored.
            assert data_set_for_env('acme-stg') == 'prod'
            assert data_set_for_env('stg') == 'prod'
            assert data_set_for_env('acme-stg', 'test') == 'prod'
            assert data_set_for_env('stg', 'test') == 'prod'
@using_orchestrated_behavior()
def test_get_foursight_bucket_prefix():
    """The foursight bucket prefix is guessed from a global env bucket name
    ending in '-envs', or taken from EnvUtils.FOURSIGHT_BUCKET_PREFIX;
    with neither available, the lookup raises."""
    with override_environ(GLOBAL_BUCKET_ENV=None, GLOBAL_ENV_BUCKET=None):
        with EnvManager.global_env_bucket_named('some-sample-envs'):
            # If the global bucket ends in '-envs', we guess
            assert get_foursight_bucket_prefix() == 'some-sample'
        with EnvManager.global_env_bucket_named('some-sample-environments'):
            # If the global bucket doesn't end in '-envs', we don't guess
            with pytest.raises(Exception):
                get_foursight_bucket_prefix()
            with EnvUtils.local_env_utils_for_testing():
                # An explicitly declared prefix takes effect...
                some_prefix = 'sample-foursight-bucket-prefix'
                EnvUtils.FOURSIGHT_BUCKET_PREFIX = some_prefix
                assert get_foursight_bucket_prefix() == some_prefix
            # ...but is scoped to that context, so the lookup fails again here.
            with pytest.raises(Exception):
                get_foursight_bucket_prefix()
@using_orchestrated_behavior()
def test_orchestrated_public_url_mappings():
    """Illustrates the declared PUBLIC_URL_TABLE and the name->URL mapping
    derived from it.

    Note that a URL doesn't have to have 'cgap' in its name, nor does the key
    name; entries just need to refer to PRD_ENV_NAME or something declared in
    PUBLIC_URL_TABLE.
    """
    # (public name, public URL, backing environment) as declared in the sample template
    declared = [
        ('cgap', 'https://cgap.genetics.example.com', 'acme-prd'),
        ('stg', 'https://staging.genetics.example.com', 'acme-stg'),
        ('testing', 'https://testing.genetics.example.com', 'acme-pubtest'),
        ('demo', 'https://demo.genetics.example.com', 'acme-pubdemo'),
    ]
    expected_table = [
        {
            p.NAME: name,
            p.URL: url,
            p.HOST: urlparse(url).hostname,
            p.ENVIRONMENT: env,
        }
        for name, url, env in declared
    ]
    assert EnvUtils.PUBLIC_URL_TABLE == expected_table
    expected_mapping = {name: url for name, url, _ in declared}
    assert public_url_mappings('acme-prd') == expected_mapping  # PRD_ENV_NAME
    assert public_url_mappings('cgap') == expected_mapping      # member of PUBLIC_URL_TABLE
    assert public_url_mappings('acme-foo') == expected_mapping  # correct prefix ("acme-")
    # This last one is slightly different than in legacy where there was a chance of bumping into Foursight.
    # The whole point of an orchestrated version is there is no Fourfront in a CGAP ecosystem, or vice versa.
    assert public_url_mappings('foo') == expected_mapping       # everything has same table
@using_orchestrated_behavior()
def test_orchestrated_blue_green_mirror_env():
    """blue_green_mirror_env is a pure string operation mapping a blue env to
    its green mirror and vice versa.  (It doesn't depend on ecosystem state.
    We could just use the legacy version, but it has bugs; this one should
    instead become the legacy version.)"""
    # Pairs that must map onto each other in both directions.
    mirrored_pairs = [
        ('fourfront-blue', 'fourfront-green'),  # basic fourfront
        ('cgap-blue', 'cgap-green'),            # basic cgap
        ('cgap-test-blue', 'cgap-test-green'),  # anticipated future cases
        ('xyz-green-1', 'xyz-blue-1'),          # edge: color mid-name
        ('xyz-greenish', 'xyz-blueish'),        # edge: color as word prefix
    ]
    for env, mirror in mirrored_pairs:
        assert blue_green_mirror_env(env) == mirror
        assert blue_green_mirror_env(mirror) == env
    # Things with no mirror have no blue/green in them.
    for env in ('fourfront-cgap', 'fourfront-mastertest', 'fourfront-yellow'):
        assert blue_green_mirror_env(env) is None
    with pytest.raises(ValueError):
        blue_green_mirror_env('xyz-blue-green')  # needs to be one or the other
@using_orchestrated_behavior()
def test_orchestrated_public_url_for_app():
    """public_url_for_app returns the ecosystem's own public URL and raises
    for the app the ecosystem doesn't host."""
    assert public_url_for_app() == "https://cgap.genetics.example.com"
    assert public_url_for_app('cgap') == "https://cgap.genetics.example.com"
    with pytest.raises(Exception):
        public_url_for_app('fourfront')  # The example app is not a fourfront app
    with local_attrs(EnvUtils, **CGAP_SETTINGS_FOR_TESTING):
        assert public_url_for_app() == "https://cgap.hms.harvard.edu"
        assert public_url_for_app('cgap') == "https://cgap.hms.harvard.edu"
        with pytest.raises(Exception):
            public_url_for_app('fourfront')  # A cgap app won't know about fourfront
    with local_attrs(EnvUtils, **FOURFRONT_SETTINGS_FOR_TESTING):
        assert public_url_for_app() == "https://data.4dnucleome.org"
        assert public_url_for_app('fourfront') == "https://data.4dnucleome.org"
        with pytest.raises(Exception):
            public_url_for_app('cgap')  # A fourfront app won't know about cgap
@using_orchestrated_behavior()
def test_orchestrated_is_cgap_server_for_cgap():
    """In a CGAP ecosystem, every server name counts as a CGAP server."""
    verdicts = [is_cgap_server('anything'),
                is_cgap_server('anything', allow_localhost=True)]
    assert all(v is True for v in verdicts)
@using_orchestrated_behavior(data=EnvUtils.SAMPLE_TEMPLATE_FOR_FOURFRONT_TESTING)
def test_orchestrated_is_cgap_server_for_fourfront():
    """In a Fourfront ecosystem, no server name counts as a CGAP server."""
    verdicts = [is_cgap_server('anything'),
                is_cgap_server('anything', allow_localhost=True)]
    assert all(v is False for v in verdicts)
@using_orchestrated_behavior(data=EnvUtils.SAMPLE_TEMPLATE_FOR_FOURFRONT_TESTING)
def test_orchestrated_is_fourfront_server_for_fourfront():
    """In a Fourfront ecosystem, every server name counts as a Fourfront server."""
    verdicts = [is_fourfront_server('anything'),
                is_fourfront_server('anything', allow_localhost=True)]
    assert all(v is True for v in verdicts)
@using_orchestrated_behavior()
def test_orchestrated_is_fourfront_server_for_cgap():
    """In a CGAP ecosystem, no server name counts as a Fourfront server."""
    verdicts = [is_fourfront_server('anything'),
                is_fourfront_server('anything', allow_localhost=True)]
    assert all(v is False for v in verdicts)
@using_orchestrated_behavior()
def test_orchestrated_is_cgap_env_for_cgap():
    """CGAP envs are recognized by the declared prefix or PUBLIC_URL_TABLE entries."""
    cases = [
        (None, False),               # non-strings return False
        ('acme-prd', True),          # has the declared "acme-" prefix
        ('acme-foo', True),
        ('cgap', True),              # in the cgap PUBLIC_URL_TABLE only
        ('data', False),             # in the fourfront table only
        ('fourfront-cgap', False),   # wired substrings no longer matter...
        ('cgap-prod', False),        # ...and these lack the right prefix
        ('fourfront-blue', False),
    ]
    for env, expected in cases:
        assert is_cgap_env(env) is expected
@using_orchestrated_behavior(data=EnvUtils.SAMPLE_TEMPLATE_FOR_FOURFRONT_TESTING)
def test_orchestrated_is_cgap_env_for_fourfront():
    """In a Fourfront orchestration, nothing is ever a CGAP env."""
    candidates = [None,  # non-strings return False anyway
                  'acme-prd', 'acme-foo', 'cgap', 'data',
                  'fourfront-cgap', 'cgap-prod', 'fourfront-blue']
    for env in candidates:
        assert is_cgap_env(env) is False
@using_orchestrated_behavior(data=EnvUtils.SAMPLE_TEMPLATE_FOR_FOURFRONT_TESTING)
def test_orchestrated_is_fourfront_env_for_fourfront():
    """Fourfront envs are recognized by the declared prefix or PUBLIC_URL_TABLE entries."""
    cases = [
        (None, False),               # non-strings return False
        ('acme-prd', True),          # has the declared "acme-" prefix
        ('acme-foo', True),
        ('data', True),              # in the fourfront PUBLIC_URL_TABLE only
        ('cgap', False),             # in the cgap table only
        ('fourfront-cgap', False),   # wired substrings no longer matter...
        ('cgap-prod', False),        # ...and these lack the right prefix
        ('fourfront-blue', False),
    ]
    for env, expected in cases:
        assert is_fourfront_env(env) is expected
@using_orchestrated_behavior()
def test_orchestrated_is_fourfront_env_for_cgap():
    """In a CGAP orchestration, nothing is ever a Fourfront env."""
    candidates = [None,  # non-strings return False anyway
                  'acme-prd', 'acme-foo', 'data', 'cgap',
                  'fourfront-cgap', 'cgap-prod', 'fourfront-blue']
    for env in candidates:
        assert is_fourfront_env(env) is False
@using_orchestrated_behavior()
def test_orchestrated_is_stg_or_prd_env_for_cgap():
    """Only the prod env (and its public alias) is stg/prd in the sample CGAP
    ecosystem, unless stage mirroring is explicitly enabled."""
    assert is_stg_or_prd_env('cgap') is True  # PUBLIC_URL_TABLE alias for acme-prd
    assert is_stg_or_prd_env('data') is False
    assert is_stg_or_prd_env('acme-prd') is True
    assert is_stg_or_prd_env('acme-test') is False
    assert is_stg_or_prd_env('anything') is False
    assert is_stg_or_prd_env('acme-stg') is False
    with local_attrs(EnvUtils, STG_ENV_NAME='acme-stg'):
        # Declaring a staging env alone is not enough...
        assert is_stg_or_prd_env('acme-stg') is False
        with stage_mirroring(enabled=True):
            # ...mirroring must also be switched on.
            assert is_stg_or_prd_env('acme-stg') is True
@using_orchestrated_behavior(data=EnvUtils.SAMPLE_TEMPLATE_FOR_FOURFRONT_TESTING)
def test_orchestrated_is_stg_or_prd_env_for_fourfront():
    """The sample Fourfront ecosystem has mirroring active, so both the prod
    env and the declared staging env are stg/prd; disabling either half of
    the configuration turns staging back into an ordinary env."""
    assert is_stg_or_prd_env('data') is True  # PUBLIC_URL_TABLE alias for acme-prd
    assert is_stg_or_prd_env('cgap') is False
    assert is_stg_or_prd_env('acme-prd') is True
    assert is_stg_or_prd_env('acme-test') is False
    assert is_stg_or_prd_env('anything') is False
    assert is_stg_or_prd_env('acme-stg') is True
    # Not declaring a stg_env_name is enough to disable staging.
    with local_attrs(EnvUtils, STG_ENV_NAME=None):
        assert is_stg_or_prd_env('acme-stg') is False
    # Not enabling stage mirroring is enough to disable staging
    with stage_mirroring(enabled=False):
        assert is_stg_or_prd_env('acme-stg') is False
@using_orchestrated_behavior()
def test_orchestrated_is_test_env_for_cgap():
    """Only envs listed in TEST_ENVS (or their public aliases) are test envs."""
    assert EnvUtils.TEST_ENVS == ['acme-test', 'acme-mastertest', 'acme-pubtest']
    cases = [
        ('acme-prd', False),
        ('acme-stg', False),
        ('acme-test', True),
        ('acme-mastertest', True),
        ('acme-pubtest', True),
        ('testing', True),          # declared for CGAP testing, not for Fourfront testing
        ('test', False),            # declared for Fourfront testing, not for CGAP testing
        ('acme-supertest', False),  # not a declared test env for either ecosystem
        ('foo', False),
    ]
    for env, expected in cases:
        assert is_test_env(env) is expected
@using_orchestrated_behavior(data=EnvUtils.SAMPLE_TEMPLATE_FOR_FOURFRONT_TESTING)
def test_orchestrated_is_test_env_for_fourfront():
    """Only envs listed in TEST_ENVS (or their public aliases) are test envs."""
    assert EnvUtils.TEST_ENVS == ['acme-test', 'acme-mastertest', 'acme-pubtest']
    cases = [
        ('acme-prd', False),
        ('acme-stg', False),
        ('acme-test', True),
        ('acme-mastertest', True),
        ('acme-pubtest', True),
        ('test', True),             # declared for Fourfront testing, not for CGAP testing
        ('testing', False),         # declared for CGAP testing, not for Fourfront testing
        ('acme-supertest', False),  # not a declared test env for either ecosystem
        ('foo', False),
    ]
    for env, expected in cases:
        assert is_test_env(env) is expected
@using_orchestrated_behavior()
def test_orchestrated_is_hotseat_env_for_cgap():
    """Hotseat envs come from HOTSEAT_ENVS, with PUBLIC_URL_TABLE aliases honored."""
    assert EnvUtils.HOTSEAT_ENVS == ['acme-hotseat', 'acme-pubdemo']
    cases = [
        ('acme-prd', False),
        ('acme-stg', False),
        ('acme-hotseat', True),
        ('acme-pubdemo', True),
        ('demo', True),        # PUBLIC_URL_TABLE alias for acme-pubdemo
        ('acme-demo', False),  # not a declared hotseat env, not in PUBLIC_URL_TABLE
        ('foo', False),
    ]
    for env, expected in cases:
        assert is_hotseat_env(env) is expected
@using_orchestrated_behavior(data=EnvUtils.SAMPLE_TEMPLATE_FOR_FOURFRONT_TESTING)
def test_orchestrated_is_hotseat_env_for_fourfront():
    """Hotseat envs come from HOTSEAT_ENVS, with PUBLIC_URL_TABLE aliases honored."""
    assert EnvUtils.HOTSEAT_ENVS == ['acme-hotseat']
    cases = [
        ('acme-prd', False),
        ('acme-stg', False),
        ('acme-hotseat', True),
        ('acme-pubdemo', False),  # not a hotseat env in the Fourfront sample
        ('demo', False),          # PUBLIC_URL_TABLE alias for non-hotseat env acme-pubdemo
        ('hot', True),            # PUBLIC_URL_TABLE alias for hotseat env acme-hotseat
        ('acme-demo', False),     # not a declared hotseat env, not in PUBLIC_URL_TABLE
        ('foo', False),
    ]
    for env, expected in cases:
        assert is_hotseat_env(env) is expected
@using_orchestrated_behavior
def test_orchestrated_get_env_from_context():
    """Placeholder: get_env_from_context is currently only tested indirectly via its callers."""
    # There was no legacy unit test for this.
    # TODO: Write a unit test for both legacy and orchestrated case.
    #       But for now we're using the legacy definition, and it should be OK.
    #       Also, this is implicitly tested by functions that call it.
    pass
@using_orchestrated_behavior()
def test_orchestrated_get_mirror_env_from_context_without_environ_with_mirror_disabled():
    """With stage mirroring disabled and an empty os.environ, no settings combination yields a mirror."""
    for allow_environ in (False, True):
        # If the environment doesn't have either the ENV_NAME or MIRROR_ENV_NAME environment variables,
        # it won't matter what value we pass for allow_environ.
        with mock.patch.object(os, "environ", {}):
            settings = {'env.name': 'acme-prd', 'mirror.env.name': 'anything'}
            mirror = get_mirror_env_from_context(settings, allow_environ=allow_environ)
            assert mirror is None  # even an explicit mirror.env.name is ignored while mirroring is disabled
            settings = {'env.name': 'acme-prd'}
            mirror = get_mirror_env_from_context(settings, allow_environ=allow_environ)
            assert mirror is None
            settings = {'env.name': 'acme-prd'}
            mirror = get_mirror_env_from_context(settings, allow_environ=allow_environ, allow_guess=False)
            assert mirror is None
            settings = {'env.name': 'acme-stg'}
            mirror = get_mirror_env_from_context(settings, allow_environ=allow_environ)
            assert mirror is None
            settings = {'env.name': 'acme-test'}
            mirror = get_mirror_env_from_context(settings, allow_environ=allow_environ)
            assert mirror is None
            settings = {'env.name': 'acme-mastertest'}
            mirror = get_mirror_env_from_context(settings, allow_environ=allow_environ)
            assert mirror is None
@using_orchestrated_behavior()
def test_orchestrated_get_mirror_env_from_context_without_environ_with_mirror_enabled():
    """With mirroring enabled and an empty os.environ, mirrors come from settings or the prd/stg guess."""
    with stage_mirroring(enabled=True):
        with local_attrs(EnvUtils, STG_ENV_NAME='acme-stg'):
            for allow_environ in (False, True):
                # If the environment doesn't have either the ENV_NAME or MIRROR_ENV_NAME environment variables,
                # it won't matter what value we pass for allow_environ.
                with mock.patch.object(os, "environ", {}):
                    settings = {'env.name': 'acme-prd', 'mirror.env.name': 'anything'}
                    mirror = get_mirror_env_from_context(settings, allow_environ=allow_environ)
                    assert mirror == 'anything'  # overrides any guess we might make
                    settings = {'env.name': 'acme-prd'}
                    mirror = get_mirror_env_from_context(settings, allow_environ=allow_environ)
                    assert mirror == 'acme-stg'  # Not found in environment, but we can guess
                    settings = {'env.name': 'acme-prd'}
                    mirror = get_mirror_env_from_context(settings, allow_environ=allow_environ, allow_guess=False)
                    assert mirror is None  # Guessing was suppressed
                    settings = {'env.name': 'acme-stg'}
                    mirror = get_mirror_env_from_context(settings, allow_environ=allow_environ)
                    assert mirror == 'acme-prd'
                    settings = {'env.name': 'acme-test'}
                    mirror = get_mirror_env_from_context(settings, allow_environ=allow_environ)
                    assert mirror is None  # non-prd/stg envs have no mirror to guess
                    settings = {'env.name': 'acme-mastertest'}
                    mirror = get_mirror_env_from_context(settings, allow_environ=allow_environ)
                    assert mirror is None
@using_orchestrated_behavior()
def test_orchestrated_get_mirror_env_from_context_with_environ_has_env_with_mirror_disabled():
    """With mirroring disabled, an ENV_NAME in os.environ (or in settings) never yields a mirror."""
    with mock.patch.object(os, "environ", {'ENV_NAME': 'foo'}):
        settings = {'env.name': 'acme-prd'}
        mirror = get_mirror_env_from_context(settings, allow_environ=True)
        assert mirror is None
    with mock.patch.object(os, "environ", {"ENV_NAME": 'acme-stg'}):
        settings = {}
        mirror = get_mirror_env_from_context(settings, allow_environ=True)
        assert mirror is None
        settings = {}
        mirror = get_mirror_env_from_context(settings, allow_environ=True, allow_guess=False)
        assert mirror is None
        settings = {'env.name': 'acme-prd'}
        mirror = get_mirror_env_from_context(settings, allow_environ=True)
        assert mirror is None
        settings = {'env.name': 'acme-prd'}
        mirror = get_mirror_env_from_context(settings, allow_environ=True, allow_guess=False)
        assert mirror is None
        settings = {'env.name': 'acme-prd'}
        mirror = get_mirror_env_from_context(settings, allow_environ=False)
        assert mirror is None
        settings = {'env.name': 'acme-prd'}
        mirror = get_mirror_env_from_context(settings, allow_environ=False, allow_guess=False)
        assert mirror is None
    with mock.patch.object(os, "environ", {"ENV_NAME": 'acme-prd'}):
        settings = {}
        mirror = get_mirror_env_from_context(settings, allow_environ=True)
        assert mirror is None
        settings = {}
        mirror = get_mirror_env_from_context(settings, allow_environ=True, allow_guess=False)
        assert mirror is None
@using_orchestrated_behavior()
def test_orchestrated_get_mirror_env_from_context_with_environ_has_env_with_mirror_enabled():
    """With mirroring enabled, ENV_NAME in os.environ overrides settings; guessing maps prd<->stg."""
    with stage_mirroring(enabled=True):
        with local_attrs(EnvUtils, STG_ENV_NAME='acme-stg'):
            with mock.patch.object(os, "environ", {'ENV_NAME': 'foo'}):
                settings = {'env.name': 'acme-prd'}
                mirror = get_mirror_env_from_context(settings, allow_environ=True)
                assert mirror is None  # "foo" has no mirror
            with mock.patch.object(os, "environ", {"ENV_NAME": 'acme-stg'}):
                settings = {}
                mirror = get_mirror_env_from_context(settings, allow_environ=True)
                assert mirror == 'acme-prd'  # env name explicitly declared, then a guess
                settings = {}
                mirror = get_mirror_env_from_context(settings, allow_environ=True, allow_guess=False)
                assert mirror is None  # env name explicitly declared, but guessing disallowed
                settings = {'env.name': 'acme-prd'}
                mirror = get_mirror_env_from_context(settings, allow_environ=True)
                assert mirror == 'acme-prd'  # env name in environ overrides env name in file
                settings = {'env.name': 'acme-prd'}
                mirror = get_mirror_env_from_context(settings, allow_environ=True, allow_guess=False)
                assert mirror is None  # env name in environ overrides env name in file, but guessing disallowed
                settings = {'env.name': 'acme-prd'}
                mirror = get_mirror_env_from_context(settings, allow_environ=False)
                assert mirror == 'acme-stg'  # env name in environ suppressed
                settings = {'env.name': 'acme-prd'}
                mirror = get_mirror_env_from_context(settings, allow_environ=False, allow_guess=False)
                assert mirror is None  # env name in environ suppressed, but guessing disallowed
            with mock.patch.object(os, "environ", {"ENV_NAME": 'acme-prd'}):
                settings = {}
                mirror = get_mirror_env_from_context(settings, allow_environ=True)
                # Was a weak truthiness check ("assert mirror"); the guessed mirror of the
                # prd env is the declared stg env, so pin the exact value.
                assert mirror == 'acme-stg'
@using_orchestrated_behavior()
def test_orchestrated_get_mirror_env_from_context_with_environ_has_mirror_env_with_mirror_disabled():
    """An explicit MIRROR_ENV_NAME in os.environ is still ignored while stage mirroring is disabled."""
    with mock.patch.object(os, "environ", {"MIRROR_ENV_NAME": 'bar'}):
        settings = {}
        mirror = get_mirror_env_from_context(settings, allow_environ=True)
        assert mirror is None
@using_orchestrated_behavior()
def test_orchestrated_get_mirror_env_from_context_with_environ_has_mirror_env_with_mirror_enabled():
    """With mirroring enabled, MIRROR_ENV_NAME in os.environ is honored even with no env name anywhere."""
    with stage_mirroring(enabled=True):
        with local_attrs(EnvUtils, STG_ENV_NAME='acme-stg'):
            with mock.patch.object(os, "environ", {"MIRROR_ENV_NAME": 'bar'}):
                settings = {}
                mirror = get_mirror_env_from_context(settings, allow_environ=True)
                assert mirror == 'bar'  # explicitly declared, even if nothing else is set
@using_orchestrated_behavior()
def test_orchestrated_get_mirror_env_from_context_with_environ_has_env_and_mirror_env_with_mirror_disabled():
    """Even with both ENV_NAME and MIRROR_ENV_NAME set, disabled mirroring means no mirror is reported."""
    with mock.patch.object(os, "environ", {'ENV_NAME': 'acme-stg', "MIRROR_ENV_NAME": 'bar'}):
        settings = {'env.name': 'acme-prd'}
        mirror = get_mirror_env_from_context(settings, allow_environ=True)
        assert mirror is None
@using_orchestrated_behavior()
def test_orchestrated_get_mirror_env_from_context_with_environ_has_env_and_mirror_env_with_mirror_enabled():
    """With mirroring enabled, MIRROR_ENV_NAME wins outright over any env-name-based guess."""
    with stage_mirroring(enabled=True):
        with local_attrs(EnvUtils, STG_ENV_NAME='acme-stg'):
            with mock.patch.object(os, "environ", {'ENV_NAME': 'acme-stg', "MIRROR_ENV_NAME": 'bar'}):
                settings = {'env.name': 'acme-prd'}
                mirror = get_mirror_env_from_context(settings, allow_environ=True)
                assert mirror == 'bar'  # mirror explicitly declared, ignoring env name
@using_orchestrated_behavior()
def test_orchestrated_get_standard_mirror_env_for_cgap():
    """get_standard_mirror_env maps prd<->stg (long or declared short names) only while mirroring is enabled."""
    for mirroring_enabled in [True, False]:
        with local_attrs(EnvUtils, STG_ENV_NAME='acme-stg'):
            with stage_mirroring(enabled=mirroring_enabled):
                def expected_result(value):
                    # With mirroring disabled, every lookup collapses to None.
                    return value if mirroring_enabled else None
                assert get_standard_mirror_env('acme-prd') == expected_result('acme-stg')
                assert get_standard_mirror_env('acme-stg') == expected_result('acme-prd')
                assert get_standard_mirror_env('cgap') == expected_result('stg')
                assert get_standard_mirror_env('stg') == expected_result('cgap')
                assert get_standard_mirror_env('acme-foo') is None
@using_orchestrated_behavior(data=EnvUtils.SAMPLE_TEMPLATE_FOR_FOURFRONT_TESTING)
def test_orchestrated_get_standard_mirror_env_for_fourfront():
    """Same as the CGAP case, but the Fourfront template declares 'data'/'staging' as the short prod names."""
    for mirroring_enabled in [True, False]:
        with local_attrs(EnvUtils, STG_ENV_NAME='acme-stg'):
            with stage_mirroring(enabled=mirroring_enabled):
                def expected_result(value):
                    # With mirroring disabled, every lookup collapses to None.
                    return value if mirroring_enabled else None
                assert get_standard_mirror_env('acme-prd') == expected_result('acme-stg')
                assert get_standard_mirror_env('acme-stg') == expected_result('acme-prd')
                assert get_standard_mirror_env('data') == expected_result('staging')
                assert get_standard_mirror_env('staging') == expected_result('data')
                assert get_standard_mirror_env('acme-foo') is None
@using_orchestrated_behavior
def test_orchestrated_infer_repo_from_env_for_cgap():
    """In a CGAP orchestration, orchestrated and declared env names map to 'cgap-portal'; others to None."""
    # 'cgap' and 'demo' are declared names; the acme-* names carry the ecosystem prefix.
    for env_name in ['acme-prd', 'acme-stg', 'acme-test', 'cgap', 'demo']:
        assert infer_repo_from_env(env_name) == 'cgap-portal'
    # Fourfront-only names, unknown names, and None resolve to no repo at all.
    for env_name in ['data', 'staging', 'who-knows', None]:
        assert infer_repo_from_env(env_name) is None
@using_orchestrated_behavior(data=EnvUtils.SAMPLE_TEMPLATE_FOR_FOURFRONT_TESTING)
def test_orchestrated_infer_repo_from_env_for_fourfront():
    """In a Fourfront orchestration, orchestrated and declared env names map to 'fourfront'; others to None."""
    # 'demo', 'data' and 'staging' are declared names; the acme-* names carry the ecosystem prefix.
    for env_name in ['acme-prd', 'acme-stg', 'acme-test', 'demo', 'data', 'staging']:
        assert infer_repo_from_env(env_name) == 'fourfront'
    # 'cgap' is not declared in this ecosystem; unknown names and None resolve to no repo.
    for env_name in ['cgap', 'who-knows', None]:
        assert infer_repo_from_env(env_name) is None
# Settings approximating the legacy CGAP production account, used with local_attrs
# in the tests below to temporarily override EnvUtils.
CGAP_SETTINGS_FOR_TESTING = dict(
    ORCHESTRATED_APP='cgap',
    FOURSIGHT_URL_PREFIX='https://u9feld4va7.execute-api.us-east-1.amazonaws.com/api/view/',
    FULL_ENV_PREFIX='fourfront-',
    DEV_ENV_DOMAIN_SUFFIX=EnvUtils.DEV_SUFFIX_FOR_TESTING,
    WEBPROD_PSEUDO_ENV='fourfront-cgap',
    PRD_ENV_NAME='fourfront-cgap',
    STG_ENV_NAME=None,  # legacy CGAP had no staging mirror
    PUBLIC_URL_TABLE=[
        {
            p.NAME: 'cgap',
            p.URL: "https://cgap.hms.harvard.edu",
            p.HOST: "cgap.hms.harvard.edu",
            p.ENVIRONMENT: "fourfront-cgap"
        },
    ]
)
# Settings approximating the legacy Fourfront production account (a mirrored blue/green
# prod pair behind data/staging), used with local_attrs in the tests below.
FOURFRONT_SETTINGS_FOR_TESTING = dict(
    ORCHESTRATED_APP='fourfront',
    FOURSIGHT_URL_PREFIX='https://foursight.4dnucleome.org/api/view/',
    FULL_ENV_PREFIX='fourfront-',
    DEV_ENV_DOMAIN_SUFFIX=EnvUtils.DEV_SUFFIX_FOR_TESTING,
    WEBPROD_PSEUDO_ENV='fourfront-webprod',
    PRD_ENV_NAME='fourfront-blue',
    STG_ENV_NAME='fourfront-green',
    PUBLIC_URL_TABLE=[
        {
            p.NAME: 'data',
            p.URL: "https://data.4dnucleome.org",
            p.HOST: "data.4dnucleome.org",
            p.ENVIRONMENT: "fourfront-blue"
        },
        {
            p.NAME: 'staging',
            p.URL: "https://staging.4dnucleome.org",
            p.HOST: "staging.4dnucleome.org",
            p.ENVIRONMENT: "fourfront-green"
        }
    ]
)
@using_orchestrated_behavior()
def test_orchestrated_foursight_env_name():
    """foursight_env_name prefers a PUBLIC_URL_TABLE name and otherwise falls back to the short env name."""
    assert foursight_env_name('acme-prd') == 'cgap'  # this is in our test ecosystem's public_url_table
    assert foursight_env_name('acme-mastertest') == 'mastertest'  # the rest of these are short names
    assert foursight_env_name('acme-webdev') == 'webdev'
    assert foursight_env_name('acme-hotseat') == 'hotseat'
    with stage_mirroring(enabled=True):
        with local_attrs(EnvUtils, **FOURFRONT_SETTINGS_FOR_TESTING):  # PRD = fourfront-blue, STG = fourfront-green
            assert foursight_env_name('fourfront-blue') == 'data'  # this is in the public_url_table
            assert foursight_env_name('fourfront-green') == 'staging'  # this is, too
            assert foursight_env_name('fourfront-mastertest') == 'mastertest'  # these are short names
            assert foursight_env_name('fourfront-hotseat') == 'hotseat'
@using_orchestrated_behavior()
def test_orchestrated_infer_foursight_from_env():
    """infer_foursight_from_env resolves a foursight name from an envname and/or a request's domain."""
    dev_suffix = EnvUtils.DEV_SUFFIX_FOR_TESTING
    class MockedRequest:
        # Minimal stand-in for a web request: only the 'domain' attribute is consulted.
        def __init__(self, domain):
            self.domain = domain
    def mock_request(domain):  # build a dummy request with the 'domain' member, checked in the method
        return MockedRequest(domain)
    assert infer_foursight_from_env(request=mock_request('acme-prd' + dev_suffix),
                                    envname='acme-prd') == 'cgap'
    assert infer_foursight_from_env(request=mock_request('acme-mastertest' + dev_suffix),
                                    envname='acme-mastertest') == 'mastertest'
    assert infer_foursight_from_env(request=mock_request('acme-webdev' + dev_suffix),
                                    envname='acme-webdev') == 'webdev'
    assert infer_foursight_from_env(request=mock_request('acme-hotseat' + dev_suffix),
                                    envname='acme-hotseat') == 'hotseat'
    with stage_mirroring(enabled=True):
        with local_attrs(EnvUtils, **FOURFRONT_SETTINGS_FOR_TESTING):  # PRD = Blue, STG = Green
            # (active) fourfront testing environments
            assert infer_foursight_from_env(request=mock_request('fourfront-mastertest' + dev_suffix),
                                            envname='fourfront-mastertest') == 'mastertest'
            assert infer_foursight_from_env(request=mock_request('fourfront-webdev' + dev_suffix),
                                            envname='fourfront-webdev') == 'webdev'
            assert infer_foursight_from_env(request=mock_request('fourfront-hotseat' + dev_suffix),
                                            envname='fourfront-hotseat') == 'hotseat'
            # (active) fourfront production environments
            assert (infer_foursight_from_env(request=mock_request(domain='data.4dnucleome.org'),
                                             envname='fourfront-blue')
                    == 'data')
            assert (infer_foursight_from_env(request=mock_request(domain='data.4dnucleome.org'),
                                             envname='fourfront-green')
                    == 'staging')  # Inconsistent args. The envname is used in preference to the request
            assert (infer_foursight_from_env(request=mock_request(domain='staging.4dnucleome.org'),
                                             envname='fourfront-blue')
                    == 'data')  # Inconsistent args. The envname is used in preference to the request
            assert (infer_foursight_from_env(request=mock_request(domain='staging.4dnucleome.org'),
                                             envname='fourfront-green')
                    == 'staging')
            # These next four are pathological and hopefully not used, but they illustrate that
            # the envname dominates. This does not illustrate intended use.
            assert (infer_foursight_from_env(request=mock_request(domain='data.4dnucleome.org'), envname='data')
                    == 'data')
            assert (infer_foursight_from_env(request=mock_request(domain='data.4dnucleome.org'), envname='staging')
                    == 'staging')  # Inconsistent args. The envname is used in preference to the request
            assert (infer_foursight_from_env(request=mock_request(domain='staging.4dnucleome.org'), envname='data')
                    == 'data')  # Inconsistent args. The envname is used in preference to the request
            assert (infer_foursight_from_env(request=mock_request(domain='staging.4dnucleome.org'), envname='staging')
                    == 'staging')
            assert (infer_foursight_from_env(request='data.4dnucleome.org', envname='data') == 'data')
            # Inconsistent args. The envname is used in preference to the request
            assert (infer_foursight_from_env(request='data.4dnucleome.org', envname='staging') == 'staging')
            assert (infer_foursight_from_env(request='https://data.4dnucleome.org', envname='data') == 'data')
            # Inconsistent args. The envname is used in preference to the request
            assert (infer_foursight_from_env(request='https://data.4dnucleome.org', envname='staging') == 'staging')
            # Inconsistent args. The envname is used in preference to the request
            assert (infer_foursight_from_env(request='staging.4dnucleome.org', envname='data') == 'data')
            assert (infer_foursight_from_env(request='staging.4dnucleome.org', envname='staging') == 'staging')
            # Inconsistent args. The envname is used in preference to the request
            assert (infer_foursight_from_env(request='http://staging.4dnucleome.org', envname='data') == 'data')
            assert (infer_foursight_from_env(request='http://staging.4dnucleome.org', envname='staging') == 'staging')
            assert (infer_foursight_from_env(request=None, envname='data') == 'data')
            assert (infer_foursight_from_env(request=None, envname='staging') == 'staging')
        # (active) cgap environments
        with local_attrs(EnvUtils, **CGAP_SETTINGS_FOR_TESTING):
            assert infer_foursight_from_env(request=mock_request('fourfront-cgapdev' + dev_suffix),
                                            envname='fourfront-cgapdev') == 'cgapdev'
            assert infer_foursight_from_env(request=mock_request('fourfront-cgaptest' + dev_suffix),
                                            envname='fourfront-cgaptest') == 'cgaptest'
            assert infer_foursight_from_env(request=mock_request('fourfront-cgapwolf' + dev_suffix),
                                            envname='fourfront-cgapwolf') == 'cgapwolf'
            assert infer_foursight_from_env(request=mock_request('fourfront-cgap' + dev_suffix),
                                            envname='fourfront-cgap') == 'cgap'
            assert infer_foursight_from_env(request=mock_request('cgap.hms.harvard.edu'),
                                            envname='fourfront-cgap') == 'cgap'
            assert infer_foursight_from_env(request=mock_request('cgap.hms.harvard.edu'),
                                            envname='cgap') == 'cgap'
@pytest.mark.skip
@using_orchestrated_behavior()
def test_orchestrated_indexer_env_for_env():
    """(Skipped) Historical behavior: every env except the indexer itself shared one canonical indexer env."""
    assert EnvUtils.INDEXER_ENV_NAME == 'acme-indexer'
    # The indexer does not think it has an indexer
    assert indexer_env_for_env('acme-indexer') is None
    # All other environments use a canonical indexer
    assert indexer_env_for_env('acme-prd') == 'acme-indexer'
    assert indexer_env_for_env('acme-test') == 'acme-indexer'
    assert indexer_env_for_env('acme-anything') == 'acme-indexer'
    assert indexer_env_for_env('blah-blah') == 'acme-indexer'
@using_orchestrated_behavior()
def test_orchestrated_indexer_env_for_env_disabled():
    """indexer_env_for_env is disabled for containerized deployments: it returns None for any env."""
    assert EnvUtils.INDEXER_ENV_NAME == 'acme-indexer'
    # We've disabled calls to this. The indexer isn't done this way in containers.
    for env in ['acme-indexer', 'acme-prd', 'acme-test', 'acme-anything', 'blah-blah']:
        # We tried raising an error and opted to just return None
        # with pytest.raises(BeanstalkOperationNotImplemented):
        assert indexer_env_for_env(env) is None
@pytest.mark.skip
@using_orchestrated_behavior()
def test_orchestrated_is_indexer_env():
    """(Skipped) Historical behavior: only the declared indexer env identified as an indexer env."""
    assert EnvUtils.INDEXER_ENV_NAME == 'acme-indexer'
    # This should be true for the indexer env, False for others
    assert is_indexer_env('acme-indexer') is True
    assert is_indexer_env('acme-prd') is False
    assert is_indexer_env('acme-test') is False
    assert is_indexer_env('acme-foo') is False
    assert is_indexer_env('pretty-much-anything') is False
@using_orchestrated_behavior()
def test_orchestrated_is_indexer_env_disabled():
    """is_indexer_env is disabled for containerized deployments: it returns False for any env."""
    assert EnvUtils.INDEXER_ENV_NAME == 'acme-indexer'
    # We've disabled calls to this. The indexer isn't done this way in containers.
    for env in ['acme-indexer', 'acme-prd', 'acme-test', 'acme-anything', 'blah-blah']:
        # We tried raising an error and opted to just return False
        # with pytest.raises(BeanstalkOperationNotImplemented):
        assert is_indexer_env(env) is False
@using_orchestrated_behavior()
def test_orchestrated_short_env_name():
    """short_env_name resolves PUBLIC_URL_TABLE names, then strips only the active FULL_ENV_PREFIX."""
    assert short_env_name(None) is None
    assert short_env_name('demo') == 'pubdemo'  # declared alias resolves before shortening
    assert short_env_name('anything') == 'anything'
    assert short_env_name('acme-anything') == 'anything'
    assert short_env_name('cgap-anything') == 'cgap-anything'
    assert short_env_name('fourfront-cgapfoo') == 'fourfront-cgapfoo'
    assert short_env_name('fourfront-anything') == 'fourfront-anything'
    with local_attrs(EnvUtils, **CGAP_SETTINGS_FOR_TESTING):  # Legacy CGAP settings use a 'fourfront-' prefix!
        assert short_env_name(None) is None
        assert short_env_name('demo') == 'demo'
        assert short_env_name('anything') == 'anything'
        assert short_env_name('acme-anything') == 'acme-anything'
        assert short_env_name('cgap-anything') == 'cgap-anything'
        assert short_env_name('fourfront-cgapfoo') == 'cgapfoo'
        assert short_env_name('fourfront-anything') == 'anything'
    with local_attrs(EnvUtils, FULL_ENV_PREFIX='cgap-'):  # Of course, we could have defined it otherwise.
        assert short_env_name(None) is None
        assert short_env_name('demo') == 'demo'
        assert short_env_name('anything') == 'anything'
        assert short_env_name('acme-anything') == 'acme-anything'
        assert short_env_name('cgap-anything') == 'anything'
        assert short_env_name('fourfront-cgapfoo') == 'fourfront-cgapfoo'
        assert short_env_name('fourfront-anything') == 'fourfront-anything'
    with local_attrs(EnvUtils, **FOURFRONT_SETTINGS_FOR_TESTING):
        assert short_env_name(None) is None
        assert short_env_name('demo') == 'demo'
        assert short_env_name('anything') == 'anything'
        assert short_env_name('acme-anything') == 'acme-anything'
        assert short_env_name('cgap-anything') == 'cgap-anything'
        assert short_env_name('fourfront-cgapfoo') == 'cgapfoo'
        assert short_env_name('fourfront-anything') == 'anything'
@using_orchestrated_behavior()
def test_orchestrated_full_env_name():
    """full_env_name resolves declared names and otherwise just prepends the active FULL_ENV_PREFIX."""
    assert full_env_name('cgap') == 'acme-prd'  # declared alias for the production env
    assert full_env_name('acme-foo') == 'acme-foo'
    assert full_env_name('foo') == 'acme-foo'
    with pytest.raises(Exception):
        full_env_name(None)
    with local_attrs(EnvUtils, **FOURFRONT_SETTINGS_FOR_TESTING):
        assert full_env_name('cgapdev') == 'fourfront-cgapdev'
        assert full_env_name('mastertest') == 'fourfront-mastertest'
        assert full_env_name('fourfront-cgapdev') == 'fourfront-cgapdev'
        assert full_env_name('fourfront-mastertest') == 'fourfront-mastertest'
        # Does not require a registered env
        assert full_env_name('foo') == 'fourfront-foo'
        assert full_env_name('cgapfoo') == 'fourfront-cgapfoo'
        # In legacy mode, these raise ValueError, but here we know we are in a cgap env, so just do the normal thing.
        # fourfront would be in its own orchestrated account.
        assert full_env_name('data') == 'fourfront-blue'
        assert full_env_name('staging') == 'fourfront-green'
        # In an orchestrated Fourfront, the name 'cgap' is not special, but coincidentally selects the same name.
        assert full_env_name('cgap') == 'fourfront-cgap'
    with local_attrs(EnvUtils, **CGAP_SETTINGS_FOR_TESTING):
        assert full_env_name('cgapdev') == 'fourfront-cgapdev'
        assert full_env_name('mastertest') == 'fourfront-mastertest'
        assert full_env_name('fourfront-cgapdev') == 'fourfront-cgapdev'
        assert full_env_name('fourfront-mastertest') == 'fourfront-mastertest'
        # Does not require a registered env
        assert full_env_name('foo') == 'fourfront-foo'
        assert full_env_name('cgapfoo') == 'fourfront-cgapfoo'
        # In an orchestrated CGAP, the names 'data' and 'staging' are not special
        assert full_env_name('data') == 'fourfront-data'
        assert full_env_name('staging') == 'fourfront-staging'
        # The name 'cgap' is found in PUBLIC_URL_TABLE, but happens by coincidence to expand the obvious way.
        assert full_env_name('cgap') == 'fourfront-cgap'
@using_orchestrated_behavior()
def test_orchestrated_full_cgap_env_name_for_cgap():
    """full_cgap_env_name expands short names with the ecosystem prefix and resolves declared aliases."""
    expectations = {
        'foo': 'acme-foo',        # unregistered short name just gets the prefix
        'acme-foo': 'acme-foo',   # already-full name passes through
        'cgap': 'acme-prd',       # declared alias for the production env
        'test': 'acme-test',
    }
    for given, expected in expectations.items():
        assert full_cgap_env_name(given) == expected
    with pytest.raises(Exception):
        full_cgap_env_name(None)  # None is not a valid env name
@using_orchestrated_behavior(data=EnvUtils.SAMPLE_TEMPLATE_FOR_FOURFRONT_TESTING)
def test_orchestrated_full_cgap_env_name_for_fourfront():
    """In a Fourfront orchestration, full_cgap_env_name is an error no matter what name is given."""
    for name in ['foo', 'acme-foo', 'cgap', 'test']:
        with pytest.raises(ValueError):
            full_cgap_env_name(name)
@using_orchestrated_behavior()
def test_orchestrated_full_cgap_env_name_for_simulated_legacy_cgap():
    """Under simulated legacy-CGAP settings, names expand with the 'fourfront-' prefix used by that account."""
    with local_attrs(EnvUtils, **CGAP_SETTINGS_FOR_TESTING):
        assert full_cgap_env_name('cgap') == 'fourfront-cgap'
        assert full_cgap_env_name('cgapdev') == 'fourfront-cgapdev'
        assert full_cgap_env_name('fourfront-cgapdev') == 'fourfront-cgapdev'
        # Does not require a registered env
        assert full_cgap_env_name('cgapfoo') == 'fourfront-cgapfoo'
        # This was an error in legacy CGAP because we couldn't tell if it was fourfront we were talking about.
        # In an orchestrated version, this name is available for use.
        assert full_cgap_env_name('mastertest') == 'fourfront-mastertest'
@using_orchestrated_behavior(data=EnvUtils.SAMPLE_TEMPLATE_FOR_FOURFRONT_TESTING)
def test_orchestrated_full_fourfront_env_name_for_fourfront():
    """full_fourfront_env_name expands short names with the prefix and resolves declared aliases."""
    assert full_fourfront_env_name('foo') == 'acme-foo'
    assert full_fourfront_env_name('acme-foo') == 'acme-foo'
    assert full_fourfront_env_name('cgap') == 'acme-cgap'  # cgap is just an ordinary name in a fourfront orchestration
    assert full_fourfront_env_name('test') == 'acme-pubtest'
    with pytest.raises(Exception):
        full_fourfront_env_name(None)
@using_orchestrated_behavior()
def test_orchestrated_full_fourfront_env_name_for_cgap():
    """In a CGAP orchestration, full_fourfront_env_name is an error no matter what name is given."""
    for name in ['foo', 'acme-foo', 'cgap', 'test']:
        with pytest.raises(ValueError):
            full_fourfront_env_name(name)
@using_orchestrated_behavior()
def test_orchestrated_classify_server_url_localhost():
    """Any localhost/127.0.0.1 URL classifies as kind 'localhost' with all env info unknown."""
    localhost_classification = {
        'kind': 'localhost',
        'environment': 'unknown',
        'bucket_env': 'unknown',
        'server_env': 'unknown',
        'is_stg_or_prd': False,
        'public_name': None,
    }
    # Port (or its absence) and path make no difference to the classification.
    for url in ["http://localhost/foo/bar",
                "http://localhost:8000/foo/bar",
                "http://localhost:1234/foo/bar",
                "http://127.0.0.1:8000/foo/bar"]:
        assert classify_server_url(url) == localhost_classification
@using_orchestrated_behavior()
def test_orchestrated_classify_server_url_cgap():
    """Classifies the public CGAP production URL and dev-suffixed env URLs under simulated legacy-CGAP settings."""
    with local_attrs(EnvUtils, **CGAP_SETTINGS_FOR_TESTING):
        assert classify_server_url("https://cgap.hms.harvard.edu/foo/bar") == {
            'kind': 'cgap',
            'environment': 'fourfront-cgap',
            'bucket_env': 'fourfront-cgap',
            'server_env': 'fourfront-cgap',
            'is_stg_or_prd': True,
            'public_name': 'cgap',
        }
        for env in ['cgapdev', 'cgapwolf']:
            url = f"http://{EnvUtils.FULL_ENV_PREFIX}{env}{EnvUtils.DEV_SUFFIX_FOR_TESTING}/foo/bar"
            assert classify_server_url(url) == {
                'kind': 'cgap',
                'environment': f"fourfront-{env}",
                'bucket_env': f"fourfront-{env}",
                'server_env': f"fourfront-{env}",
                'is_stg_or_prd': False,
                'public_name': None,
            }
@using_orchestrated_behavior()
def test_orchestrated_classify_server_url_fourfront():
    """Classifies Fourfront public and dev-suffixed URLs; the mirrored prod pair shares the webprod bucket env."""
    with stage_mirroring(enabled=True):
        with local_attrs(EnvUtils, **FOURFRONT_SETTINGS_FOR_TESTING):
            assert classify_server_url("https://data.4dnucleome.org/foo/bar") == {
                'kind': 'fourfront',
                'environment': 'fourfront-webprod',
                'bucket_env': 'fourfront-webprod',
                'server_env': 'fourfront-blue',
                'is_stg_or_prd': True,
                'public_name': 'data',
            }
            assert classify_server_url("https://staging.4dnucleome.org/foo/bar") == {
                'kind': 'fourfront',
                'environment': 'fourfront-webprod',
                'bucket_env': 'fourfront-webprod',
                'server_env': 'fourfront-green',
                'is_stg_or_prd': True,
                'public_name': 'staging',
            }
            assert classify_server_url(f"http://fourfront-blue{EnvUtils.DEV_SUFFIX_FOR_TESTING}/foo/bar") == {
                'kind': 'fourfront',
                'environment': 'fourfront-webprod',
                'bucket_env': 'fourfront-webprod',
                'server_env': 'fourfront-blue',
                'is_stg_or_prd': True,
                'public_name': 'data',
            }
            assert classify_server_url(f"http://fourfront-green{EnvUtils.DEV_SUFFIX_FOR_TESTING}/foo/bar") == {
                'kind': 'fourfront',
                'environment': 'fourfront-webprod',
                'bucket_env': 'fourfront-webprod',
                'server_env': 'fourfront-green',
                'is_stg_or_prd': True,
                'public_name': 'staging',
            }
            assert classify_server_url(f"http://fourfront-mastertest{EnvUtils.DEV_SUFFIX_FOR_TESTING}/foo/bar") == {
                'kind': 'fourfront',
                'environment': 'fourfront-mastertest',
                'bucket_env': 'fourfront-mastertest',
                'server_env': 'fourfront-mastertest',
                'is_stg_or_prd': False,
                'public_name': None,
            }
@using_orchestrated_behavior()
def test_orchestrated_classify_server_url_other():
    """Unrecognized servers raise by default; with raise_error=False they classify as all-unknown."""
    with raises_regexp(RuntimeError, "not a cgap server"):
        classify_server_url("http://google.com")  # raise_error=True is the default
    with raises_regexp(RuntimeError, "not a cgap server"):
        classify_server_url("http://google.com", raise_error=True)
    assert classify_server_url("http://google.com", raise_error=False) == {
        c.KIND: 'unknown',
        c.ENVIRONMENT: 'unknown',
        c.BUCKET_ENV: 'unknown',
        c.SERVER_ENV: 'unknown',
        c.IS_STG_OR_PRD: False,
        c.PUBLIC_NAME: None,
    }
# The function make_env_name_cfn_compatible has been removed because I think no one uses it. -kmp 15-May-2022
#
# @using_orchestrated_behavior()
# @pytest.mark.parametrize('env_name, cfn_id', [
# ('acme-foo', 'acmefoo'),
# ('foo-bar-baz', 'foobarbaz'),
# ('cgap-mastertest', 'cgapmastertest'),
# ('fourfront-cgap', 'fourfrontcgap'),
# ('cgap-msa', 'cgapmsa'),
# ('fourfrontmastertest', 'fourfrontmastertest')
# ])
# def test_orchestrated_make_env_name_cfn_compatible(env_name, cfn_id):
# assert make_env_name_cfn_compatible(env_name) == cfn_id
@using_orchestrated_behavior()
def test_get_foursight_bucket():
    """get_foursight_bucket consults FOURSIGHT_BUCKET_TABLE, falling back to FOURSIGHT_BUCKET_PREFIX names."""
    bucket_table = EnvUtils.FOURSIGHT_BUCKET_TABLE
    # Uncomment the following line to see the table we're working with.
    print(f"Testing get_foursight_bucket relative to: {json.dumps(bucket_table, indent=2)}")
    ignorable(json, bucket_table)  # Keeps lint tools from complaining when the above line is commented out.
    bucket = get_foursight_bucket(envname='acme-prd', stage='dev')
    assert bucket == 'acme-foursight-dev-prd'
    bucket = get_foursight_bucket(envname='acme-prd', stage='prod')
    assert bucket == 'acme-foursight-prod-prd'
    bucket = get_foursight_bucket(envname='acme-stg', stage='dev')
    assert bucket == 'acme-foursight-dev-stg'
    bucket = get_foursight_bucket(envname='acme-stg', stage='prod')
    assert bucket == 'acme-foursight-prod-stg'
    # Envs not named in the table fall through to its 'other' entry.
    bucket = get_foursight_bucket(envname='acme-foo', stage='dev')
    assert bucket == 'acme-foursight-dev-other'
    bucket = get_foursight_bucket(envname='acme-foo', stage='prod')
    assert bucket == 'acme-foursight-prod-other'
    with local_attrs(EnvUtils, FOURSIGHT_BUCKET_TABLE="not-a-dict"):
        with EnvUtils.local_env_utils_for_testing():
            # With no usable bucket table, names are synthesized from FOURSIGHT_BUCKET_PREFIX.
            EnvUtils.FOURSIGHT_BUCKET_PREFIX = 'alpha-omega'
            EnvUtils.FOURSIGHT_BUCKET_TABLE = None
            EnvUtils.FULL_ENV_PREFIX = 'acme-'
            assert full_env_name(envname='acme-foo') == 'acme-foo'
            assert short_env_name(envname='acme-foo') == 'foo'
            assert full_env_name(envname='acme-stg') == 'acme-stg'
            assert short_env_name(envname='acme-stg') == 'stg'
            assert infer_foursight_from_env(envname='acme-foo') == 'foo'
            assert infer_foursight_from_env(envname='acme-stg') == 'stg'
            assert get_foursight_bucket(envname='acme-foo', stage='prod') == 'alpha-omega-prod-acme-foo'
            assert get_foursight_bucket(envname='acme-stg', stage='dev') == 'alpha-omega-dev-acme-stg'
            assert get_foursight_bucket(envname='acme-foo', stage='prod') == 'alpha-omega-prod-acme-foo'
            EnvUtils.FOURSIGHT_BUCKET_TABLE = None
            assert get_foursight_bucket(envname='acme-foo', stage='prod') == 'alpha-omega-prod-acme-foo'
            EnvUtils.FOURSIGHT_BUCKET_TABLE = {}
            assert get_foursight_bucket(envname='acme-foo', stage='prod') == 'alpha-omega-prod-acme-foo'
@using_orchestrated_behavior(data=EnvUtils.SAMPLE_TEMPLATE_FOR_FOURFRONT_TESTING)
def test_ff_get_env_real_url():
    """get_env_real_url resolution under the sample Fourfront orchestration template."""
    # ===== Explicitly Defined URLs =====
    # By special name
    assert get_env_real_url('data') == 'https://genetics.example.com'
    assert get_env_real_url('staging') == 'https://stg.genetics.example.com'
    assert get_env_real_url('test') == 'https://testing.genetics.example.com'
    # By environment long name
    assert get_env_real_url('acme-prd') == 'https://genetics.example.com'
    assert get_env_real_url('acme-stg') == 'https://stg.genetics.example.com'
    assert get_env_real_url('acme-pubtest') == 'https://testing.genetics.example.com'
    # By environment short name
    assert get_env_real_url('prd') == 'https://genetics.example.com'
    assert get_env_real_url('stg') == 'https://stg.genetics.example.com'
    assert get_env_real_url('pubtest') == 'https://testing.genetics.example.com'
    # ==== Other URLs are built from DEV_SUFFIX =====
    dev_suffix = EnvUtils.DEV_ENV_DOMAIN_SUFFIX
    for env in ['acme-mastertest', 'acme-foo', "testing",
                # It doesn't work to add 'acme-' to the front of the special names
                'acme-data', 'acme-staging', 'acme-test']:
        # Note use of 'http' because Fourfront prefers that.
        assert get_env_real_url(env) == f'http://{short_env_name(env)}{dev_suffix}'
@using_orchestrated_behavior(data=EnvUtils.SAMPLE_TEMPLATE_FOR_CGAP_TESTING)
def test_cgap_get_env_real_url():
    """get_env_real_url resolution under the sample CGAP orchestration template."""
    # ===== Explicitly Defined URLs =====
    # By special name
    assert get_env_real_url('cgap') == 'https://cgap.genetics.example.com'
    assert get_env_real_url('stg') == 'https://staging.genetics.example.com'
    assert get_env_real_url('testing') == 'https://testing.genetics.example.com'
    # By environment long name
    assert get_env_real_url('acme-prd') == 'https://cgap.genetics.example.com'
    assert get_env_real_url('acme-stg') == 'https://staging.genetics.example.com'
    assert get_env_real_url('acme-pubtest') == 'https://testing.genetics.example.com'
    # By environment short name
    assert get_env_real_url('prd') == 'https://cgap.genetics.example.com'
    assert get_env_real_url('stg') == 'https://staging.genetics.example.com'
    assert get_env_real_url('pubtest') == 'https://testing.genetics.example.com'
    # ==== Other URLs are built from DEV_SUFFIX =====
    # These are not wired in, and end up defaulting Fourfront-style
    dev_suffix = EnvUtils.DEV_ENV_DOMAIN_SUFFIX
    for env in ['acme-mastertest', 'acme-foo', 'staging', 'test',
                # It doesn't work to add 'acme-' to the front of the special names, except 'stg' is actually also
                # a short name of an environment in this example, and so it does work to do that.
                # (We tested that above.)
                'acme-cgap', 'acme-testing']:
        # Note:
        # * Uses 'https' uniformly for security reasons.
        # * Uses full env name.
        assert get_env_real_url(env) == f'https://{env}{dev_suffix}'
def test_app_case():
    """app_case picks the value matching ORCHESTRATED_APP; anything else raises ValueError."""
    for app, expected in ((APP_CGAP, 'foo'), (APP_FOURFRONT, 'bar')):
        with local_attrs(EnvUtils, ORCHESTRATED_APP=app):
            assert EnvUtils.app_case(if_cgap='foo', if_fourfront='bar') == expected
    with local_attrs(EnvUtils, ORCHESTRATED_APP='whatever'):
        with pytest.raises(ValueError):
            EnvUtils.app_case(if_cgap='foo', if_fourfront='bar')
def test_app_name():
    """app_name() simply reports the current ORCHESTRATED_APP setting."""
    assert EnvUtils.app_name() == EnvUtils.ORCHESTRATED_APP
    for app in (APP_CGAP, APP_FOURFRONT, 'whatever'):
        with local_attrs(EnvUtils, ORCHESTRATED_APP=app):
            assert EnvUtils.app_name() == app
def test_get_config_ecosystem_from_s3():
    """Unit-test EnvUtils._get_config_ecosystem_from_s3 against a mocked S3 layout.

    The mock serves per-bucket dicts of config objects.  An env entry may name
    an 'ecosystem', which chains to '<name>.ecosystem' entries; the expectations
    below show lookup following that chain, and stopping when it revisits an
    entry (circularity cases at the end).
    """
    with mock.patch.object(EnvUtils, "_get_config_object_from_s3") as mock_get_config_object_from_s3:
        main_ecosystem = {"ecosystem": "blue"}
        blue_ecosystem = {"_ecosystem_name": "blue"}
        green_ecosystem = {"_ecosystem_name": "green"}
        cgap_foo = {
            "ff_env": "cgap-foo",
            "es": "http://es.etc",
            "fourfront": "http://fourfront.etc",
            "ecosystem": "main"
        }
        # cgap-bar has no 'ecosystem' key, so its own entry is the final answer.
        cgap_bar = {
            "ff_env": "cgap-bar",
            "es": "http://es.etc",
            "fourfront": "http://fourfront.etc"
        }
        bucket_for_testing = 'bucket-for-testing'
        # cgap-ping and cgap-pong name ecosystems that point at each other.
        cgap_ping = {
            "ff_env": "cgap-ping",
            "es": "http://es.etc",
            "fourfront": "http://fourfront.etc",
            "ecosystem": "ping"
        }
        cgap_pong = {
            "ff_env": "cgap-pong",
            "es": "http://es.etc",
            "fourfront": "http://fourfront.etc",
            "ecosystem": "pong"
        }
        ping_ecosystem = {"ecosystem": "pong"}
        pong_ecosystem = {"ecosystem": "ping"}
        circular_testing_bucket = 'circular-testing-bucket'
        envs = {
            bucket_for_testing: {
                "cgap-foo": cgap_foo,
                "cgap-bar": cgap_bar,
                "main.ecosystem": main_ecosystem,
                "blue.ecosystem": blue_ecosystem,
                "green.ecosystem": green_ecosystem
            },
            circular_testing_bucket: {
                "cgap-ping": cgap_ping,
                "cgap-pong": cgap_pong,
                "ping.ecosystem": ping_ecosystem,
                "pong.ecosystem": pong_ecosystem
            }
        }

        def mocked_get_config_object_from_s3(env_bucket, config_key):
            # Simulates the S3 fetch; a missing bucket/key raises EnvUtilsLoadError.
            try:
                return envs[env_bucket][config_key]
            except Exception as e:
                # Typically an error raised due to boto3 S3 issues will end up getting caught and repackaged this way:
                raise EnvUtilsLoadError("Mocked bucket/key lookup failed.",
                                        env_bucket=env_bucket, config_key=config_key,
                                        encapsulated_error=e)

        mock_get_config_object_from_s3.side_effect = mocked_get_config_object_from_s3
        expected = cgap_bar
        actual = EnvUtils._get_config_ecosystem_from_s3(env_bucket=bucket_for_testing, config_key='cgap-bar')
        assert actual == expected
        # cgap-foo chains: cgap-foo -> main.ecosystem -> blue.ecosystem.
        expected = blue_ecosystem
        actual = EnvUtils._get_config_ecosystem_from_s3(env_bucket=bucket_for_testing, config_key='cgap-foo')
        assert actual == expected
        expected = blue_ecosystem
        actual = EnvUtils._get_config_ecosystem_from_s3(env_bucket=bucket_for_testing, config_key='main.ecosystem')
        assert actual == expected
        expected = blue_ecosystem
        actual = EnvUtils._get_config_ecosystem_from_s3(env_bucket=bucket_for_testing, config_key='blue.ecosystem')
        assert actual == expected
        expected = green_ecosystem
        actual = EnvUtils._get_config_ecosystem_from_s3(env_bucket=bucket_for_testing, config_key='green.ecosystem')
        assert actual == expected
        with pytest.raises(EnvUtilsLoadError):
            EnvUtils._get_config_ecosystem_from_s3(env_bucket=bucket_for_testing, config_key='missing')
        # Remaining tests test circularity.
        # We just stop at the point of being pointed back to something we've seen before.
        expected = pong_ecosystem
        actual = EnvUtils._get_config_ecosystem_from_s3(env_bucket=circular_testing_bucket, config_key='cgap-ping')
        assert actual == expected
        expected = ping_ecosystem
        actual = EnvUtils._get_config_ecosystem_from_s3(env_bucket=circular_testing_bucket, config_key='cgap-pong')
        assert actual == expected
        expected = pong_ecosystem
        actual = EnvUtils._get_config_ecosystem_from_s3(env_bucket=circular_testing_bucket, config_key='ping.ecosystem')
        assert actual == expected
        expected = ping_ecosystem
        actual = EnvUtils._get_config_ecosystem_from_s3(env_bucket=circular_testing_bucket, config_key='pong.ecosystem')
        assert actual == expected
def test_make_no_legacy():
    """_make_no_legacy wraps a function so that calling it always raises NotImplementedError."""
    def foo(a, b, *, c):
        return [a, b, c]

    foo_prime = _make_no_legacy(foo, 'foo')
    with pytest.raises(NotImplementedError) as exc:
        foo_prime(3, 4, c=7)
    # The error message echoes the positional and keyword args that were passed.
    assert str(exc.value) == ("There is only an orchestrated version of foo, not a legacy version."
                              " args=(3, 4) kwargs={'c': 7}")
def test_set_declared_data_legacy():
    """With legacy dispatch disabled, legacy-mode set_declared_data must raise."""
    with local_attrs(LegacyController, LEGACY_DISPATCH_ENABLED=False):
        with pytest.raises(LegacyDispatchDisabled) as exc:
            EnvUtils.set_declared_data({'is_legacy': True})
        assert str(exc.value) == ('Attempt to use legacy operation set_declared_data'
                                  ' with args=None kwargs=None mode=load-env.')
def test_if_orchestrated_various_legacy_errors():
    """Exercise if_orchestrated's legacy-dispatch error paths.

    Covers decorating with use_legacy while dispatch is disabled, calling
    unimplemented/use_legacy wrappers, the assumes_cgap guard, and a function
    that raises UseLegacy at runtime, under both LEGACY_DISPATCH_ENABLED
    settings.
    """
    def foo(x):
        return ['foo', x]

    def bar(x):
        return ['bar', x]

    def baz(x):
        # 99 is the magic input that requests falling back to the legacy version.
        if x == 99:
            raise UseLegacy()
        return ['baz', x]

    with local_attrs(LegacyController, LEGACY_DISPATCH_ENABLED=False):
        with pytest.raises(LegacyDispatchDisabled) as exc:
            if_orchestrated(use_legacy=True)(foo)
        # This error message could be better. The args aren't really involved. But it gets its point across
        # and anyway it should never happen. We're testing it just for coverage's sake. -kmp 25-Sep-2022
        assert str(exc.value) == "Attempt to use legacy operation foo with args=None kwargs=None mode=decorate."
    with local_attrs(LegacyController, LEGACY_DISPATCH_ENABLED=True):
        # Decoration succeeds while dispatch is enabled; calls are tested below.
        foo_prime = if_orchestrated(use_legacy=True)(foo)
        bar_prime = if_orchestrated(unimplemented=True)(bar)
        baz_prime = if_orchestrated(assumes_cgap=True)(baz)
    with local_attrs(LegacyController, LEGACY_DISPATCH_ENABLED=False):
        with pytest.raises(NotImplementedError) as exc:
            foo_prime(3)
        assert str(exc.value) == ("There is only an orchestrated version of foo,"
                                  " not a legacy version. args=(3,) kwargs={}")
        with pytest.raises(NotImplementedError) as exc:
            bar_prime(3)
        assert str(exc.value) == "Unimplemented: test.test_env_utils_orchestrated.bar"
        with local_attrs(EnvUtils, ORCHESTRATED_APP='cgap'):
            assert baz_prime(3) == ['baz', 3]
            # UseLegacy raised at runtime while dispatch is disabled => mode=raised.
            with pytest.raises(LegacyDispatchDisabled) as exc:
                baz_prime(99)
            assert str(exc.value) == "Attempt to use legacy operation baz with args=(99,) kwargs={} mode=raised."
            with local_attrs(LegacyController, LEGACY_DISPATCH_ENABLED=True):
                with pytest.raises(NotImplementedError) as exc:
                    assert baz_prime(3) == ['baz', 3]
                    baz_prime(99)  # This will try to use the legacy version, which is enabled but doesn't exist.
                assert str(exc.value) == ("There is only an orchestrated version of baz,"
                                          " not a legacy version. args=(99,) kwargs={}")
        with local_attrs(EnvUtils, ORCHESTRATED_APP='fourfront'):
            # assumes_cgap: calling under a non-cgap app is refused outright.
            with pytest.raises(NotImplementedError) as exc:
                baz_prime(3)
            assert 'Non-cgap applications are not supported.' in str(exc.value)
            with pytest.raises(LegacyDispatchDisabled) as exc:
                if_orchestrated(use_legacy=True)(legacy_blue_green_mirror_env)
            assert str(exc.value) == ("Attempt to use legacy operation blue_green_mirror_env"
                                      " with args=None kwargs=None mode=decorate.")
    bg = if_orchestrated()(legacy_blue_green_mirror_env)
    with local_attrs(EnvUtils, IS_LEGACY=True):
        assert bg('acme-green') == 'acme-blue'
        with local_attrs(LegacyController, LEGACY_DISPATCH_ENABLED=False):
            with pytest.raises(LegacyDispatchDisabled) as exc:
                assert bg('acme-green') == 'acme-blue'
            assert str(exc.value) == ("Attempt to use legacy operation blue_green_mirror_env"
                                      " with args=('acme-green',) kwargs={} mode=dispatch.")
@using_orchestrated_behavior
def test_env_equals():
    """env_equals treats an env alias (e.g. 'cgap') and its full name as the same env."""
    equal_pairs = [('same', 'same'), ('acme-prd', 'cgap'), ('cgap', 'acme-prd'), ('', '')]
    unequal_pairs = [('cgap', 'foobar'), ('foobar', 'cgap'), ('', 'cgap'), ('cgap', '')]
    for left, right in equal_pairs:
        assert env_equals(left, right)
    for left, right in unequal_pairs:
        assert not env_equals(left, right)
|
def answer(s):
    """Repeatedly halve s while its two halves are equal, printing the result.

    Prints two lines:
        pattern: <length of the smallest string reached by halving>
        count:   <number of successful halvings + 1>

    Does nothing for empty strings or strings longer than 200 characters.
    Fixed for Python 3: the original was Python 2 code — `print` statements,
    and `len(s)/2` produces a float in Python 3, which is a TypeError as a
    slice index; use floor division instead.
    """
    if 0 < len(s) <= 200:
        count = 0
        half = len(s) // 2
        while s[:half] == s[half:]:
            count += 1
            s = s[:half]
            half = len(s) // 2
        count += 1
        print("pattern: %s" % (len(s)))
        print("count: %s" % (count))


s = "abccbaabccba"
# answer() prints its results and returns None, so don't print the call itself.
answer(s)
import random
import copy
def clearScreen():
    """Clear the terminal using the ANSI escape sequence ESC[2J."""
    print("\x1b[2J")
def generateBoard():
    """Build a 5x5 bingo board of unique numbers drawn from 1-91, with the
    center cell replaced by the ' X' free space."""
    pool = list(range(1, 92))
    board = [[] for _ in range(5)]
    for row in range(5):
        for _ in range(5):
            pick = random.randint(0, len(pool) - 1)
            board[row].append(pool.pop(pick))
    board[2][2] = ' X'  # free space
    return board
# checks board if contains num
def inBoard(num, board):
    """Search the 5x5 board for num; mark the first match with ' X' and
    return True, or return False if num is not on the board."""
    for row in range(5):
        for col in range(5):
            if board[row][col] == num:
                board[row][col] = ' X'
                return True
    return False
# bool on if board meets win condition
def didWin(board):
    """Return True when any full row, column, or main diagonal consists
    entirely of ' X' marks; otherwise False."""
    lines = []
    for i in range(5):
        lines.append([board[j][i] for j in range(5)])  # cells with fixed second index
        lines.append([board[i][j] for j in range(5)])  # cells with fixed first index
    lines.append([board[i][i] for i in range(5)])       # top-left to bottom-right
    lines.append([board[4 - i][i] for i in range(5)])   # top-right to bottom-left
    return any(all(cell == ' X' for cell in line) for line in lines)
def printBoard(board, OG):
    """Print the marked board side by side with the original numbers.

    `board` is the play board (cells become ' X' when marked); `OG` is the
    untouched copy made at game start.  Single-digit cells get an extra
    leading space so columns line up.
    NOTE(review): the padding decision for the OG column is driven by the
    *marked* board's cell, so OG alignment can drift once cells are marked —
    cosmetic only.
    """
    print('  Marked Board  |  Board Numbers')
    for y in range(0,5):
        ogStr = ''
        for x in range(0,5):
            if board[x][y] != ' X':
                if int(board[x][y]) < 10:
                    print(" ",end='')
                    ogStr += ' '
            print(board[x][y],end=" ")
            ogStr += str(OG[x][y]) + ' '
        print('  ' + ogStr)
def draw(notDrawn):
    """Pick a random number from the not-yet-drawn pool, remove it, return it."""
    pick = random.randint(0, len(notDrawn) - 1)
    chosen = notDrawn[pick]
    notDrawn.remove(chosen)
    return chosen
def playGame():
    """Run one interactive bingo game.

    Loops reading input: Enter draws a number (marking the board if it
    matches), 'q' quits the program.  On a win, prompts for 'new' (returns
    True, caller starts another game) or 'q' (returns False).
    """
    notDrawn = list(range(1,92))  # numbers still in the draw pool (matches generateBoard's range)
    board1 = generateBoard()
    OG_BOARD1 = copy.deepcopy(board1)  # pristine copy for side-by-side display
    clearScreen()
    print('Press Enter to Draw Number. Enter q, then press enter to quit.')
    printBoard(board1, OG_BOARD1)
    while True:
        userInput = input()
        clearScreen()
        if userInput == 'q':
            print('Press Enter to Draw Number. Enter q, then press enter to quit.')
            printBoard(board1, OG_BOARD1)
            print("Game Over")
            exit()
        else:
            curDraw = draw(notDrawn)
            found = inBoard(curDraw, board1)
            print('Press Enter to Draw Number. Enter q, then press enter to quit.')
            printBoard(board1, OG_BOARD1)
            if didWin(board1) == True:
                # Win screen: redraw until the player chooses 'new' or 'q'.
                while True:
                    clearScreen()
                    print('Winner! Enter "new" to start new game. Enter "q" to quit.')
                    printBoard(board1, OG_BOARD1)
                    print('BINGO!!! --- YOU WIN! --- Drew ' + str(curDraw))
                    userInput = input()
                    if userInput == 'q':
                        print("Game Over")
                        return False
                    elif userInput == 'new':
                        return True
            else:
                if found == True:
                    print("Found a match! (((o(*゚▽゚*)o))) --- Drew " + str(curDraw))
                else:
                    print("Bad draw. No Match. (ノ°Д°)ノ︵ ┻━┻ --- Drew " + str(curDraw))
# Keep starting new games until playGame() reports the player quit.
runGame = True
while runGame:
    runGame = playGame()
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'XySetup.ui'
#
# Created by: PyQt5 UI code generator 5.4.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Form(object):
    """pyuic5-generated UI for the XY setup dialog (XySetup.ui): width/height
    fields, stepper A/B direction indicators, a speed slider, limit-switch
    status labels, and an illustration image.

    NOTE(review): generated code — regenerate from XySetup.ui rather than
    editing here (the header warns all changes will be lost).
    """

    def setupUi(self, Form):
        """Create and position all widgets on the fixed-size 942x428 form."""
        Form.setObjectName("Form")
        Form.resize(942, 428)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(Form.sizePolicy().hasHeightForWidth())
        Form.setSizePolicy(sizePolicy)
        # Min == max == current size: the dialog is non-resizable.
        Form.setMinimumSize(QtCore.QSize(942, 428))
        Form.setMaximumSize(QtCore.QSize(942, 428))
        font = QtGui.QFont()
        font.setFamily("Arial")
        font.setBold(True)
        font.setWeight(75)
        Form.setFont(font)
        self.groupBox = QtWidgets.QGroupBox(Form)
        self.groupBox.setGeometry(QtCore.QRect(10, 10, 381, 271))
        self.groupBox.setObjectName("groupBox")
        self.label = QtWidgets.QLabel(self.groupBox)
        self.label.setGeometry(QtCore.QRect(20, 20, 151, 16))
        self.label.setObjectName("label")
        self.label_2 = QtWidgets.QLabel(self.groupBox)
        self.label_2.setGeometry(QtCore.QRect(20, 50, 151, 16))
        self.label_2.setObjectName("label_2")
        # Width/height input fields.
        self.lineWidth = QtWidgets.QLineEdit(self.groupBox)
        self.lineWidth.setGeometry(QtCore.QRect(180, 20, 113, 20))
        self.lineWidth.setObjectName("lineWidth")
        self.lineHeight = QtWidgets.QLineEdit(self.groupBox)
        self.lineHeight.setGeometry(QtCore.QRect(180, 50, 113, 20))
        self.lineHeight.setObjectName("lineHeight")
        self.label_3 = QtWidgets.QLabel(self.groupBox)
        self.label_3.setGeometry(QtCore.QRect(20, 150, 131, 16))
        self.label_3.setObjectName("label_3")
        self.label_4 = QtWidgets.QLabel(self.groupBox)
        self.label_4.setGeometry(QtCore.QRect(20, 210, 131, 16))
        self.label_4.setObjectName("label_4")
        # Motor A/B direction indicators: clockwise (CK) and anticlockwise (CCK) pixmaps.
        self.motoA_CK = QtWidgets.QLabel(self.groupBox)
        self.motoA_CK.setGeometry(QtCore.QRect(180, 130, 51, 51))
        self.motoA_CK.setStyleSheet(" border: 1px solid rgb(67,67,67);\n"
                                    " border-radius: 4px;")
        self.motoA_CK.setPixmap(QtGui.QPixmap(":/images/stepping_motor-clockwise.png"))
        self.motoA_CK.setObjectName("motoA_CK")
        self.motoB_CK = QtWidgets.QLabel(self.groupBox)
        self.motoB_CK.setGeometry(QtCore.QRect(180, 190, 51, 51))
        self.motoB_CK.setStyleSheet(" border: 1px solid rgb(67,67,67);\n"
                                    " border-radius: 4px;")
        self.motoB_CK.setPixmap(QtGui.QPixmap(":/images/stepping_motor-clockwise.png"))
        self.motoB_CK.setObjectName("motoB_CK")
        self.motoA_CCK = QtWidgets.QLabel(self.groupBox)
        self.motoA_CCK.setGeometry(QtCore.QRect(270, 130, 51, 51))
        self.motoA_CCK.setPixmap(QtGui.QPixmap(":/images/stepping_motor-anticlockwise.png"))
        self.motoA_CCK.setObjectName("motoA_CCK")
        self.motoB_CCK = QtWidgets.QLabel(self.groupBox)
        self.motoB_CCK.setGeometry(QtCore.QRect(270, 190, 51, 51))
        self.motoB_CCK.setPixmap(QtGui.QPixmap(":/images/stepping_motor-anticlockwise.png"))
        self.motoB_CCK.setObjectName("motoB_CCK")
        # Help button: image swaps on hover/press via the stylesheet.
        self.pushButton = QtWidgets.QPushButton(self.groupBox)
        self.pushButton.setGeometry(QtCore.QRect(350, 10, 24, 24))
        self.pushButton.setStyleSheet(" QPushButton {\n"
                                      " border-image: url(:/images/help-icon.png) 0;\n"
                                      " }\n"
                                      "\n"
                                      " QPushButton:hover {\n"
                                      " border-image: url(:/images/help-icon-hover.png) 0;\n"
                                      " }\n"
                                      "\n"
                                      " QPushButton:pressed {\n"
                                      " border-image: url(:/images/help-icon-click.png) 0;\n"
                                      " }\n"
                                      "")
        self.pushButton.setText("")
        self.pushButton.setObjectName("pushButton")
        self.label_5 = QtWidgets.QLabel(self.groupBox)
        self.label_5.setGeometry(QtCore.QRect(170, 250, 71, 16))
        self.label_5.setObjectName("label_5")
        self.label_6 = QtWidgets.QLabel(self.groupBox)
        self.label_6.setGeometry(QtCore.QRect(260, 250, 91, 16))
        self.label_6.setObjectName("label_6")
        self.label_7 = QtWidgets.QLabel(self.groupBox)
        self.label_7.setGeometry(QtCore.QRect(20, 80, 121, 16))
        self.label_7.setObjectName("label_7")
        self.label_8 = QtWidgets.QLabel(self.groupBox)
        self.label_8.setGeometry(QtCore.QRect(180, 80, 181, 16))
        self.label_8.setObjectName("label_8")
        self.slidSpeed = QtWidgets.QSlider(self.groupBox)
        self.slidSpeed.setGeometry(QtCore.QRect(180, 100, 160, 19))
        self.slidSpeed.setProperty("value", 50)
        self.slidSpeed.setOrientation(QtCore.Qt.Horizontal)
        self.slidSpeed.setObjectName("slidSpeed")
        self.labelSpeed = QtWidgets.QLabel(self.groupBox)
        self.labelSpeed.setGeometry(QtCore.QRect(20, 100, 131, 16))
        self.labelSpeed.setObjectName("labelSpeed")
        self.btnOk = QtWidgets.QPushButton(Form)
        self.btnOk.setGeometry(QtCore.QRect(290, 290, 91, 23))
        self.btnOk.setObjectName("btnOk")
        self.label_9 = QtWidgets.QLabel(Form)
        self.label_9.setGeometry(QtCore.QRect(400, 10, 571, 431))
        self.label_9.setText("")
        self.label_9.setPixmap(QtGui.QPixmap(":/images/xy_setup.png"))
        self.label_9.setObjectName("label_9")
        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)

    def retranslateUi(self, Form):
        """Install all user-visible strings.

        Strings are kept verbatim from the generated file, including the
        'Directoin' typos — fix those in XySetup.ui, not here.
        """
        _translate = QtCore.QCoreApplication.translate
        Form.setWindowTitle(_translate("Form", "Form"))
        self.groupBox.setTitle(_translate("Form", "Xy Setups"))
        self.label.setText(_translate("Form", "Width (mm):"))
        self.label_2.setText(_translate("Form", "Height (mm):"))
        self.label_3.setText(_translate("Form", "Stepper A Directoin:"))
        self.label_4.setText(_translate("Form", "Stepper B Directoin:"))
        self.label_5.setText(_translate("Form", "ClockWise"))
        self.label_6.setText(_translate("Form", "Anti ClockWise"))
        self.label_7.setText(_translate("Form", "Limit Switch Status:"))
        self.label_8.setText(_translate("Form", "X-:0 X+:0 Y-:0 Y+:0 "))
        self.labelSpeed.setText(_translate("Form", "Speed (50%):"))
        self.btnOk.setText(_translate("Form", "Ok"))
import images_rc
|
# _*_ coding:utf-8 _*_
__author__ = 'T'
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
from scipy.linalg import eigh
import numpy as np
from sklearn.datasets import make_moons
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
def rbf_kernal_pca(X, gamma, n_components):
    '''
    RBF-kernel principal component analysis.

    :param X: shape = [n_sample, n_feature]
    :param gamma: Tuning parameter of the RBF kernel (kernel width)
    :param n_components: number of components to return
    :return: (X_pc, lambdas) where X_pc has shape [n_sample, n_components]
             (eigenvectors of the centered kernel matrix, leading first) and
             lambdas are the corresponding eigenvalues in descending order.
    '''
    # Pairwise squared Euclidean distances as a symmetric N x N matrix.
    sq_dists = pdist(X, 'sqeuclidean')
    mat_sq_dists = squareform(sq_dists)
    # RBF kernel matrix.
    K = np.exp(-gamma * mat_sq_dists)
    # Center the kernel matrix in the implicit feature space.
    N = K.shape[0]
    one_n = np.ones((N, N)) / N
    K = K - one_n.dot(K) - K.dot(one_n) + one_n.dot(K).dot(one_n)
    # eigh returns eigenvalues in ascending order, so index from the end.
    eigvals, eigvecs = eigh(K)
    # Pass a list, not a generator: np.column_stack no longer accepts
    # generators on modern NumPy.  (Also removed the debug print of eigvals.)
    X_pc = np.column_stack([eigvecs[:, -i] for i in range(1, n_components + 1)])
    lambdas = [eigvals[-i] for i in range(1, n_components + 1)]
    return X_pc, lambdas
def project_x(x_new, X, gamma, alphas, lambdas):
    """Project a new point onto the kernel-PCA components via the kernel trick:
    evaluate the RBF kernel between x_new and every training point, then take
    the dot product with the eigenvalue-normalized eigenvectors."""
    sq_dists = np.array([np.sum((x_new - sample) ** 2) for sample in X])
    kernel_row = np.exp(-gamma * sq_dists)
    return kernel_row.dot(alphas / lambdas)
if __name__ == "__main__":
    # Demo: compare standard (linear) PCA with RBF-kernel PCA on half-moons data.
    X, y = make_moons(n_samples=100, random_state=123)
    skpca = PCA(n_components=2)
    X_spca = skpca.fit_transform(X)
    X_pca, lambdas = rbf_kernal_pca(X, gamma=15, n_components=2)
    # Re-project one training point via the kernel trick; it should agree with
    # its existing projection x_proj (X_pca[25]).
    x_new = X[25]
    x_proj = X_pca[25]
    x_reproj = project_x(x_new, X, gamma=15, alphas=X_pca, lambdas=lambdas)
    print(x_reproj)
    # Left plot: linear PCA; right plot: kernel PCA.  Red triangles = class 0,
    # blue circles = class 1.
    fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(7, 3))
    ax[0].scatter(X_spca[y == 0, 0], X_spca[y == 0, 1], color='r', marker='^', alpha=0.5)
    ax[0].scatter(X_spca[y == 1, 0], X_spca[y == 1, 1], color='b', marker='o', alpha=0.5)
    ax[1].scatter(X_pca[y == 0, 0], X_pca[y == 0, 1], color='r', marker='^', alpha=0.5)
    ax[1].scatter(X_pca[y == 1, 0], X_pca[y == 1, 1], color='b', marker='o', alpha=0.5)
    plt.show()
|
##gcd program
#num1 = int ( input ( ' Enter the number 1 = ' ) )
#num2 = int ( input ( ' Enter the number 2 = ' ) )
#min = num1 if num1 < num2 else num2 # to find smaller number
#larg = 1
#for i in range ( 1, min+1 ):
#if num1 % i == 0 and num2 % i == 0:
#larg = i
## print GCD
#print ( ' GCD of ', num1 , ' and ' , num2 , ' = ' , larg )
#________________________
# Same program implemented using a function
def GCD(num1, num2):
    """Return the greatest common divisor of num1 and num2.

    Uses Euclid's algorithm: O(log min(a, b)) instead of the original
    trial-division loop's O(min(a, b)).  Also fixes the zero case — the
    trial-division version returned 1 for GCD(0, n), but mathematically
    GCD(0, n) == n — and stops shadowing the builtin `min`.
    """
    while num2:
        num1, num2 = num2, num1 % num2
    return abs(num1)
# Interactive driver: read two integers and print their GCD.
num1 = int ( input ( ' Enter the number 1 = ' ) )
num2 = int ( input ( ' Enter the number 2 = ' ) )
print ( ' GCD of ', num1 , ' and ' , num2 , ' = ' , GCD(num1,num2))
# Train a single-layer LSTM (TensorFlow 1.x API) to predict the next CO2 ppm
# reading from the previous `seq_length` readings pulled from a MySQL table,
# then save the trained weights to a checkpoint.
import tensorflow as tf
import numpy as np
import pymysql

tf.set_random_seed(700)  # reproducible weight initialization
seq_length = 7       # window size: timesteps fed to the LSTM per sample
data_dim = 1         # one feature per timestep (ppm)
hidden_dim = 10      # LSTM hidden-state size
output_dim = 1       # single predicted value
learning_rate = 0.01
iterations = 2000
#num_layers = 3

# connect db
# NOTE(review): database host/user/password are hard-coded here; they should
# live in environment variables or a config file, and this credential should
# be rotated since it has been committed.
con=pymysql.connect(host='52.78.192.119',port=3306,user='root',password='Cap2bowoo!',db='abeekx',charset='utf8')
cursor=con.cursor()
cursor.execute("SELECT ppm,time FROM sensors")
xy=[]
# noisy data handling: a jump of more than 3000 ppm from the previous reading
# is treated as a sensor glitch and replaced by the previous reading.
flag=False
for row in cursor:
    cur_ppm = float(row[0])
    if(flag):
        if(cur_ppm-pre_ppm>3000.0):
            xy.append([pre_ppm])
            cur_ppm=pre_ppm
        else:
            xy.append([cur_ppm])
    else:
        # First row: nothing to compare against yet.
        xy.append([cur_ppm])
    pre_ppm = cur_ppm
    flag=True
cursor.close()
con.close() # db disconnect

xy1=xy # pre-scaling data, kept for reference
# Min-max scaling to [0, 1]; 1e-7 avoids division by zero on constant data.
numerator = xy - np.min(xy, 0)
denominator = np.max(xy, 0) - np.min(xy, 0)
xy = (xy - np.min(xy, 0))/ (denominator + 1e-7)
x = xy
dataX = []
dataY = []
# Sliding windows: each sample is seq_length consecutive values; the label is
# the value immediately after the window.
for i in range(0, len(x) - seq_length):
    _x = x[i:i + seq_length]
    _y = x[i+seq_length]
    dataX.append(_x)
    dataY.append(_y)
# train/test split (80/20, chronological)
train_size = int(0.8*len(dataX))
trainX, testX = np.array(dataX[0:train_size]), np.array(dataX[train_size:len(dataX)])
trainY, testY = np.array(dataY[0:train_size]), np.array(dataY[train_size:len(dataY)])
# input place holders
X = tf.placeholder(tf.float32, [None, seq_length, data_dim])
Y = tf.placeholder(tf.float32, [None, data_dim])
# build a LSTM network
cell = tf.contrib.rnn.BasicLSTMCell(num_units=hidden_dim, state_is_tuple=True, activation=tf.tanh)
'''
# Multi Layer LSTM network
cells = []
for _ in range(num_layers):
    cell = tf.contrib.rnn.BasicLSTMCell(num_units=hidden_dim, state_is_tuple=True, activation=tf.tanh)
    cell = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=0.8)
    cells.append(cell)
cell = tf.contrib.rnn.MultiRNNCell(cells,state_is_tuple=True)
'''
outputs, _states = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)
# Regress the last timestep's LSTM output to a single value.
Y_pred = tf.contrib.layers.fully_connected(outputs[:,-1], output_dim, activation_fn=None)
# cost/loss
loss = tf.reduce_sum(tf.square(Y_pred - Y)) # sum of the squares
# optimizer
optimizer = tf.train.AdamOptimizer(learning_rate)
train = optimizer.minimize(loss)
# RMSE (evaluation graph; fed with targets/predictions separately)
targets = tf.placeholder(tf.float32, [None, data_dim])
predictions = tf.placeholder(tf.float32, [None, data_dim])
rmse = tf.sqrt(tf.reduce_mean(tf.square(targets - predictions)))
sess=tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
# Training step: full-batch gradient descent over all training windows.
for i in range(iterations):
    _, step_loss = sess.run([train, loss], feed_dict={X: trainX, Y: trainY})
    print("[step: {}] loss: {}".format(i, step_loss))
saver=tf.train.Saver()
save_path=saver.save(sess,"./rnn_train_co2.ckpt") # save train variable
import json
import os
from pathlib import Path
from typing import Any, Dict, Generic, List, Optional, Type, TypeVar, overload
import confuse # type:ignore
import dotenv
from pydantic.generics import GenericModel
from hibiapi import __file__ as root_file
CONFIG_DIR = Path(".") / "configs"
DEFAULT_DIR = Path(root_file).parent / "configs"
_T = TypeVar("_T")
def _generate_default() -> int:
    """Copy any missing default config files into CONFIG_DIR.

    Existing user config files are left untouched.  Returns the total number
    of characters written (0 when every config file already exists).
    """
    CONFIG_DIR.mkdir(parents=True, exist_ok=True)
    written = 0
    for filename in os.listdir(DEFAULT_DIR):
        target = CONFIG_DIR / filename
        if target.is_file():
            continue  # never overwrite an existing user config
        template = DEFAULT_DIR / filename
        written += target.write_text(
            template.read_text(encoding="utf-8"),
            encoding="utf-8",
        )
    return written
# Startup guard: load .env if one exists; otherwise require that the user
# config files already existed (_generate_default() > 0 means configs were
# just created from the bundled defaults and still need to be filled in).
# NOTE(review): `assert` is stripped under `python -O`; raising an exception
# would make this check unconditional.
if dotenv.find_dotenv():
    assert dotenv.load_dotenv(), "Failed to load .env"
else:
    assert _generate_default() <= 0, "Please complete config file!"
class _TypeChecker(GenericModel, Generic[_T]):
    """Pydantic helper that validates/coerces a single value to type parameter _T."""

    value: _T
class ConfigSubView(confuse.Subview):
    """confuse Subview whose get() runs values through pydantic validation.

    `get(template)` returns the value validated/coerced as the given type;
    the `as_*` helpers are shorthands for common types.
    """

    @overload
    def get(self) -> Any:
        ...

    @overload
    def get(self, template: Type[_T]) -> _T:
        ...

    def get(self, template: Optional[Type[_T]] = None) -> _T:
        # Route the raw confuse value through pydantic for type validation.
        return _TypeChecker[template or Any](value=super().get()).value  # type:ignore

    def as_str(self) -> str:
        return self.get(str)

    def as_str_seq(self, split: str = "\n") -> List[str]:
        # Split a stripped string value into a list; defaults to one item per line.
        return self.as_str().strip().split(split)

    def as_number(self) -> int:
        return self.get(int)

    def as_bool(self) -> bool:
        return self.get(bool)

    def as_path(self) -> Path:
        return self.get(Path)

    def as_dict(self) -> Dict[str, Any]:
        return self.get(Dict[str, Any])

    def __getitem__(self, key: str) -> "ConfigSubView":
        # Preserve this subclass when indexing deeper into the config tree.
        return self.__class__(self, key)
class AppConfig(confuse.Configuration):
    """confuse Configuration layering, highest priority first:
    environment variables (``<NAME>_...``), the user YAML in CONFIG_DIR,
    then the bundled default YAML in DEFAULT_DIR.
    """

    def __init__(self, name: str):
        self._config_name = name
        self._config = CONFIG_DIR / (filename := name + ".yml")
        self._default = DEFAULT_DIR / filename
        super().__init__(name)
        self._add_env_source()

    def config_dir(self) -> str:
        return str(CONFIG_DIR)

    def user_config_path(self) -> str:
        return str(self._config)

    def _add_env_source(self):
        # Collect env vars prefixed with '<config name>_' (case-insensitive),
        # keyed by the remainder of the variable name, lower-cased.
        config_name = self._config_name.lower() + "_"
        env_configs = {
            k[len(config_name) :].lower(): str(v)
            for k, v in os.environ.items()
            if k.lower().startswith(config_name)
        }
        # Convert `AAA_BBB_CCC=DDD` to `{'aaa':{'bbb':{'ccc':'ddd'}}}`
        source_tree: Dict[str, Any] = {}
        for key, value in env_configs.items():
            _tmp = source_tree
            *nodes, name = key.split("_")
            for node in nodes:
                _tmp = _tmp.setdefault(node, {})
            if value == "":
                # Empty value: leave the leaf unset (intermediate nodes may remain).
                continue
            try:
                # Prefer JSON so numbers/booleans/lists survive; fall back to raw string.
                _tmp[name] = json.loads(value)
            except json.JSONDecodeError:
                _tmp[name] = value
        # Insert at position 0 so env vars override both YAML sources.
        self.sources.insert(0, confuse.ConfigSource.of(source_tree))

    def _add_default_source(self):
        self.add(confuse.YamlSource(self._default, default=True))

    def _add_user_source(self):
        # The user config is optional; the defaults cover any missing values.
        self.add(confuse.YamlSource(self._config, optional=True))

    def __getitem__(self, key: str) -> ConfigSubView:
        return ConfigSubView(self, key)
class GeneralConfig(AppConfig):
    """General-purpose configuration; currently identical to AppConfig."""

    def __init__(self, name: str):
        # NOTE(review): this override adds nothing over AppConfig.__init__
        # and could be removed.
        super().__init__(name)
class APIConfig(GeneralConfig):
    """Per-API configuration; behaves exactly like GeneralConfig."""
    pass
# Module-level singletons: the "general" config plus two frequently-used values.
Config = GeneralConfig("general")
DATA_PATH = Config["data"]["path"].as_path().expanduser().absolute()
DEBUG = Config["debug"].as_bool()
|
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 11 22:15:59 2016
@author: QiuXun
https://leetcode.com/problems/two-sum/
"""
class Solution(object):
    """LeetCode 1 (Two Sum): find indices of two numbers that sum to target."""

    def __init__(self):
        # Kept for backward compatibility with the original class; unused by twoSum.
        self.vals = []

    def twoSum(self, nums, target):
        """Return [i, j] with i < j and nums[i] + nums[j] == target, else None.

        Single pass with a value->index map: O(n) time, O(n) space, replacing
        the original O(n^2)-style dictionary-of-index-lists approach.  Also
        fixes a bug in the original: when the complement occurred more than
        once, `[i] + j` could return three or more indices instead of two.
        """
        seen = {}  # value -> index where it was first available as a partner
        for j, value in enumerate(nums):
            complement = target - value
            if complement in seen:
                return [seen[complement], j]
            seen[value] = j
# Test case: the two zeros at indices 0 and 3 sum to the target 0, so the
# expected result is [0, 3].  The return value is discarded (nothing prints).
S = Solution()
S.twoSum([0,4,3,0],0)
# Marianne Lawless
# Programming and Scripting Project 2018
# Iris dataset downloaded from https://archive.ics.uci.edu/ml/datasets/iris
# NOTE(review): this reads like a pasted REPL transcript.  The bare
# expressions (column accesses, the numpy.mean/max/min calls, and the literal
# numbers recording their results) all evaluate to values that are discarded;
# running the file as a script produces no output.
import numpy  # Read data file into array
data = numpy.genfromtxt('data/iris.csv', delimiter=',')
data[0]  # Access the first line of data file
(data[:,0])  # Access the first column of the data file ..Sepal Length
(data[:,1])  # Access the second column of the data file ..Sepal Width
(data[:,2])  # Access the third column of the data file ..Petal Length
(data[:,3])  # Access the fourth column of the data file ..Petal Width
numpy.mean(data[:,0])  # Average / Mean of Sepal Length (recorded below)
5.8433333333333337
numpy.mean(data[:,1])  # Average / Mean of Sepal Width
3.0540000000000003
numpy.mean(data[:,2])  # Average / Mean of Petal Length
3.7586666666666662
numpy.mean(data[:,3])  # Average / Mean of Petal Width
1.1986666666666668
numpy.max (data[:,0])  # Maximum of Sepal Length
7.9000000000000004
numpy.max (data[:,1])  # Maximum of Sepal Width
4.4000000000000004
numpy.max (data[:,2])  # Maximum of Petal Length
6.9000000000000004
numpy.max (data[:,3])  # Maximum of Petal Width
2.5
numpy.min (data[:,0])  # Minimum of Sepal Length
4.2999999999999998
numpy.min (data[:,1])  # Minimum of Sepal Width
2.0
numpy.min (data[:,2])  # Minimum of Petal Length
1.0
numpy.min (data[:,3])  # Minimum of Petal Width
0.10000000000000001
|
# __all__ =[
# 'augmentations',
# 'datasets',
# 'logger',
# 'parse_config',
# 'transforms',
# 'utils'
# ] |
# coding: utf-8
# Minimal Theano shared-variable demo: compute z = x . w^T where w is a shared
# weight row-vector that is incremented by 1 element-wise on every call.
import theano
from theano import tensor as T
import numpy as np

data = np.array([[1, 2, 3]], dtype=theano.config.floatX)
x = T.dmatrix(name='x')
# Shared weights start at zero and persist between function calls.
w = theano.shared(np.asarray([[0.0, 0.0, 0.0]], dtype=theano.config.floatX))
z = x.dot(w.T)
# Update rule applied on each call: w := w + 1.
update = [[w, w + 1.0]]
# `givens` pins the symbolic input x to the constant `data`, so the compiled
# function takes no arguments.
net_input = theano.function(inputs=[], updates=update, givens={x: data}, outputs=z)
for i in range(5):
    # Each call prints z and then bumps w; values should step by sum(data)=6
    # per call (0, 6, 12, ...) assuming updates apply after outputs — confirm.
    print('z%d:' % i, net_input())
|
import utils, time, socket, sys, re, os

# Bootstrap script for a Cloudera Manager agent host: set up the CM yum repo,
# disable firewall/SELinux, configure NTP against the designated server, then
# install the Cloudera Manager agent and point it at the CM server.
os.system("mkdir -p /opt/cto/log/")
logger = utils.getLogger("/opt/cto/log/installation.log","a")
logger.info("*******INSTALLATION OF PLATFORM BEGIN******")
# Hostnames are read from an XML config via the project's utils helper.
cm_server_host=utils.getXmlValue("manager","host")
logger.info("Cloudera Manager from XML "+cm_server_host)
ntp_server=utils.getXmlValue("manager","ntpserver")
logger.info("NTP Server from XML"+ntp_server)
repo_url = "http://"+cm_server_host+"/cm/"
# Prep Cloudera repo: write a yum repo file pointing at the CM server.
logger.info("creating repo file to be placed on Cloudera Manager")
repo_file = open("cloudera-manager.repo","w")
repo_file.writelines(["[cloudera-manager]\n","name=cloudera-manager\n","baseurl="+repo_url+"\n","enabled=1\n","gpgcheck=0\n"])
repo_file.close()
#setup the repo on CM
logger.info("copying repo files to Cloudera Manager")
utils.run_os_command("sudo yum -y install createrepo httpd")
command = "service httpd start"
utils.run_os_command(command)
#if socket.gethostname() == cm_server_host or socket.gethostbyname(socket.gethostname) == cm_server_host:
utils.run_os_command("sudo cp cloudera-manager.repo /etc/yum.repos.d/")
# Turn off firewall
logger.info("Turning off firewall on the server")
utils.run_os_command("sudo service iptables stop")
utils.run_os_command("sudo service ip6tables stop")
# Turn off SELINUX (runtime only; does not survive reboot)
logger.info("Turning off selinux on the server")
utils.run_os_command("echo 0 | sudo tee /selinux/enforce > /dev/null")
# Set up NTP
logger.info("Enabling NTP on the server")
utils.run_os_command("sudo yum -y install ntp")
utils.run_os_command("sudo chkconfig ntpd on")
#setup NTPServer config
# Comment out every existing 'server ' line in ntp.conf, then append ours.
with open("/etc/ntp.conf", "r") as sources:
    lines = sources.readlines()
with open("/etc/ntp.conf", "w") as sources:
    for line in lines:
        if "server " in line:
            sources.write(re.sub(r"^server ", "#server ", line))
        else:
            sources.write(line)
    sources.write("server " + ntp_server)
utils.run_os_command("sudo service ntpd start")
# For master
command="sudo yum -y install cloudera-manager-agent cloudera-manager-daemons"
utils.run_os_command(command)
# Point the agent's config.ini at the CM server host.
with open("/etc/cloudera-scm-agent/config.ini", "r") as sources:
    lines = sources.readlines()
with open("/etc/cloudera-scm-agent/config.ini", "w") as sources:
    for line in lines:
        sources.write(re.sub(r"server_host=.*", "server_host="+cm_server_host, line))
logger.info("Starting Cloudera Agent .....!!")
command="sudo service cloudera-scm-agent start"
utils.run_os_command(command)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Description
Models need to be parsed and put together, this saves time on that, if it has been parsed before and not updated
~~~~~~~~~~~~~~~
:license: MIT
:author: Stephen Dop
"""
from sqlalchemy import Column, Integer, String, DateTime, ForeignKey
from sqlalchemy.orm import relationship
from DataModels import base
import datetime
class ParsedPostModel(base):
    """Cache table for parsed post bodies.

    Stores the parsed text of a post so re-parsing can be skipped when the
    source post has not been updated since `last_update`.
    """
    __tablename__ = 'rsisubs_parsed_posts'
    id = Column(Integer, primary_key=True)  # surrogate key
    post_id = Column(Integer, ForeignKey('rsisubs_posts.id'))  # source post row
    parsed_text = Column(String, nullable=True)  # cached parse result (None = not parsed yet)
    last_update = Column(DateTime(timezone=True), default=datetime.datetime.utcnow)  # when this cache entry was written
    post = relationship("PostModel")  # ORM link back to the raw post
def my_mp3_playlist(file_path):
    """Summarize a playlist file of "title;author;mm:ss;" records.

    file_path: path to a text file in which every song contributes three
        ';'-separated fields (title, author, length). Newlines are ignored
        and a trailing ';' per record is tolerated but not required.
    Returns (longest_title, song_count, most_frequent_author);
    ("", 0, "") for an empty playlist.
    """
    with open(file_path, "r") as f:
        text = f.read().replace("\n", "")
    splitted = text.split(";")
    # Stop at len-2 (not len-3) so the final record is kept whether or not
    # the file ends with a trailing ';' — the original dropped the last
    # song when there was no trailing separator.
    grouped = [
        [splitted[i], splitted[i + 1], splitted[i + 2]]
        for i in range(0, len(splitted) - 2, 3)
    ]
    if not grouped:
        return "", 0, ""

    def _seconds(length):
        # Parse "mm:ss" into seconds. The original compared float("m.ss"),
        # which mis-orders e.g. 2:50 (2.5) vs 2:45 (2.45).
        parts = length.split(":")
        try:
            if len(parts) == 2:
                return int(parts[0]) * 60 + int(parts[1])
            return float(parts[0])
        except ValueError:
            return 0.0

    longest = max(grouped, key=lambda song: _seconds(song[2]))[0]
    count = len(grouped)
    # Count only author fields; the original counted matches across the
    # whole flat list, so a title equal to an author inflated its count.
    authors = [song[1] for song in grouped]
    freq_authour = max(authors, key=authors.count)
    return longest, count, freq_authour
# Demo run against the sample playlist; prints the returned tuple.
_ = my_mp3_playlist("files/songs.txt")
print(type(_), _)
from shutil import copyfile, move
from libtuto.config_file_data import ConfigFileData
from os.path import isfile
class InvalidConfigList(Exception):
    """Raised when an empty or falsy override-data list is supplied."""
class ConfigFileOverride:
    """Backs up and overrides a set of configuration files.

    On construction, every target file that already exists is copied to
    "<target>.backup"; each target is then replaced by its default config
    (copied when the default is an existing file, otherwise the default
    string is written as literal content). restore_backed_up_files() moves
    the backups back over the targets.
    """

    def __init__(self, override_data_list):
        """
        :type override_data_list: List[ConfigFileData]
        """
        if not override_data_list:
            raise InvalidConfigList("Argument override_data_list is invalid: {}"
                                    .format(override_data_list))
        self._override_data_list = override_data_list
        self._create_backups()
        self._override_config_files()

    def _create_backups(self):
        # Copy each existing target to "<target>.backup" and remember the
        # backup path on the data object for later restoration.
        for data in self._override_data_list:
            target = data.overridden_file()
            if isfile(target):
                data.backup_file = target + ".backup"
                copyfile(target, data.backup_file)

    def _override_config_files(self):
        # Replace each target with its default configuration. A "default"
        # that is not an existing file path is treated as literal content.
        for data in self._override_data_list:
            default = data.default_config_file()
            target = data.overridden_file()
            if isfile(default):
                copyfile(default, target)
            else:
                with open(target, "w+") as out:
                    out.write(default)

    def restore_backed_up_files(self):
        """Move every recorded backup back over its target file."""
        for data in self._override_data_list:
            if hasattr(data, "backup_file"):
                move(data.backup_file, data.overridden_file())
|
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 24 15:57:09 2019
@author: zixing.mei
"""
import lightgbm as lgb
import random
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
from sklearn.metrics import roc_curve
from matplotlib import pyplot as plt
import math
# Split by observation month: the last month (2018-11-30) is the out-of-time
# test set; everything earlier is training data.
# NOTE(review): `data` is not defined in this file — presumably loaded in an
# earlier notebook cell; confirm before running standalone.
df_train = data[data.obs_mth != '2018-11-30'].reset_index().copy()
df_test = data[data.obs_mth == '2018-11-30'].reset_index().copy()
NUMERIC_COLS = ['person_info','finance_info','credit_info','act_info']
from sklearn.preprocessing import OneHotEncoder,LabelEncoder
# Train a tiny GBDT (depth-1 stumps, 2 leaves) whose leaf indices become
# crossed features for a downstream logistic regression (GBDT+LR scheme).
lgb_train = lgb.Dataset(df_train[NUMERIC_COLS],
                        df_train['bad_ind'], free_raw_data=False)
params = {
    'num_boost_round': 50,
    'boosting_type': 'gbdt',
    'objective': 'binary',
    'num_leaves': 2,
    'metric': 'auc',
    'max_depth':1,
    'feature_fraction':1,
    'bagging_fraction':1, }
model = lgb.train(params,lgb_train)
leaf = model.predict(df_train[NUMERIC_COLS],pred_leaf=True)
lgb_enc = OneHotEncoder()
# Generate crossed features (original comment: 生成交叉特征)
lgb_enc.fit(leaf)
# Concatenate with the original features (original comment: 和原始特征进行合并)
data_leaf = np.hstack((lgb_enc.transform(leaf).toarray(),df_train[NUMERIC_COLS]))
# NOTE(review): the test set gets its own OneHotEncoder fitted on its own
# leaves — train/test one-hot columns may not align; confirm intended.
leaf_test = model.predict(df_test[NUMERIC_COLS],pred_leaf=True)
lgb_enc = OneHotEncoder()
lgb_enc.fit(leaf_test)
data_leaf_test = np.hstack((lgb_enc.transform(leaf_test).toarray(),
                            df_test[NUMERIC_COLS]))
train = data_leaf.copy()
train_y = df_train['bad_ind'].copy()
val = data_leaf_test.copy()
val_y = df_test['bad_ind'].copy()
# Logistic regression on the leaf-encoded features; plot train/test ROC.
lgb_lm = LogisticRegression(penalty='l2',C=0.2, class_weight='balanced',solver='liblinear')
lgb_lm.fit(train, train_y)
y_pred_lgb_lm_train = lgb_lm.predict_proba(train)[:, 1]
fpr_lgb_lm_train, tpr_lgb_lm_train, _ = roc_curve(train_y,y_pred_lgb_lm_train)
y_pred_lgb_lm = lgb_lm.predict_proba(val)[:,1]
fpr_lgb_lm,tpr_lgb_lm,_ = roc_curve(val_y,y_pred_lgb_lm)
plt.figure(1)
plt.plot([0, 1], [0, 1], 'k--')
plt.plot(fpr_lgb_lm_train,tpr_lgb_lm_train,label='LGB + LR train')
plt.plot(fpr_lgb_lm, tpr_lgb_lm, label='LGB + LR test')
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve')
plt.legend(loc='best')
plt.show()
# KS statistic = max |FPR - TPR|.
print('LGB+LR train ks:',abs(fpr_lgb_lm_train - tpr_lgb_lm_train).max(),
      'LGB+LR AUC:', metrics.auc(fpr_lgb_lm_train, tpr_lgb_lm_train))
print('LGB+LR test ks:',abs(fpr_lgb_lm - tpr_lgb_lm).max(),
      'LGB+LR AUC:', metrics.auc(fpr_lgb_lm, tpr_lgb_lm))
# Wrap the encoded matrices as DataFrames with ft0..ftN column names so the
# PSI helpers below can address columns.
dff_train = pd.DataFrame(train)
dff_train.columns = [ 'ft' + str(x) for x in range(train.shape[1])]
dff_val = pd.DataFrame(val)
dff_val.columns = [ 'ft' + str(x) for x in range(val.shape[1])]
# Build the 0/1 tallies a PSI computation needs (original comment:
# 生成可以传入PSI的数据集)
def make_psi_data(dff_train):
    """Per column, count how many entries equal 0 and how many equal 1.

    Returns a DataFrame with the same columns as `dff_train` and two rows:
    row 0 = count of zeros, row 1 = count of ones. Intended for one-hot
    (binary) feature matrices.
    """
    tallies = {
        col: [int((dff_train[col] == 0).sum()), int((dff_train[col] == 1).sum())]
        for col in dff_train.columns
    }
    return pd.DataFrame(tallies)
# Per-column 0/1 tallies for the train and validation one-hot matrices.
psi_data_train = make_psi_data(dff_train)
psi_data_val = make_psi_data(dff_val)
def var_PSI(dev_data, val_data):
    """Population Stability Index between two aligned count vectors.

    dev_data, val_data: sequences (or Series) of non-negative per-bin
        counts with matching length/order.
    Returns 0 when either side has no observations at all.

    Fix: a small epsilon is now added to BOTH ratios — the original only
    protected the val side, so a zero dev bin raised a math domain error
    in log().
    """
    dev_cnt, val_cnt = sum(dev_data), sum(val_data)
    if dev_cnt * val_cnt == 0:
        return 0
    eps = 1e-10  # keeps log() defined for empty bins; negligible otherwise
    PSI = 0.0
    for dev, val in zip(dev_data, val_data):
        dev_ratio = dev / dev_cnt + eps
        val_ratio = val / val_cnt + eps
        PSI += (dev_ratio - val_ratio) * math.log(dev_ratio / val_ratio)
    return PSI
# PSI of every one-hot feature between train and validation.
psi_dct = {}
for col in dff_train.columns:
    psi_dct[col] = var_PSI(psi_data_train[col],psi_data_val[col])
# Rank by PSI ascending and keep the most stable 60% of features.
f = zip(psi_dct.keys(),psi_dct.values())
f = sorted(f,key = lambda x:x[1],reverse = False)
psi_df = pd.DataFrame(f)
psi_df.columns = pd.Series(['变量名','PSI'])
feature_lst = list(psi_df[psi_df['PSI']<psi_df.quantile(0.6)[0]]['变量名'])
train = dff_train[feature_lst].copy()
train_y = df_train['bad_ind'].copy()
val = dff_val[feature_lst].copy()
val_y = df_test['bad_ind'].copy()
# Refit the LR on PSI-filtered features; plot ROC and report KS/AUC.
lgb_lm = LogisticRegression(C = 0.3,class_weight='balanced',solver='liblinear')
lgb_lm.fit(train, train_y)
y_pred_lgb_lm_train = lgb_lm.predict_proba(train)[:, 1]
fpr_lgb_lm_train, tpr_lgb_lm_train, _ = roc_curve(train_y, y_pred_lgb_lm_train)
y_pred_lgb_lm = lgb_lm.predict_proba(val)[:, 1]
fpr_lgb_lm, tpr_lgb_lm, _ = roc_curve(val_y, y_pred_lgb_lm)
plt.figure(1)
plt.plot([0, 1], [0, 1], 'k--')
plt.plot(fpr_lgb_lm_train, tpr_lgb_lm_train, label='LGB + LR train')
plt.plot(fpr_lgb_lm, tpr_lgb_lm, label='LGB + LR test')
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve')
plt.legend(loc='best')
plt.show()
print('LGB+LR train ks:',abs(fpr_lgb_lm_train - tpr_lgb_lm_train).max(),
      'LGB+LR AUC:', metrics.auc(fpr_lgb_lm_train, tpr_lgb_lm_train))
print('LGB+LR test ks:',abs(fpr_lgb_lm - tpr_lgb_lm).max(),'LGB+LR AUC:',
      metrics.auc(fpr_lgb_lm, tpr_lgb_lm))
# Aliases for the LGB_test call below.
x = train
y = train_y
val_x = val
val_y = val_y  # NOTE(review): self-assignment; kept as-is (no-op).
#定义lgb函数
def LGB_test(train_x, train_y, test_x, test_y):
    """Fit a shallow LightGBM classifier with early stopping.

    train_x/train_y, test_x/test_y: design matrices and binary labels.
    Returns (fitted_clf, best_valid_auc) where the AUC is taken from the
    'valid_1' (test) eval set.

    Fixes: `reg_Ap` was a typo for `reg_alpha` (LightGBM silently ignored
    the unknown kwarg, so no L1 regularization was ever applied);
    `max_features=140` is a scikit-learn, not LightGBM, parameter and was
    likewise ignored — removed.
    """
    from multiprocessing import cpu_count
    clf = lgb.LGBMClassifier(
        boosting_type='gbdt', num_leaves=31, reg_alpha=0.0, reg_lambda=1,
        max_depth=2, n_estimators=800, objective='binary',
        subsample=0.7, colsample_bytree=0.7, subsample_freq=1,
        learning_rate=0.05, min_child_weight=50,
        random_state=None, n_jobs=cpu_count() - 1,)
    clf.fit(train_x, train_y, eval_set=[(train_x, train_y), (test_x, test_y)],
            eval_metric='auc', early_stopping_rounds=100)
    return clf, clf.best_score_['valid_1']['auc']
# Train the model (original comment: 训练模型)
model,auc = LGB_test(x,y,val_x,val_y)
# Feature importances from the fitted booster (original: 模型贡献度放在feture中)
feature = pd.DataFrame(
            {'name' : model.booster_.feature_name(),
             'importance' : model.feature_importances_
            }).sort_values(by = ['importance'],ascending = False)
# Keep only features the booster actually used (importance > 5).
feature_lst2 = list(feature[feature.importance>5].name)
train = dff_train[feature_lst2].copy()
train_y = df_train['bad_ind'].copy()
val = dff_val[feature_lst2].copy()
val_y = df_test['bad_ind'].copy()
# Final LR on the importance-filtered features; plot ROC and report KS/AUC.
lgb_lm = LogisticRegression(C = 0.3,class_weight='balanced',solver='liblinear')
lgb_lm.fit(train, train_y)
y_pred_lgb_lm_train = lgb_lm.predict_proba(train)[:, 1]
fpr_lgb_lm_train, tpr_lgb_lm_train, _ = roc_curve(train_y, y_pred_lgb_lm_train)
y_pred_lgb_lm = lgb_lm.predict_proba(val)[:, 1]
fpr_lgb_lm, tpr_lgb_lm, _ = roc_curve(val_y, y_pred_lgb_lm)
plt.figure(1)
plt.plot([0, 1], [0, 1], 'k--')
plt.plot(fpr_lgb_lm_train, tpr_lgb_lm_train, label='LGB + LR train')
plt.plot(fpr_lgb_lm, tpr_lgb_lm, label='LGB + LR test')
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve')
plt.legend(loc='best')
plt.show()
print('LGB+LR train ks:',abs(fpr_lgb_lm_train - tpr_lgb_lm_train).max(),
      'LGB+LR AUC:', metrics.auc(fpr_lgb_lm_train, tpr_lgb_lm_train))
print('LGB+LR test ks:',abs(fpr_lgb_lm - tpr_lgb_lm).max(),'LGB+LR AUC:',
      metrics.auc(fpr_lgb_lm, tpr_lgb_lm))
|
# Exchange sort: compare each position with every later one and swap when
# out of order; prints the array ascending.
arr = [12, 14, 22, 8, 1, 6, 0]
for left in range(len(arr)):
    for right in range(left + 1, len(arr)):
        if arr[left] > arr[right]:
            arr[left], arr[right] = arr[right], arr[left]
print(*arr)
# for i in range(len(arr)):
# for j in range(len(arr)-1):
# if arr[j]>arr[j+1]:
# arr[j],arr[j+1]=arr[j+1],arr[j]
# print(*arr)
# arr.sort()
# a=int(input("Enter new Number"))
# for i in range(len(arr)):
# if arr[i]>a:
# arr.insert(arr.index(arr[i]),a)
# break
# print(*arr)
|
# Third-Party Imports
from django.core.exceptions import ValidationError
from rest_framework import serializers
# App Imports
from core import models
from core.constants import CHECKIN, CHECKOUT
class AssetSerializer(serializers.ModelSerializer):
    """Main Asset serializer.

    Exposes the read-only make/type/category hierarchy, slug-based writes
    for location/department/team/model number, and computed fields for
    check-in status, current assignee and allocation history.
    """
    checkin_status = serializers.SerializerMethodField()
    allocation_history = serializers.SerializerMethodField()
    assigned_to = serializers.SerializerMethodField()
    asset_category = serializers.ReadOnlyField()
    asset_sub_category = serializers.ReadOnlyField()
    asset_make = serializers.ReadOnlyField()
    make_label = serializers.ReadOnlyField(source="asset_make")
    asset_type = serializers.ReadOnlyField()
    asset_location = serializers.SlugRelatedField(
        many=False,
        slug_field="name",
        required=False,
        queryset=models.AndelaCentre.objects.all(),
    )
    department = serializers.SlugRelatedField(
        read_only=False,
        slug_field="name",
        queryset=models.Department.objects.all(),
        required=False,
    )
    team_name = serializers.SlugRelatedField(
        read_only=False,
        slug_field="name",
        queryset=models.DepartmentalTeam.objects.all(),
        required=False,
    )
    model_number = serializers.SlugRelatedField(
        queryset=models.AssetModelNumber.objects.all(), slug_field="name"
    )
    class Meta:
        model = models.Asset
        fields = (
            "id",
            "uuid",
            "asset_category",
            "asset_sub_category",
            "asset_make",
            "make_label",
            "asset_code",
            "serial_number",
            "model_number",
            "checkin_status",
            "created_at",
            "last_modified",
            "current_status",
            "asset_type",
            "allocation_history",
            "specs",
            "purchase_date",
            "notes",
            "assigned_to",
            "asset_location",
            "verified",
            "invoice_receipt",
            "department",
            "team_name",
            "active",
            "paid",
            "expiry_date",
        )
        depth = 1
        read_only_fields = (
            "uuid",
            "created_at",
            "last_modified",
            "assigned_to",
            "current_status",
            "notes",
            "asset_category",
            "asset_sub_category",
            "asset_make",
        )
    def get_checkin_status(self, obj):
        """Return 'checked_in'/'checked_out' from the latest AssetLog, or
        None when the asset has no log (AttributeError on `None.log_type`)."""
        try:
            asset_log = (
                models.AssetLog.objects.filter(asset=obj)
                .order_by("-created_at")
                .first()
            )
            if asset_log.log_type == CHECKIN:
                return "checked_in"
            elif asset_log.log_type == CHECKOUT:
                return "checked_out"
        except AttributeError:
            return None
    def get_assigned_to(self, obj):
        """Serialize whichever owner kind the assignee record points at.
        Local imports avoid a circular import with api.serializers."""
        if not obj.assigned_to:
            return None
        if obj.assigned_to.department:
            from api.serializers import DepartmentSerializer
            serialized_data = DepartmentSerializer(obj.assigned_to.department)
        elif obj.assigned_to.workspace:
            from api.serializers import OfficeWorkspaceSerializer
            serialized_data = OfficeWorkspaceSerializer(obj.assigned_to.workspace)
        elif obj.assigned_to.user:
            from api.serializers import UserSerializer
            serialized_data = UserSerializer(obj.assigned_to.user)
        elif obj.assigned_to.team:
            from api.serializers import TeamSerializer
            serialized_data = TeamSerializer(obj.assigned_to.team)
        else:
            return None
        return serialized_data.data
    def get_allocation_history(self, obj):
        """List of this asset's allocations with assignee/assigner emails."""
        allocations = models.AllocationHistory.objects.filter(asset=obj.id)
        return [
            {
                "id": allocation.id,
                "current_assignee": allocation.current_assignee.email
                if allocation.current_assignee
                else None,
                "previous_assignee": allocation.previous_assignee.email
                if allocation.previous_assignee
                else None,
                "assigner": allocation.assigner.email if allocation.assigner else None,
                "created_at": allocation.created_at,
            }
            for allocation in allocations
        ]
    def to_internal_value(self, data):
        """Extract spec fields from the payload and attach the matching
        (or newly created) AssetSpecs instance under 'specs'."""
        internals = super(AssetSerializer, self).to_internal_value(data)
        specs_serializer = AssetSpecsSerializer(data=data)
        # NOTE(review): is_valid() result is ignored and .data is used
        # regardless — confirm invalid spec payloads should be tolerated.
        specs_serializer.is_valid()
        if len(specs_serializer.data):
            try:
                specs, _ = models.AssetSpecs.objects.get_or_create(
                    **specs_serializer.data
                )
            except ValidationError as err:
                raise serializers.ValidationError(err.error_dict)
            internals["specs"] = specs
        return internals
    # # allow updating of active_inactive and paid for specific asset_types
    def update(self, instance, validated_data):
        """Restrict paid/active/expiry_date updates to their asset types
        (sim cards, mifi, emburse cards respectively)."""
        asset_type = instance.model_number.asset_make.asset_type.name
        paid = validated_data.get("paid")
        active_inactive = validated_data.get("active")
        expiry_date = validated_data.get("expiry_date")
        # Maps asset-type name -> {restricted_field: error message}.
        instance_type_and_associated_error = {
            "simcard": {"paid": "Only sim cards can have this field updated"},
            "mifi": {"active": "Only mifi cards can be activated or deactivated"},
            "embursecard": {
                "expiry_date": "Only emburse cards can have this field updated"
            },
        }
        if paid or expiry_date or active_inactive:
            for instance_type in instance_type_and_associated_error:
                # determine which error to raise by looking up which type is being updated
                if (
                    validated_data.get(
                        list(instance_type_and_associated_error[instance_type].keys())[
                            0
                        ]
                    )
                    and asset_type != instance_type
                ):
                    raise serializers.ValidationError(
                        instance_type_and_associated_error[instance_type]
                    )
        instance = super().update(instance, validated_data)
        return instance
class AssetAssigneeSerializer(serializers.ModelSerializer):
    """Flattens an AssetAssignee to a single display string: the user's
    email, or the department/workspace/team name (None if none set)."""
    assignee = serializers.SerializerMethodField()
    class Meta:
        model = models.AssetAssignee
        fields = ("id", "assignee")
    def get_assignee(self, obj):
        # Exactly one of these relations is expected to be set.
        if obj.user:
            return obj.user.email
        elif obj.department:
            return obj.department.name
        elif obj.workspace:
            return obj.workspace.name
        elif obj.team:
            return obj.team.name
class AssetLogSerializer(serializers.ModelSerializer):
    """Serializer for check-in/check-out log entries, flattening the
    make/type/category hierarchy of the related asset."""
    asset_make = serializers.ReadOnlyField(source="asset.model_number.asset_make.name")
    asset_type = serializers.ReadOnlyField(
        source="asset.model_number.asset_make.asset_type.name"
    )
    asset_sub_category = serializers.ReadOnlyField(
        source="asset.model_number.asset_make.asset_type.asset_sub_category.name"
    )
    asset_category = serializers.ReadOnlyField(
        source="asset.model_number.asset_make.asset_type.asset_sub_category.asset_category.name"
    )
    model_number = serializers.ReadOnlyField(source="asset.model_number.name")
    class Meta:
        model = models.AssetLog
        fields = (
            "id",
            "asset",
            "log_type",
            "created_at",
            "last_modified",
            "asset_make",
            "asset_type",
            "model_number",
            "asset_sub_category",
            "asset_category",
        )
    def to_representation(self, instance):
        """Render the asset as "<serial> - <code>" and add checker email."""
        instance_data = super().to_representation(instance)
        asset = models.Asset.objects.get(id=instance.asset.id)
        serial_no = asset.serial_number
        asset_code = asset.asset_code
        instance_data["checked_by"] = instance.checked_by.email
        instance_data["asset"] = f"{serial_no} - {asset_code}"
        return instance_data
    def validate(self, fields):
        """Reject a log whose type duplicates the asset's LATEST log.

        Fix: the original compared against an unordered .first(), which is
        not guaranteed to be the newest row; order by -created_at (matching
        AssetSerializer.get_checkin_status) before taking first().
        """
        existing_log = (
            models.AssetLog.objects.filter(asset=fields["asset"])
            .order_by("-created_at")
            .first()
        )
        if existing_log and existing_log.log_type == fields["log_type"]:
            raise serializers.ValidationError(
                f"The asset log type is already {existing_log.log_type}"
            )
        return fields
class AssetStatusSerializer(AssetSerializer):
    """Status-change records for an asset, with the list of strictly
    earlier status changes attached as `status_history`."""
    status_history = serializers.SerializerMethodField()
    class Meta:
        model = models.AssetStatus
        fields = (
            "id",
            "asset",
            "current_status",
            "status_history",
            "previous_status",
            "created_at",
        )
    def get_status_history(self, obj):
        """All status rows for the same asset created before this one."""
        asset_status = models.AssetStatus.objects.filter(asset=obj.asset)
        return [
            {
                "id": asset.id,
                "asset": asset.asset_id,
                "current_status": asset.current_status,
                "previous_status": asset.previous_status,
                "created_at": asset.created_at,
            }
            for asset in asset_status
            if obj.created_at > asset.created_at
        ]
    def to_representation(self, instance):
        """Render the asset as "<code> - <serial>"."""
        instance_data = super().to_representation(instance)
        serial_no = instance.asset.serial_number
        asset_code = instance.asset.asset_code
        instance_data["asset"] = f"{asset_code} - {serial_no}"
        return instance_data
class AllocationsSerializer(serializers.ModelSerializer):
    """Allocation records; renders assignees/assigner as emails and the
    asset as "<serial> - <code>"."""
    class Meta:
        model = models.AllocationHistory
        fields = ("asset", "current_assignee", "previous_assignee", "created_at")
        read_only_fields = ("previous_assignee",)
    def to_representation(self, instance):
        instance_data = super().to_representation(instance)
        serial_no = instance.asset.serial_number
        asset_code = instance.asset.asset_code
        # Replace FK ids with human-readable emails where the relation is set.
        if instance.previous_assignee:
            instance_data["previous_assignee"] = instance.previous_assignee.email
        if instance.current_assignee:
            instance_data["current_assignee"] = instance.current_assignee.email
        if instance.assigner:
            instance_data["assigner"] = instance.assigner.email
        instance_data["asset"] = f"{serial_no} - {asset_code}"
        return instance_data
class AssetCategorySerializer(serializers.ModelSerializer):
    """Asset category; accepts either `name` or legacy `category_name`
    on input and always exposes both on output."""
    category_name = serializers.ReadOnlyField(source="name")
    class Meta:
        model = models.AssetCategory
        fields = ("id", "name", "created_at", "last_modified", "category_name")
    def to_internal_value(self, data):
        # Back-fill `name` from the legacy alias before standard validation.
        _data = data.copy()
        if not _data.get("name"):
            _data["name"] = _data.get("category_name")
        internal_value = super().to_internal_value(_data)
        return internal_value
class AssetSubCategorySerializer(serializers.ModelSerializer):
    """Asset sub-category; accepts `name` or legacy `sub_category_name` on
    input and renders the parent category by name."""
    sub_category_name = serializers.ReadOnlyField(source="name")
    class Meta:
        model = models.AssetSubCategory
        fields = (
            "id",
            "name",
            "asset_category",
            "created_at",
            "last_modified",
            "sub_category_name",
        )
    def to_internal_value(self, data):
        # Back-fill `name` from the legacy alias before standard validation.
        _data = data.copy()
        if not _data.get("name"):
            _data["name"] = _data.get("sub_category_name")
        internal_value = super().to_internal_value(_data)
        return internal_value
    def to_representation(self, instance):
        # Show the parent category's name instead of its pk.
        instance_data = super().to_representation(instance)
        instance_data["asset_category"] = instance.asset_category.name
        return instance_data
class AssetTypeSerializer(serializers.ModelSerializer):
    """Asset type; accepts `name` or legacy `asset_type` on input and
    renders the parent sub-category by name."""
    asset_type = serializers.ReadOnlyField(source="name")
    class Meta:
        model = models.AssetType
        fields = (
            "id",
            "name",
            "asset_sub_category",
            "has_specs",
            "created_at",
            "last_modified",
            "asset_type",
        )
    def to_internal_value(self, data):
        """Back-fill `name` from the legacy alias before validation.

        Fix: check `_data` (the mutable copy), not `data`, matching the
        sibling category/sub-category/model-number serializers. Behavior is
        the same since `_data` is a copy, but the mixed usage was a trap.
        """
        _data = data.copy()
        if not _data.get("name"):
            _data["name"] = _data.get("asset_type")
        internal_value = super().to_internal_value(_data)
        return internal_value
    def to_representation(self, instance):
        # Show the parent sub-category's name instead of its pk.
        instance_data = super().to_representation(instance)
        instance_data["asset_sub_category"] = instance.asset_sub_category.name
        return instance_data
class AssetModelNumberSerializer(serializers.ModelSerializer):
    """Model number; accepts legacy aliases (`model_number`, `make_label`)
    on input, resolves `asset_make` from a pk, renders the make by name."""
    make_label = serializers.SerializerMethodField()
    model_number = serializers.ReadOnlyField(source="name")
    class Meta:
        model = models.AssetModelNumber
        fields = (
            "id",
            "name",
            "asset_make",
            "created_at",
            "last_modified",
            "model_number",
            "make_label",
        )
    def to_representation(self, instance):
        # Show the make's name instead of its pk.
        instance_data = super().to_representation(instance)
        instance_data["asset_make"] = instance.asset_make.name
        return instance_data
    def to_internal_value(self, data):
        """Back-fill legacy aliases, then resolve asset_make pk -> instance,
        raising DRF ValidationError for a missing or unknown pk."""
        _data = data.copy()
        if not _data.get("asset_make"):
            _data["asset_make"] = _data.get("make_label")
        if not _data.get("name"):
            _data["name"] = _data.get("model_number")
        asset_make = _data.get("asset_make")
        if not asset_make:
            raise serializers.ValidationError(
                {"asset_make": [self.error_messages["required"]]}
            )
        try:
            asset_make_instance = models.AssetMake.objects.get(id=asset_make)
        except Exception:
            raise serializers.ValidationError(
                {"asset_make": [f'Invalid pk "{asset_make}" - object does not exist.']}
            )
        internal_value = super().to_internal_value(_data)
        internal_value.update({"asset_make": asset_make_instance})
        return internal_value
    def get_make_label(self, obj):
        return obj.asset_make.name
class AssetConditionSerializer(serializers.ModelSerializer):
    """Condition notes for an asset; renders the asset as
    "<serial> - <code>"."""
    class Meta:
        model = models.AssetCondition
        fields = ("id", "asset", "notes", "created_at")
    def to_representation(self, instance):
        instance_data = super().to_representation(instance)
        serial_no = instance.asset.serial_number
        asset_code = instance.asset.asset_code
        instance_data["asset"] = f"{serial_no} - {asset_code}"
        return instance_data
class AssetMakeSerializer(serializers.ModelSerializer):
    """Asset make; accepts legacy `make_label` alias for the name and
    resolves `asset_type` from a pk on input."""
    asset_type = serializers.SerializerMethodField()
    make_label = serializers.SerializerMethodField()
    class Meta:
        model = models.AssetMake
        fields = (
            "id",
            "name",
            "asset_type",
            "created_at",
            "last_modified_at",
            "make_label",
        )
    def get_asset_type(self, obj):
        return obj.asset_type.name
    def get_make_label(self, obj):
        return obj.name
    def to_internal_value(self, data):
        """Back-fill `name`, then resolve asset_type pk -> instance.

        Fix: a missing "asset_type" key previously raised a bare KeyError
        (`_data["asset_type"]`), so the "required" ValidationError branch
        below was unreachable; use .get() like AssetModelNumberSerializer
        so clients receive a proper 400 response.
        """
        _data = data.copy()
        if not _data.get("name"):
            _data["name"] = _data.get("make_label")
        asset_type = _data.get("asset_type")
        if not asset_type:
            raise serializers.ValidationError(
                {"asset_type": [self.error_messages["required"]]}
            )
        try:
            asset_type_instance = models.AssetType.objects.get(id=asset_type)
        except Exception:
            raise serializers.ValidationError(
                {"asset_type": [f'Invalid pk "{asset_type}" - object does not exist.']}
            )
        internal_value = super().to_internal_value(_data)
        internal_value.update({"asset_type": asset_type_instance})
        return internal_value
class AssetIncidentReportSerializer(serializers.ModelSerializer):
    """Incident report; renders the submitter's email, a compact creation
    timestamp and the asset as "<serial> - <code>"."""
    submitted_by = serializers.SerializerMethodField()
    created_at = serializers.SerializerMethodField()
    class Meta:
        model = models.AssetIncidentReport
        fields = (
            "id",
            "asset",
            "incident_type",
            "incident_location",
            "incident_description",
            "injuries_sustained",
            "loss_of_property",
            "witnesses",
            "submitted_by",
            "police_abstract",
            "police_abstract_obtained",
            "created_at",
        )
    def get_created_at(self, obj):
        # NOTE(review): components are not zero-padded (e.g. "2021-4-1 9:5"),
        # so the string is not ISO-8601; confirm clients expect this before
        # switching to strftime("%Y-%m-%d %H:%M").
        date = obj.created_at
        date = f"{date.year}-{date.month}-{date.day} {date.hour}:{date.minute}"
        return date
    def get_submitted_by(self, instance):
        # Returns the email, or the raw value (None) when unset.
        if instance.submitted_by:
            return instance.submitted_by.email
        return instance.submitted_by
    def to_representation(self, instance):
        instance_data = super().to_representation(instance)
        serial_no = instance.asset.serial_number
        asset_code = instance.asset.asset_code
        instance_data["asset"] = f"{serial_no} - {asset_code}"
        return instance_data
class AssetHealthSerializer(serializers.ModelSerializer):
    """Read-only projection used for asset-health aggregation; expects
    annotated querysets providing model_number__name / current_status."""
    asset_type = serializers.ReadOnlyField()
    model_number = serializers.ReadOnlyField(source="model_number__name")
    count_by_status = serializers.ReadOnlyField(source="current_status")
    class Meta:
        model = models.Asset
        fields = ("asset_type", "model_number", "count_by_status")
class DepartmentAssetSerializer(serializers.ModelSerializer):
    """Minimal read-only asset view for department listings."""
    asset_type = serializers.ReadOnlyField()
    class Meta:
        model = models.Asset
        fields = ("uuid", "asset_category", "serial_number", "asset_code", "asset_type")
class AssetSpecsSerializer(serializers.ModelSerializer):
    """Hardware specs; all fields optional, with a custom whole-record
    uniqueness check (default validators disabled via `validators = []`)."""
    class Meta:
        model = models.AssetSpecs
        fields = (
            "id",
            "year_of_manufacture",
            "processor_speed",
            "screen_size",
            "processor_type",
            "storage",
            "memory",
        )
        extra_kwargs = {
            "processor_speed": {"required": False},
            "processor_type": {"required": False},
            "screen_size": {"required": False},
            "memory": {"required": False},
            "storage": {"required": False},
            "year_of_manufacture": {"required": False},
        }
        validators = []
    def validate(self, fields):
        """Reject specs identical to an existing record.

        Fix: exclude the instance being updated — the original matched the
        record against itself, so any update that kept the same values
        (or a PATCH of a subset) always failed with a uniqueness error.
        """
        queryset = models.AssetSpecs.objects.filter(**fields)
        if self.instance is not None:
            queryset = queryset.exclude(id=self.instance.id)
        if queryset.exists():
            raise serializers.ValidationError(
                "Similar asset specification already exist"
            )
        return fields
class StateTransitionSerializer(serializers.ModelSerializer):
    """Plain serializer for incident-report state transitions."""
    class Meta:
        model = models.StateTransition
        fields = (
            "id",
            "asset_incident_report",
            "incident_report_state",
            "asset_state_from_report",
        )
|
# -*- coding: utf-8 -*-
#
# Copyright 2018 isobar. All Rights Reserved.
#
# Usage:
# python teeth-whitening.py pic.jpg
#
import os
import sys
import argparse
import cv2
import dlib
import numpy as np
from skimage import io
from PIL import Image
from scipy.spatial import distance
import IsobarImg
# When True, intermediate images (face/mouth/mask/...) are written next to
# the results for inspection.
DEBUG = False
# Mouth-aspect-ratio threshold: above this the mouth counts as open.
MAR = 0.30
# 100*100
FACE_IMAGE_SIZE = 10000
# brightness and contrast
# https://stackoverflow.com/questions/39308030/how-do-i-increase-the-contrast-of-an-image-in-python-opencv/50053219#50053219
#CONST_PREDICTOR_PATH = "./data/shape_predictor_68_face_landmarks.dat"
CONST_IMAGE_PATH = "./faces/Tom_Cruise_avp_2014_4.jpg"
# CONST_IMAGE_PATH = "./faces/ko_p.jpg"
# CLI: dlib predictor .dat path, image file, optional contrast (-a) and
# brightness (-b) for the teeth region.
parser = argparse.ArgumentParser(description='teeth whitening editor')
parser.add_argument('predictor_path', help='predictor file')
parser.add_argument('file', help='image file')
parser.add_argument('-a', metavar='alpha', default='2.0', type=float, help='alpha value range: 1.0-3.0')
parser.add_argument('-b', metavar='beta', default='50', type=int, help='beta value range: 0-100')
args = parser.parse_args()
alpha = args.a
beta = args.b
#alpha = 1.0 # 1.0-3.0
#beta = 50 # 0-100
predictor_path = args.predictor_path
# dlib face detector + 68-point facial-landmark predictor.
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(predictor_path)
# compute the mouth aspect ratio
# opening mouth: mar > 0.30
def mouth_aspect_ratio(mouth):
    """Return the mouth aspect ratio (MAR) from 68 facial landmarks.

    mouth: full 68x2 landmark array (despite the name); uses the inner-lip
    gaps (61/67, 62/66, 63/65) normalized by the 33-51 reference distance.
    An opening mouth gives mar > 0.30.
    """
    ref = distance.euclidean(mouth[33], mouth[51])
    gap_a = distance.euclidean(mouth[61], mouth[67])
    gap_b = distance.euclidean(mouth[62], mouth[66])
    gap_c = distance.euclidean(mouth[63], mouth[65])
    mar = (gap_a + gap_b + gap_c) / (3 * ref)
    print("mar={}".format(mar))
    return mar
def alphaBlend(img1, img2, mask):
    """Alpha-blend two CV_8UC3 images using `mask` as per-pixel weights.

    mask: CV_8UC1 or CV_8UC3; a single-channel mask is expanded to BGR
    before being scaled into [0, 1] blend weights.
    """
    three_channel = mask.ndim == 3 and mask.shape[-1] == 3
    weights = (mask if three_channel
               else cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)) / 255.0
    return cv2.convertScaleAbs(img1 * (1 - weights) + img2 * weights)
def shape2np(s):
    """Convert a dlib full_object_detection to an (N, 2) int32 array of
    (x, y) landmark coordinates."""
    coords = [(p.x, p.y) for p in s.parts()]
    return np.array(coords, dtype=np.int32).reshape(-1, 2)
def main():
    """Detect the largest face in `image_path`; if its mouth is open,
    brighten the teeth region and write before/after images under a
    sibling 'result/<filename>/' directory."""
    # Derive output paths from the input path. The [:-6]/[:-13] slices
    # assume a fixed .../public/upload/<file> layout — TODO confirm.
    filename = os.path.basename(image_path)
    publicname = os.path.dirname(image_path)[:-6]
    dirname = os.path.join(publicname, 'result', filename)
    rootname = os.path.dirname(image_path)[:-13]
    if not os.path.exists(dirname):
        os.makedirs(dirname)
    img = io.imread(image_path)
    h,w,c = img.shape
    if c == 4:
        # drop the alpha channel
        img = img[:,:,:3]
    io.imsave(dirname+"/before.jpg", img)
    # Default after-image: just the beautify filter (overwritten later if
    # teeth whitening succeeds).
    res = IsobarImg.beautifyImage(dirname+"/before.jpg")
    res.save(dirname+"/after.jpg")
    faces = detector(img, 1)
    if len(faces)==0:
        print("Face not found")
        return
    # Pick the face with the largest bounding box.
    max_face = 0
    max_face_id = 0
    for f, d in enumerate(faces):
        face_box = (d.bottom()-d.top())*(d.right()-d.left())
        if face_box > max_face:
            max_face = face_box
            max_face_id = f
    for f, d in enumerate(faces):
        if f == max_face_id:
            shape = predictor(img, d)
            break
    # `d` is the chosen face rectangle left over from the loop above.
    if (d.bottom()-d.top())*(d.right()-d.left()) < FACE_IMAGE_SIZE:
        print("Face too small:{}".format(max_face))
        return
    np_points = shape2np(shape)
    # detect an open mouth
    if (mouth_aspect_ratio(np_points)<MAR):
        print("Mouth not open")
        return
    # facial points
    # https://ibug.doc.ic.ac.uk/resources/facial-point-annotations/
    # crop face
    crop_img = img[d.top():d.bottom(),d.left():d.right()]
    if crop_img.size == 0:
        return
    if DEBUG:
        io.imsave(dirname+"/face.jpg", crop_img)
    # crop mouth (points 60+ are the inner lip / teeth region)
    mouth_max_point = np.max(np_points[60:], axis=0)
    mouth_min_point = np.min(np_points[60:], axis=0)
    if DEBUG:
        io.imsave(dirname+"/mouth.jpg", img[mouth_min_point[1]:mouth_max_point[1], mouth_min_point[0]:mouth_max_point[0]])
    # mouth: 48-67
    # teeth: 60-67
    # create blank image
    mask = np.zeros((d.bottom()-d.top(), d.right()-d.left()), np.uint8)
    # load and save image
    # im = Image.fromarray(mask)
    # im.save("blank.jpg", im)
    # create teeth mask (inner-lip polygon, shifted to face-crop coordinates)
    cv2.fillConvexPoly(mask, np.int32(np_points[60:]-(d.left(), d.top())), 1)
    if DEBUG:
        cv2.imwrite(dirname+"/mask.jpg", mask)
    crop_jpg_with_mask= cv2.bitwise_and(crop_img, crop_img, mask = mask)
    # smoothing mask so the blend edge is soft
    blur_mask = cv2.GaussianBlur(crop_jpg_with_mask,(21,21), 11.0)
    if DEBUG:
        io.imsave(dirname+'/blur_mask.jpg', blur_mask)
    # convert rgb2rgba
    crop_png = cv2.cvtColor(crop_img, cv2.COLOR_RGB2RGBA)
    np_alpha = blur_mask[:, :, 0]/255.0
    crop_png[:, :, 3] = blur_mask[:, :, 0]
    # brightness and contrast
    # Ref: https://docs.opencv.org/3.4/d3/dc1/tutorial_basic_linear_transform.html
    # cv2.convertScaleAbs is more faster
    crop_png_with_brightness = cv2.convertScaleAbs(crop_png, alpha=alpha, beta=beta)
    #crop_png_with_brightness = np.zeros(crop_png.shape, crop_png.dtype)
    #for y in range(crop_png.shape[0]):
    #    for x in range(crop_png.shape[1]):
    #        b,g,r,c = crop_png[y,x]
    #        if (c!=0):
    #            crop_png_with_brightness[y,x] = np.clip(alpha*crop_png[y,x] + beta, 0, 255)
    if DEBUG:
        io.imsave(dirname+"/brightness.png", crop_png_with_brightness)
    # output
    output = np.zeros(crop_img.shape, crop_img.dtype)
    # merge two images with alpha channel
    # Ref: https://stackoverflow.com/questions/41508458/python-opencv-overlay-an-image-with-transparency
    #output[:, :, 0] = (1.0 - np_alpha) * crop_png[:, :, 0] + np_alpha * crop_png_with_brightness[:, :, 0]
    #output[:, :, 1] = (1.0 - np_alpha) * crop_png[:, :, 1] + np_alpha * crop_png_with_brightness[:, :, 1]
    #output[:, :, 2] = (1.0 - np_alpha) * crop_png[:, :, 2] + np_alpha * crop_png_with_brightness[:, :, 2]
    np_alpha = np_alpha.reshape(crop_img.shape[0], crop_img.shape[1], 1)
    output[:, :, :] = (1.0 - np_alpha) * crop_png[:, :, :3] + np_alpha * crop_png_with_brightness[:, :, :3]
    if DEBUG:
        io.imsave(dirname+"/output.jpg", output)
    #crop_png = cv2.add(src_png, crop_png_with_brightness)
    # cv2.imwrite("output.jpg", output)
    # save before and after images
    # io.imsave(dirname+"/before.jpg", img)
    # Paste the whitened face crop back into the full image.
    img[d.top():d.bottom(),d.left():d.right()] = output
    io.imsave(dirname+"/whitening.jpg", img)
    res = IsobarImg.beautifyImage(dirname+"/whitening.jpg")
    res.save(dirname+"/after.jpg")
if __name__ == "__main__":
    # `image_path` is read at module scope by main(); note the original
    # module-level `global image_path` statement was a no-op and is dropped.
    if (len(sys.argv)>1):
        # Use the value argparse already validated rather than re-indexing
        # sys.argv: the old sys.argv[2] lookup broke whenever options such
        # as -a/-b preceded the positional arguments.
        image_path = args.file
    else:
        image_path = CONST_IMAGE_PATH
    main()
|
# coding: utf-8
""" URLs de l'application Utilisateurs """
from django.conf.urls import patterns, url
# Friend-request URL for the social app.
# NOTE(review): django.conf.urls.patterns() and string view references were
# deprecated in Django 1.8 and removed in 1.10 — this only works on old Django.
urlpatterns = patterns('',
    url(r'^ask/(?P<uid>\d+)$', 'social.views.page.add_friend', name='page-add-friend'),
)
|
# Price per item in each store; prisListe[i] belongs to butikker[i].
prisListe = [
    {"salat" : 12, "fisk" : 99, "melk" : 12, "brod" :12},
    {"salat" : 22, "fisk" : 60, "melk" : 18, "brod" :21},
    {"salat" : 8, "fisk" : 120, "melk" : 10, "brod" :19},
    {"salat" : 18, "fisk" : 40, "melk" : 30, "brod" :59},
    {"salat" : 15, "fisk" : 200, "melk" : 40, "brod" :9},
]
# Store names aligned with prisListe by index.
butikker = ["Rema1000", "Meny", "Kiwi","Spar", "Joker"]
def finnButikk(handleListe, butikker, prisListe):
    """Return the name of the cheapest store for a shopping list.

    handleListe: iterable of item names to buy.
    butikker:    store names, index-aligned with prisListe.
    prisListe:   one {item: price} dict per store.

    Items missing from a store's price dict simply do not contribute to
    that store's total (same behavior as the membership test before).
    """
    # Bug fix: the old sentinel `minstePris = 2000` silently returned the
    # first store whenever every store's total exceeded 2000. float('inf')
    # makes the comparison correct for any price level.
    minstePris = float('inf')
    butikkIndeks = 0
    for teller, priser in enumerate(prisListe):
        prisPaaButikk = sum(
            pris for vare, pris in priser.items() if vare in handleListe
        )
        if prisPaaButikk < minstePris:
            minstePris = prisPaaButikk
            butikkIndeks = teller
    return butikker[butikkIndeks]
|
# Generated by Django 3.1.7 on 2021-04-11 14:15
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration (2021-04-11).

    Consolidates the separate Allowances and Deductions tables into a single
    AllowancesDeductions model, adds bonus/net_salary columns to Payroll,
    and drops the now-unused PayrollDetails model.
    NOTE: generated file -- do not hand-edit the operations.
    """
    dependencies = [
        ('loginanddashboard', '0015_auto_20210411_1053'),
    ]
    operations = [
        migrations.CreateModel(
            name='AllowancesDeductions',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('allowance_type', models.CharField(max_length=100)),
                ('a_description', models.CharField(max_length=200)),
                ('a_amount', models.BigIntegerField(null=True)),
                ('deduction_type', models.CharField(max_length=100)),
                ('d_description', models.CharField(max_length=200)),
                ('d_amount', models.BigIntegerField(null=True)),
            ],
        ),
        migrations.DeleteModel(
            name='Allowances',
        ),
        migrations.DeleteModel(
            name='Deductions',
        ),
        migrations.AddField(
            model_name='payroll',
            name='bonus',
            field=models.FloatField(null=True),
        ),
        migrations.AddField(
            model_name='payroll',
            name='net_salary',
            field=models.FloatField(null=True),
        ),
        migrations.DeleteModel(
            name='PayrollDetails',
        ),
    ]
|
import math
num = int(input())
num = math.fabs(num)
count = 1
num = num // 10
while num > 0:
num //= 10
count += 1
print(count)
|
from __future__ import print_function
import inspect
import os
def is_debugging():
    """Return True when any frame in the current call stack originates
    from pydevd.py (i.e. a PyDev/PyCharm debugger session is active)."""
    return any(entry[1].endswith('pydevd.py') for entry in inspect.stack())
def follow_link(fname):
    """Print *fname*; if it is a symlink, print and recursively follow the
    whole chain, resolving relative targets against the link's directory."""
    print(fname, end=" ")
    if not os.path.islink(fname):
        # Regular file (or final target): just terminate the line.
        print()
        return
    print("\n\t->", end="")
    target = os.readlink(fname)
    if not os.path.isabs(target):
        # Relative link targets are resolved against the link's own directory.
        base = '/'.join(fname.split('/')[:-1])
        target = base + '/' + target
    follow_link(target)
if __name__ == "main":
out = os.popen('locate libOpenCL.so').readlines()
for i in out:
i = i.strip()
follow_link(i)
print(os.environ['PYTHONPATH'].split(os.pathsep))
print(os.environ['LD_LIBRARY_PATH'])
|
import numpy as np
from pyphocorehelpers.indexing_helpers import build_pairwise_indicies
from scipy.ndimage import gaussian_filter1d
# plotting:
import matplotlib.pyplot as plt
def _compute_single_lap_reliability(curr_lap_filtered_spikes_df, variable_extents_array, min_subdivision_resolution:float = 0.01, spike_blurring:float = 80.0, span_width:int=None, debug_print=False):
    """Build a 1D spike-occupancy curve over position for a single lap.

    Digitizes the lap's spike x-positions into fine position bins, deposits a
    peak (optionally widened to a rectangular span) at each spike's bin, and
    optionally Gaussian-blurs the result.

    Args:
        curr_lap_filtered_spikes_df: spikes for one lap; must have an 'x' column.
        variable_extents_array: list of (min, max) tuples per variable; only the
            first (x) is used.
        min_subdivision_resolution: target bin width in position units.
        spike_blurring: Gaussian sigma in position units (None/0 disables).
        span_width: when > 0, each spike also marks +/- span_width-1 neighbor
            bins with half weight (rectangle instead of a single-bin delta).
        debug_print: verbose logging.

    Returns:
        (out_indicies, out_digitized_position_bins, out_within_lap_spikes_overlap)
    """
    # for now, just do x (first variable)
    curr_variable_extents = variable_extents_array[0]
    num_subdivisions = int(np.ceil((curr_variable_extents[1] - curr_variable_extents[0])/min_subdivision_resolution))
    actual_subdivision_step_size = (curr_variable_extents[1] - curr_variable_extents[0]) / float(num_subdivisions) # the actual exact size of the bin
    if debug_print:
        print(f'for min_subdivision_resolution: {min_subdivision_resolution} -> num_subdivisions: {num_subdivisions}, actual_subdivision_step_size: {actual_subdivision_step_size}')
    out_indicies = np.arange(num_subdivisions)
    out_digitized_position_bins = np.linspace(curr_variable_extents[0], curr_variable_extents[1], num_subdivisions, dtype=float)#.astype(float)
    out_within_lap_spikes_overlap = np.zeros_like(out_digitized_position_bins, dtype=float)
    curr_digitized_variable = np.digitize(curr_lap_filtered_spikes_df['x'].to_numpy(), out_digitized_position_bins) # these are indicies
    # perform span_width: a span is a fixed width for each spike instead of a single bin wide delta function (using a rectangle function instead)
    if (span_width is not None) and (span_width > 0.0):
        span_range = np.arange(1, span_width)
        # span_ranges = [i-span_range for i in curr_digitized_variable]
        # NOTE(review): value +/- span_range can index past the array ends for
        # spikes in the outermost bins (negative wraps; overflow raises) -- confirm
        # extents always leave a margin of span_width bins.
        for i, value in enumerate(curr_digitized_variable):
            out_within_lap_spikes_overlap[value-span_range] += 5.0 # half-weight left shoulder of the span
            out_within_lap_spikes_overlap[value] += 10.0 # full-weight peak at the spike's bin
            out_within_lap_spikes_overlap[value+span_range] += 5.0 # half-weight right shoulder of the span
    else:
        out_within_lap_spikes_overlap[curr_digitized_variable] = 10.0 # delta function: mark spike bins (overwrites, does not accumulate)
    # perform spike_blurring:
    if (spike_blurring is not None) and (spike_blurring > 0.0):
        # convert spike_blurring from real units (which is how it's input) to bins
        spike_blurring_step_units = (spike_blurring / actual_subdivision_step_size)
        if debug_print:
            print(f'spike_blurring: {spike_blurring}, spike_blurring_step_units: {spike_blurring_step_units}')
        out_within_lap_spikes_overlap = gaussian_filter1d(out_within_lap_spikes_overlap, sigma=spike_blurring_step_units)
    else:
        if debug_print:
            print('spike blurring disabled because spike_blurring is set to None or 0.0')
    # np.convolve(out[curr_digitized_variable], np.
    return out_indicies, out_digitized_position_bins, out_within_lap_spikes_overlap
def compute_lap_to_lap_reliability(active_pf, filtered_spikes_df, lap_ids, cellind, min_subdivision_resolution:float = 0.01, plot_results=False, plot_horizontal=True, debug_print=True):
    """ Computes the reliability of a placecell from lap-to-lap

    Builds one position-occupancy curve per lap (via
    _compute_single_lap_reliability), stacks them into a (bins x laps) matrix,
    computes lap-to-lap diffs and cumulative/overall products, and optionally
    plots each stage.

    Returns:
        (out_indicies, out_digitized_position_bins, out_within_lap_spikes_overlap)
        where the last is the (num_bins, num_laps) per-lap overlap matrix.
        NOTE(review): all_laps_reliability/cum_laps_reliability are computed
        (and plotted) but not returned -- confirm this is intentional.

    Example:
        curr_result_label = 'maze1'
        sess = curr_kdiba_pipeline.filtered_sessions[curr_result_label]
        # sess = curr_kdiba_pipeline.sess
        curr_neuron_IDX = 2
        # curr_neuron_IDX = 3 # good for end platform analysis
        curr_cell_ID = sess.spikes_df.spikes.neuron_ids[curr_neuron_IDX]
        print(f'curr_neuron_IDX: {curr_neuron_IDX}, curr_cell_ID: {curr_cell_ID}')
        # pre-filter by spikes that occur in one of the included laps for the filtered_spikes_df
        filtered_spikes_df = sess.spikes_df.copy()
        time_variable_name = filtered_spikes_df.spikes.time_variable_name # 't_rel_seconds'
        lap_ids = sess.laps.lap_id
        # lap_flat_idxs = sess.laps.get_lap_flat_indicies(lap_ids)
        out_indicies, out_digitized_position_bins, out, all_laps_reliability = compute_lap_to_lap_reliability(curr_kdiba_pipeline.computation_results[curr_result_label].computed_data['pf2D'], filtered_spikes_df, lap_ids, curr_neuron_IDX, debug_print=False);
    """
    time_variable_name = filtered_spikes_df.spikes.time_variable_name # 't_rel_seconds'
    # Gather the position variables the placefield was computed over (1D or 2D).
    if active_pf.ndim < 2:
        variable_array = [active_pf.x]
        label_array = ["X position (cm)"]
    else:
        variable_array = [active_pf.x, active_pf.y]
        label_array = ["X position (cm)", "Y position (cm)"]
    # compute extents:
    variable_extents_array = [(np.nanmin(a_var), np.nanmax(a_var)) for a_var in variable_array]
    # for now, just do x (first variable)
    curr_variable_extents = variable_extents_array[0]
    num_subdivisions = int(np.ceil((curr_variable_extents[1] - curr_variable_extents[0])/min_subdivision_resolution))
    if debug_print:
        print(f'for min_subdivision_resolution: {min_subdivision_resolution} -> num_subdivisions: {num_subdivisions}')
    # Pre-allocate output variables:
    out_indicies = np.arange(num_subdivisions)
    out_digitized_position_bins = np.linspace(curr_variable_extents[0], curr_variable_extents[1], num_subdivisions, dtype=float)#.astype(float)
    # One column per lap; filled in the loop below.
    out_within_lap_spikes_overlap = np.zeros([num_subdivisions, len(lap_ids)], dtype=float)
    # all spike times and positions for the specified cellind:
    spk_pos_, spk_t_ = active_pf.spk_pos[cellind], active_pf.spk_t[cellind]
    # filtered_spikes_df = filtered_spikes_df[np.isin(filtered_spikes_df['lap'], included_lap_ids)] # get only the spikes that occur in one of the included laps for the filtered_spikes_df
    if debug_print:
        print('filtering spikes by times in pf2D', end=' ')
    # Restrict the dataframe to this cell's spikes by matching exact spike times.
    filtered_spikes_df = filtered_spikes_df[np.isin(filtered_spikes_df[time_variable_name].to_numpy(), spk_t_)] # get only the spikes that occur in one of the included laps for the filtered_spikes_df
    if debug_print:
        print('done.')
    # testing only:
    # lap_ids = [lap_ids[0], lap_ids[1]] # TODO: TEST ONLY FIRST ELEMENT
    flat_lap_idxs = np.arange(len(lap_ids))
    should_share_non_common_axes_lims = False
    if plot_results:
        if plot_horizontal:
            fig, axs = plt.subplots(1, len(lap_ids), sharex=should_share_non_common_axes_lims, sharey=True, figsize=(40, 24))
        else:
            # vertical
            fig, axs = plt.subplots(len(lap_ids), 1, sharex=True, sharey=should_share_non_common_axes_lims, figsize=(24, 40))
    for lap_idx, lap_ID in zip(flat_lap_idxs, lap_ids):
        # for each lap
        curr_lap_filtered_spikes_df = filtered_spikes_df[filtered_spikes_df['lap'] == lap_ID] # get only the spikes that occur in one of the included laps for the filtered_spikes_df
        if debug_print:
            print(f'{lap_idx},{lap_ID}: spikes {np.shape(curr_lap_filtered_spikes_df)[0]}')
        # NOTE(review): spike_blurring is hard-coded to 5.0 here, overriding the
        # helper's 80.0 default -- confirm which value is intended.
        out_indicies, out_digitized_position_bins, out_within_lap_spikes_overlap[:, lap_idx] = _compute_single_lap_reliability(curr_lap_filtered_spikes_df, variable_extents_array, min_subdivision_resolution=min_subdivision_resolution, spike_blurring=5.0, span_width=None, debug_print=debug_print)
        # Debug Plotting to test the produced output:
        if plot_results:
            if plot_horizontal:
                axs[lap_idx].plot(out_within_lap_spikes_overlap[:, lap_idx], out_digitized_position_bins)
            else:
                # vertical
                axs[lap_idx].plot(out_digitized_position_bins, out_within_lap_spikes_overlap[:, lap_idx])
    # Actual Computations of Reliability:
    out_pairwise_pair_results = np.zeros_like(out_within_lap_spikes_overlap)
    # do simple diff:
    laps_spikes_overlap_diff = np.diff(out_within_lap_spikes_overlap, axis=1) # the element-wise diff of the overlap. Shows changes.
    # Column 0 stays zero; diff of lap i vs lap i-1 lands in column i.
    out_pairwise_pair_results[:, 1:] = laps_spikes_overlap_diff
    # out_pairwise_pair_results[:, -1] = np.zeros_like(out_within_lap_spikes_overlap[:,0])
    # do custom pairwise operation:
    # for first_item_lap_idx, next_item_lap_idx in list(out_pairwise_flat_lap_indicies):
    #     first_item = out_within_lap_spikes_overlap[:, first_item_lap_idx]
    #     next_item = out_within_lap_spikes_overlap[:, next_item_lap_idx]
    #     out_pairwise_pair_results[:, next_item_lap_idx] = (first_item * next_item) # the result should be stored in the index of the second item, if we're doing the typical backwards style differences.
    #     # print(f'np.max(out_pairwise_pair_results[:, next_item_lap_idx]): {np.max(out_pairwise_pair_results[:, next_item_lap_idx])}')
    if debug_print:
        print(f'max out: {np.max(out_pairwise_pair_results)}')
    # add to the extant plot as a new color:
    if plot_results:
        for lap_idx, lap_ID in zip(flat_lap_idxs, lap_ids):
            # curr_lap_alt_ax = axs[lap_idx]
            if plot_horizontal:
                curr_lap_alt_ax = axs[lap_idx].twiny()
                curr_lap_alt_ax.plot(out_pairwise_pair_results[:, lap_idx], out_digitized_position_bins, '--r')
            else:
                # vertical
                curr_lap_alt_ax = axs[lap_idx].twinx()
                curr_lap_alt_ax.plot(out_digitized_position_bins, out_pairwise_pair_results[:, lap_idx], '--r')
    # Product across laps: a bin is "reliable" only if active on every lap.
    cum_laps_reliability = np.cumprod(out_within_lap_spikes_overlap, axis=1)
    all_laps_reliability = np.prod(out_within_lap_spikes_overlap, axis=1, keepdims=True)
    if plot_results:
        fig_result, axs_result = plt.subplots(2, 1, sharex=True, sharey=True, figsize=(24, 40))
        axs_result[0].plot(out_digitized_position_bins, all_laps_reliability, 'r')
        axs_result[1].plot(out_digitized_position_bins, cum_laps_reliability, 'r')
    return out_indicies, out_digitized_position_bins, out_within_lap_spikes_overlap
# def compute_reliability_metrics(out_indicies, out_digitized_position_bins, out_within_lap_spikes_overlap, debug_print=False, plot_results=False):
# """ Takes input from compute_lap_to_lap_reliability(...) to build the actual reliability metrics """
# # Actual Computations of Reliability:
# out_pairwise_pair_results = np.zeros_like(out_within_lap_spikes_overlap)
# # do simple diff:
# laps_spikes_overlap_diff = np.diff(out_within_lap_spikes_overlap, axis=1) # the element-wise diff of the overlap. Shows changes.
# out_pairwise_pair_results[:, 1:] = laps_spikes_overlap_diff
# # out_pairwise_pair_results[:, -1] = np.zeros_like(out_within_lap_spikes_overlap[:,0])
# # do custom pairwise operation:
# # for first_item_lap_idx, next_item_lap_idx in list(out_pairwise_flat_lap_indicies):
# # first_item = out_within_lap_spikes_overlap[:, first_item_lap_idx]
# # next_item = out_within_lap_spikes_overlap[:, next_item_lap_idx]
# # out_pairwise_pair_results[:, next_item_lap_idx] = (first_item * next_item) # the result should be stored in the index of the second item, if we're doing the typical backwards style differences.
# # # print(f'np.max(out_pairwise_pair_results[:, next_item_lap_idx]): {np.max(out_pairwise_pair_results[:, next_item_lap_idx])}')
# if debug_print:
# print(f'max out: {np.max(out_pairwise_pair_results)}')
# lap_ids
# flat_lap_idxs = np.arange(len(lap_ids))
# # add to the extant plot as a new color:
# if plot_results:
# for lap_idx, lap_ID in zip(flat_lap_idxs, lap_ids):
# # curr_lap_alt_ax = axs[lap_idx]
# if plot_horizontal:
# curr_lap_alt_ax = axs[lap_idx].twiny()
# curr_lap_alt_ax.plot(out_pairwise_pair_results[:, lap_idx], out_digitized_position_bins, '--r')
# else:
# # vertical
# curr_lap_alt_ax = axs[lap_idx].twinx()
# curr_lap_alt_ax.plot(out_digitized_position_bins, out_pairwise_pair_results[:, lap_idx], '--r')
# cum_laps_reliability = np.cumprod(out_within_lap_spikes_overlap, axis=1)
# all_laps_reliability = np.prod(out_within_lap_spikes_overlap, axis=1, keepdims=True)
# if plot_results:
# fig_result, axs_result = plt.subplots(2, 1, sharex=True, sharey=True, figsize=(24, 40))
# axs_result[0].plot(out_digitized_position_bins, all_laps_reliability, 'r')
# axs_result[1].plot(out_digitized_position_bins, cum_laps_reliability, 'r')
|
import json
from ctypes import (
c_char_p,
c_size_t,
c_uint8,
c_uint32,
c_uint64,
c_bool,
c_void_p,
byref,
pointer,
addressof,
)
from .lib import (
_lib,
_encode,
_flags,
RNP_KEY_EXPORT_ARMORED,
RNP_KEY_EXPORT_PUBLIC,
RNP_KEY_EXPORT_SECRET,
RNP_KEY_EXPORT_SUBKEYS,
RNP_JSON_PUBLIC_MPIS,
RNP_JSON_SECRET_MPIS,
RNP_JSON_SIGNATURES,
RNP_JSON_SIGNATURE_MPIS,
RNP_KEY_REMOVE_PUBLIC,
RNP_KEY_REMOVE_SECRET,
RNP_KEY_REMOVE_SUBKEYS,
RNP_JSON_DUMP_MPI,
RNP_JSON_DUMP_RAW,
RNP_JSON_DUMP_GRIP,
RNP_KEY_SUBKEYS_ONLY,
RNP_ERROR_KEY_NOT_FOUND,
RNP_ERROR_NO_SUITABLE_KEY,
RnpException,
)
from .output import Output
class Key:
    """Pythonic wrapper around an rnp key handle.

    Most methods are thin ctypes shims over the corresponding ``rnp_key_*``
    FFI entry point in ``_lib``; out-parameters are converted to Python
    values by the ``_string_property`` / ``_bool_property`` / ``_size_t_property``
    / ``_buf_property`` helpers at the bottom of the class.
    """
    def __init__(self, obj, free=True):
        # obj: raw ctypes key handle.
        # free: when True this wrapper owns the handle and destroys it in __del__.
        self._obj = obj
        self._free = free
    def __del__(self):
        if self._free:
            _lib.rnp_key_handle_destroy(self._obj)
    def obj(self):
        # Expose the raw handle for FFI calls made by collaborating objects.
        return self._obj
    def alg(self):
        return self._string_property(_lib.rnp_key_get_alg)
    def fingerprint(self):
        return self._string_property(_lib.rnp_key_get_fprint)
    def keyid(self):
        return self._string_property(_lib.rnp_key_get_keyid)
    def grip(self):
        return self._string_property(_lib.rnp_key_get_grip)
    def primary_grip(self):
        return self._string_property(_lib.rnp_key_get_primary_grip)
    def primary_fingerprint(self):
        return self._string_property(_lib.rnp_key_get_primary_fprint)
    def primary_userid(self):
        return self._string_property(_lib.rnp_key_get_primary_uid)
    def userids(self):
        """Yield the textual user IDs (UTF-8 decoded), skipping non-UID types."""
        from .uid import UID
        return map(
            lambda uid: uid.data().decode("utf-8"),
            filter(lambda uid: uid.type() == UID.RNP_USER_ID, self.uids()),
        )
    def uids(self):
        """Generator over all UID handles attached to this key."""
        from .uid import UID
        count = c_size_t()
        _lib.rnp_key_get_uid_count(self._obj, byref(count))
        for i in range(count.value):
            userid = c_void_p()
            _lib.rnp_key_get_uid_handle_at(self._obj, i, byref(userid))
            yield UID(userid.value)
    def add_userid(
        self, userid, hashalg=None, expiration_time=0, key_flags=0, primary=False
    ):
        """Attach a new user ID (certified with hashalg) to this key."""
        _lib.rnp_key_add_uid(
            self._obj,
            userid.encode("utf-8"),
            _encode(hashalg),
            expiration_time,
            key_flags,
            primary,
        )
    def signatures(self):
        """Generator over the signatures directly on this key."""
        from .signature import Signature
        count = c_size_t()
        _lib.rnp_key_get_signature_count(self._obj, byref(count))
        for i in range(count.value):
            psig = c_void_p()
            _lib.rnp_key_get_signature_at(self._obj, i, byref(psig))
            yield Signature(psig.value)
    def bits(self):
        bits = c_uint32()
        _lib.rnp_key_get_bits(self._obj, byref(bits))
        return bits.value
    def qbits(self):
        # DSA q-parameter size; meaningful for DSA keys only.
        qbits = c_uint32()
        _lib.rnp_key_get_dsa_qbits(self._obj, byref(qbits))
        return qbits.value
    def curve(self):
        return self._string_property(_lib.rnp_key_get_curve)
    def is_locked(self):
        pbool = c_bool()
        _lib.rnp_key_is_locked(self.obj(), byref(pbool))
        return pbool.value
    def lock(self):
        _lib.rnp_key_lock(self.obj())
    def unlock(self, password=None):
        _lib.rnp_key_unlock(self._obj, _encode(password))
    def is_protected(self):
        pbool = c_bool()
        _lib.rnp_key_is_protected(self.obj(), byref(pbool))
        return pbool.value
    def protect(
        self,
        password,
        cipher=None,
        cipher_mode=None,
        s2k_hashalg=None,
        s2k_iterations=0,
    ):
        """Encrypt the secret key material with a password (None args = defaults)."""
        _lib.rnp_key_protect(
            self._obj,
            _encode(password),
            _encode(cipher),
            _encode(cipher_mode),
            _encode(s2k_hashalg),
            s2k_iterations,
        )
    def unprotect(self, password=None):
        _lib.rnp_key_unprotect(self._obj, _encode(password))
    def is_primary(self):
        return self._bool_property(_lib.rnp_key_is_primary)
    def is_sub(self):
        return self._bool_property(_lib.rnp_key_is_sub)
    def has_public_key(self):
        return self._bool_property(_lib.rnp_key_have_public)
    def has_secret_key(self):
        return self._bool_property(_lib.rnp_key_have_secret)
    def is_valid(self):
        return self._bool_property(_lib.rnp_key_is_valid)
    def protection_cipher(self):
        return self._string_property(_lib.rnp_key_get_protection_cipher)
    def protection_hashalg(self):
        return self._string_property(_lib.rnp_key_get_protection_hash)
    def protection_mode(self):
        return self._string_property(_lib.rnp_key_get_protection_mode)
    def protection_type(self):
        return self._string_property(_lib.rnp_key_get_protection_type)
    def protection_iterations(self):
        return self._size_t_property(_lib.rnp_key_get_protection_iterations)
    def export_public(self, armored=True, include_subkeys=False, outp=None):
        """Export the public key; returns outp's default output when outp is None."""
        with Output.default(outp) as outp:
            self._export(armored, True, False, include_subkeys, outp)
            return outp.default_output()
    def export_secret(self, armored=True, include_subkeys=False, outp=None):
        with Output.default(outp) as outp:
            self._export(armored, False, True, include_subkeys, outp)
            return outp.default_output()
    def export_revocation(self, hashalg=None, code=None, reason=None, outp=None):
        """Export a revocation signature for this key."""
        with Output.default(outp) as outp:
            _lib.rnp_key_export_revocation(
                self.obj(),
                outp.obj(),
                0,
                _encode(hashalg),
                _encode(code),
                _encode(reason),
            )
            return outp.default_output()
    def public_key_data(self):
        return self._buf_property(_lib.rnp_get_public_key_data)
    def secret_key_data(self):
        return self._buf_property(_lib.rnp_get_secret_key_data)
    def to(self, usage, subkeys_only=False):
        """Return the default (sub)key for a usage ('sign', 'encrypt', ...),
        or None when no suitable key exists."""
        pkey = c_void_p()
        flags = _flags([(subkeys_only, RNP_KEY_SUBKEYS_ONLY)])
        rc = _lib.rnp_key_get_default_key(
            self._obj, usage.encode("ascii"), flags, byref(pkey)
        )
        # "not found" and "no suitable key" are expected outcomes, not errors.
        if rc not in [0, RNP_ERROR_KEY_NOT_FOUND, RNP_ERROR_NO_SUITABLE_KEY]:
            raise RnpException("rnp_key_get_default_key failed", rc)
        if pkey.value:
            return Key(pkey.value)
        return None
    def json(
        self,
        public_mpis=False,
        secret_mpis=False,
        signatures=True,
        signature_mpis=False,
    ):
        """Return this key's metadata as a parsed JSON structure."""
        flags = _flags(
            [
                (public_mpis, RNP_JSON_PUBLIC_MPIS),
                (secret_mpis, RNP_JSON_SECRET_MPIS),
                (signatures, RNP_JSON_SIGNATURES),
                (signature_mpis, RNP_JSON_SIGNATURE_MPIS),
            ]
        )
        jsn = c_char_p()
        try:
            _lib.rnp_key_to_json(self._obj, flags, byref(jsn))
            # pylint: disable=E1101
            return json.loads(jsn.value.decode("utf-8"))
        finally:
            _lib.rnp_buffer_destroy(jsn)
    def packets_json(self, secret=False, mpi=False, raw=False, grip=False):
        """Return a JSON dump of the raw OpenPGP packets of this key."""
        flags = _flags(
            [
                (mpi, RNP_JSON_DUMP_MPI),
                (raw, RNP_JSON_DUMP_RAW),
                (grip, RNP_JSON_DUMP_GRIP),
            ]
        )
        jsn = c_char_p()
        try:
            _lib.rnp_key_packets_to_json(self._obj, secret, flags, byref(jsn))
            # pylint: disable=E1101
            return json.loads(jsn.value.decode("utf-8"))
        finally:
            _lib.rnp_buffer_destroy(jsn)
    def remove(self, remove_public=True, remove_secret=True, remove_subkeys=False):
        flags = _flags(
            [
                (remove_public, RNP_KEY_REMOVE_PUBLIC),
                (remove_secret, RNP_KEY_REMOVE_SECRET),
                (remove_subkeys, RNP_KEY_REMOVE_SUBKEYS),
            ]
        )
        _lib.rnp_key_remove(self._obj, flags)
    def revoke(self, hashalg=None, code=None, reason=None):
        _lib.rnp_key_revoke(
            self._obj, 0, _encode(hashalg), _encode(code), _encode(reason)
        )
    def revocation_signature(self):
        """Return the revocation Signature, or None when the key is not revoked."""
        from .signature import Signature
        psig = c_void_p()
        _lib.rnp_key_get_revocation_signature(self._obj, byref(psig))
        if psig.value:
            return Signature(psig.value)
        return None
    def subkeys(self):
        """Generator over this key's subkeys."""
        count = c_size_t()
        _lib.rnp_key_get_subkey_count(self._obj, byref(count))
        for i in range(count.value):
            pkey = c_void_p()
            _lib.rnp_key_get_subkey_at(self._obj, i, byref(pkey))
            yield Key(pkey.value)
    def can_sign(self):
        result = c_bool()
        _lib.rnp_key_allows_usage(self._obj, "sign".encode("ascii"), byref(result))
        return result.value
    def can_certify(self):
        result = c_bool()
        _lib.rnp_key_allows_usage(self._obj, "certify".encode("ascii"), byref(result))
        return result.value
    def can_encrypt(self):
        result = c_bool()
        _lib.rnp_key_allows_usage(self._obj, "encrypt".encode("ascii"), byref(result))
        return result.value
    def can_authenticate(self):
        result = c_bool()
        _lib.rnp_key_allows_usage(
            self._obj, "authenticate".encode("ascii"), byref(result)
        )
        return result.value
    def is_revoked(self):
        return self._bool_property(_lib.rnp_key_is_revoked)
    def is_compromised(self):
        return self._bool_property(_lib.rnp_key_is_compromised)
    def is_retired(self):
        return self._bool_property(_lib.rnp_key_is_retired)
    def is_superseded(self):
        return self._bool_property(_lib.rnp_key_is_superseded)
    def revocation_reason(self):
        return self._string_property(_lib.rnp_key_get_revocation_reason)
    def creation_time(self):
        # Seconds since the Unix epoch.
        time = c_uint32()
        _lib.rnp_key_get_creation(self._obj, byref(time))
        return time.value
    def lifetime(self):
        # Expiration delta in seconds (0 means never expires, per OpenPGP convention
        # -- NOTE(review): confirm against librnp docs).
        secs = c_uint32()
        _lib.rnp_key_get_expiration(self._obj, byref(secs))
        return secs.value
    def set_lifetime(self, secs):
        _lib.rnp_key_set_expiration(self._obj, secs)
    def valid_until(self):
        # 64-bit variant avoids the year-2038 truncation of the 32-bit API.
        result = c_uint64()
        _lib.rnp_key_valid_till64(self._obj, byref(result))
        return result.value
    def _export(self, armored, public_key, secret_key, include_subkeys, outp):
        # Shared implementation for export_public/export_secret.
        flags = _flags(
            [
                (armored, RNP_KEY_EXPORT_ARMORED),
                (public_key, RNP_KEY_EXPORT_PUBLIC),
                (secret_key, RNP_KEY_EXPORT_SECRET),
                (include_subkeys, RNP_KEY_EXPORT_SUBKEYS),
            ]
        )
        _lib.rnp_key_export(self._obj, outp.obj(), flags)
    def _buf_property(self, fn):
        """Call fn(handle, &buf, &len) and return the buffer as bytes,
        freeing the librnp-allocated buffer afterwards."""
        buf = pointer(c_uint8())
        buf_len = c_size_t()
        try:
            fn(self._obj, byref(buf), byref(buf_len))
            # Copy the C buffer into an owned bytes object before it is freed.
            return bytes(
                (c_uint8 * buf_len.value).from_address(addressof(buf.contents))
            )
        finally:
            _lib.rnp_buffer_destroy(buf)
    def _string_property(self, fn):
        """Call fn(handle, &cstr) and return the result decoded as UTF-8."""
        prop = c_char_p()
        try:
            fn(self._obj, byref(prop))
            # pylint: disable=E1101
            return prop.value.decode("utf-8")
        finally:
            _lib.rnp_buffer_destroy(prop)
    def _bool_property(self, fn):
        prop = c_bool()
        fn(self._obj, byref(prop))
        return prop.value
    def _size_t_property(self, fn):
        prop = c_size_t()
        fn(self._obj, byref(prop))
        return prop.value
|
from django.db import models
class Sequence(models.Model):
    """
    DNA sequence that can be added and then requested by name.
    Sequence can have only "ACTGactg" letters, maximum 20000. Name is up to 100 chars.
    Both fields are mandatory.
    """
    # NOTE(review): max_length on a TextField is enforced only by Django forms,
    # not at the database level, and no validator here restricts the alphabet
    # to "ACTGactg" -- confirm the documented constraints are enforced elsewhere.
    name = models.CharField(max_length=100, blank=False)
    sequence = models.TextField(max_length=20000, blank=False)
    def __str__(self):
        return self.name
    # Explicit default manager.
    objects = models.Manager()
|
import urllib.request
import urllib.parse
import urllib.error
url="http://www.hhhhhddddd123.com"
'''
try:
request_obj=urllib.request.Request(url=url)
response=urllib.request.urlopen(request_obj)
except urllib.error.URLError as e:
print(e.reason)
'''
try:
responese=urllib.request.urlopen("http://www.doban.com/abc")
except urllib.error.HTTPError as e:
print("发现HTTPError异常,异常原因:",e.reason)
print("响应状态码:",e.code)
print("请求头信息:",e.headers)
except urllib.error.URLError as err:
print(err.reason)
|
from maze_builder.meshes.mesh import MeshBuilder, MeshTransformation, MeshWarp
from maze_builder.meshes.scene import *
from maze_builder.meshes.yafaray import dump_yafaray
from maze_builder.meshes.obj import dump_obj
from maze_builder import random2
from .template import resource
import random
from maze_builder.util import timed, is_verbose
import io
YAFARAY_FILENAME = 'out.yafaray.xml'
OBJ_FILENAME = 'out.obj'
class Mesher2D(object):
    """Turns a 2D maze (a cubic with a single Z level) into a triangle mesh.

    Each room contributes a floor quad plus wall/corridor geometry toward its
    -X and -Y neighbours; one extra row (y = maxy+1) and one extra column
    (x = maxx+1) of phantom rooms close the +Y/+X boundaries.
    """
    def __init__(
            self,
            wall=0.1,
            material=None, density=1, enclosed=False,
            **attrs
    ):
        # wall may be a callable so each draw() call can sample its own thickness.
        self.wall = wall
        self.material = material
        self.density = density
        self.enclosed = enclosed
        self.attrs = attrs
        self.rectangle_kwargs = dict(material=self.material, density=self.density)
    def __call__(self, cubic):
        return self.draw(cubic)
    def draw(self, cubic):
        """Build and return a MeshBuilder for the maze `cubic`."""
        with timed(is_verbose(1), 'Meshing maze...', 'Maze generated in {0:.3f}s'):
            mesh = MeshBuilder(**self.attrs)
            wall = self.wall() if callable(self.wall) else self.wall
            for room in cubic.rooms:
                self._draw_room(cubic, mesh, room, wall)
            # Phantom top row (y = maxy+1) closes the +Y boundary.
            for i in range(int(cubic.maxx - cubic.minx) + 1):
                x = cubic.minx + i
                self._draw_room(cubic, mesh, (x, cubic.maxy+1, cubic.minz), wall)
            # Phantom right column (x = maxx+1) closes the +X boundary.
            # Bug fix: this loop walks the Y extent, so its count must come from
            # (maxy - miny); it previously used (maxx - minx), which drew the
            # wrong number of boundary cells for non-square mazes.
            for j in range(int(cubic.maxy - cubic.miny) + 1):
                y = cubic.miny + j
                self._draw_room(cubic, mesh, (cubic.maxx+1, y, cubic.minz), wall)
            # Corner cell shared by the phantom row and column.
            self._draw_room(cubic, mesh, (cubic.maxx+1, cubic.maxy+1, cubic.minz), wall)
            return mesh
    def _draw_room(self, cubic, mesh, coords, wall):
        """Emit the quads for one room cell at `coords` (walls of width `wall`)."""
        x, y, z = coords
        if z != cubic.maxz:
            raise RuntimeError('Illustrator only works for 2D')
        # Which grid boundaries this cell touches.
        xbot = x == cubic.minx
        ybot = y == cubic.miny
        xtop = x == cubic.maxx+1
        ytop = y == cubic.maxy+1
        # Bottom (z) and top (z+1) vertex grids; lowercase = floor, uppercase = ceiling.
        va = mesh.enter_vertex((x, y, z))
        vb = mesh.enter_vertex((x + wall, y, z))
        vc = mesh.enter_vertex((x + 1, y, z))
        vd = mesh.enter_vertex((x, y + wall, z))
        ve = mesh.enter_vertex((x + wall, y + wall, z))
        vf = mesh.enter_vertex((x + 1, y + wall, z))
        vg = mesh.enter_vertex((x, y + 1, z))
        vh = mesh.enter_vertex((x + wall, y + 1, z))
        vi = mesh.enter_vertex((x + 1, y + 1, z))
        vA = mesh.enter_vertex((x, y, z+1))
        vB = mesh.enter_vertex((x + wall, y, z+1))
        vC = mesh.enter_vertex((x + 1, y, z+1))
        vD = mesh.enter_vertex((x, y + wall, z+1))
        vE = mesh.enter_vertex((x + wall, y + wall, z+1))
        vF = mesh.enter_vertex((x + 1, y + wall, z+1))
        vG = mesh.enter_vertex((x, y + 1, z+1))
        vH = mesh.enter_vertex((x + wall, y + 1, z+1))
        vI = mesh.enter_vertex((x + 1, y + 1, z+1))
        #    g--h----i   ^
        #   /|  /|       | +
        #  G-+H  |       | Y
        #  | |   |       |
        #  | d--e----f   Z   X+++>
        #  |/  /|   /|   X
        #  D--E  | F |   X
        #  |  |  b--+-c  L
        #  |  |/    |/
        #  A--B----C
        kwargs = self.rectangle_kwargs
        mesh.rectangle((vA, vB, vD), **kwargs)  # Junction roof
        if not xtop and not ytop:
            mesh.rectangle((ve, vf, vh), **kwargs)  # Room floor
        if xtop:
            mesh.rectangle((vB, vb, vE), **kwargs)
        elif xbot:
            mesh.rectangle((va, vA, vd), **kwargs)
        if not xtop and cubic.any_active_route_connecting((x, y, z), (x-1, y, z)):
            # Draw corridor in -X direction
            mesh.rectangle((vD, vE, vd), **kwargs)
            mesh.rectangle((vd, ve, vg), **kwargs)
            mesh.rectangle((vg, vh, vG), **kwargs)
        elif not ytop:
            # Draw wall in -X direction
            mesh.rectangle((vd, vD, vg), **kwargs)
            mesh.rectangle((vD, vE, vG), **kwargs)
            mesh.rectangle((vE, ve, vH), **kwargs)
        if ytop:
            mesh.rectangle((vD, vE, vd), **kwargs)
        elif ybot:
            mesh.rectangle((va, vb, vA), **kwargs)
        if not ytop and cubic.any_active_route_connecting((x, y, z), (x, y-1, z)):
            # Draw corridor in -Y direction
            mesh.rectangle((vB, vb, vE), **kwargs)
            mesh.rectangle((vb, vc, ve), **kwargs)
            mesh.rectangle((vc, vC, vf), **kwargs)
        elif not xtop:
            # Draw wall in -Y direction
            mesh.rectangle((vb, vc, vB), **kwargs)
            mesh.rectangle((vB, vC, vE), **kwargs)
            mesh.rectangle((vE, vF, ve), **kwargs)
class Warper2D(object):
    """Displaces mesh vertices in Z by a 2D noise field (terrain-style warp)."""
    def __init__(self, noise, args=(), scale=1, height=1, offset=(0, 0)):
        # noise: callable noise(x, y, *args) sampled per vertex.
        # args/scale/height/offset may each be callables, re-sampled per __call__.
        self.noise = noise
        self.args = args
        self.scale = scale
        self.height = height
        self.offset = offset
    def __call__(self, mesh):
        with timed(is_verbose(1), 'Warping mesh...', 'Mesh warped in {0:.3f}s'):
            # Resolve possibly-callable parameters once per warp so the whole
            # mesh is displaced with a single consistent sample.
            args = self.args() if callable(self.args) else self.args
            scale = self.scale() if callable(self.scale) else self.scale
            height = self.height() if callable(self.height) else self.height
            offx, offy = self.offset() if callable(self.offset) else self.offset
            def warp(v):
                # Shift z by noise sampled in scaled/offset (x, y) space.
                x, y, z = v
                z += height * scale * self.noise(offx + x / scale, offy + y / scale, *args)
                return x, y, z
            mesh.perform_warp(warp)
            # Hard-coded smoothing angle for the warped surface.
            mesh.update_attributes(smoothing_degrees=30)
            return mesh
class SceneWrapper(object):
    """Pipeline step that wraps a bare mesh in a fresh Scene."""
    def __call__(self, mesh):
        scene = Scene()
        scene.add_mesh(mesh)
        return scene
class RandomSunMaker(object):
    """Pipeline step that adds a randomized sun, ambience and sky to a scene."""
    def __call__(self, scene):
        # Power-law samples bias each channel toward 1.0, giving mostly-bright,
        # slightly tinted sunlight.
        light_color = Color(1-random.random()**2.5,1-random.random()**2.5,1-random.random()**1.5)
        # Ambience takes a random fraction of whatever the sun does not cover.
        ambience_color = (1-random.random()**1.5) * (Color.WHITE - light_color)
        sky_color = ambience_color.blowout(1-random.random()*random.random()*random.random())
        scene.set_background(sky_color)
        scene.set_ambience(Ambience(ambience_color))
        # Sun direction drawn from the upper hemisphere (kept fairly low: z <= 0.6).
        scene.add_light(Light(random2.hemisphere(1, minz=0.001, maxz=0.6), Color.WHITE, 0.5+random.random(), type='sunlight'))
        return scene
class RandomCameraPlacer(object):
    """Pipeline step that places the camera at a random hemisphere position
    above the scene's first mesh, lifted to clear nearby geometry."""
    def __init__(self, resolution=(1024, 512), distance_scale=0.75):
        # resolution: output image size; distance_scale: how far out (relative
        # to the mesh's smaller extent) the camera may wander.
        self.resolution = resolution
        self.distance_scale = distance_scale
    def __call__(self, scene):
        mesh = scene.meshes[0]
        minx, maxx = mesh.find_limits((1, 0, 0))
        miny, maxy = mesh.find_limits((0, 1, 0))
        radius = 1 + random.random() * min((maxx-minx, maxy-miny)) * self.distance_scale
        camera_location = random2.hemisphere(radius, minz=1)
        # Highest geometry in a ~2x2 patch around the candidate camera x/y,
        # used below to lift the camera above local obstacles.
        # NOTE(review): the ybounds upper limit uses maxy-2 while every other
        # bound uses a margin of 1 -- confirm the asymmetry is intentional.
        _, maxz = mesh.find_limits(
            (0, 0, 1),
            xbounds=(
                max((minx+1, camera_location[0]-1)),
                min((maxx-1, camera_location[0]+1)),
            ),
            ybounds=(
                max((miny+1, camera_location[1]-1)),
                min((maxy-2, camera_location[1]+1)),
            )
        )
        # NOTE(review): leftover debug print -- consider removing or routing
        # through the project's verbosity helpers.
        print('Camera limit {}'.format(maxz))
        # find_limits returns -inf when the patch holds no geometry; only lift
        # the camera when something was actually found.
        if maxz > -float('inf'):
            camera_location = (camera_location[0], camera_location[1], camera_location[2] + maxz)
        # Aim at a random height above the maze center (2x2 patch around origin).
        cminz, cmaxz = mesh.find_limits((0, 0, 1), xbounds=(-1, 1), ybounds=(-1, 1))
        look_at = (0, 0, random.random() * (cminz + cmaxz)/2)
        scene.set_camera(Camera(camera_location, look_at, resolution=self.resolution))
        return scene
class ObjSaver(object):
    """Callable pipeline step that writes a mesh (or the first mesh of a
    Scene) to a Wavefront OBJ file and returns the filename."""
    def __init__(self, filename=OBJ_FILENAME):
        self.filename = filename
    def __call__(self, mesh):
        # Accept either a bare mesh or a Scene; unwrap the latter.
        target = mesh.meshes[0] if isinstance(mesh, Scene) else mesh
        with open(self.filename, 'w') as handle:
            dump_obj(handle, target)
        return self.filename
class YafaraySaver(object):
    """Callable pipeline step that writes a scene to yafaray XML and returns
    the filename."""
    def __init__(self, filename=YAFARAY_FILENAME, material_map=None, xml='simple.yafaray.xml'):
        # material_map: maps mesh material keys to yafaray material names;
        # defaults to mapping everything to 'defaultMat'.
        self.filename = filename
        self.material_map = material_map or {None: 'defaultMat'}
        self.xml = xml
    def __call__(self, scene):
        with timed(is_verbose(1), 'Saving scene to yafaray XML format...', 'Scene saved in {0:.3f}s'):
            # Bug fix: honour the material_map supplied to the constructor --
            # it was previously ignored in favour of the hard-coded default.
            dump_yafaray(self.filename, resource(self.xml), scene, material_map=self.material_map)
        return self.filename
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import subprocess
import argparse
import socket
from time import sleep
import grpc
import uuid
import sys
import traceback
from common.config import Config
from maum.m2u.facade import dialog_pb2
from maum.m2u.da import provider_pb2 as provider
from maum.m2u.da.v1 import talk_pb2 as talk_v1
from maum.m2u.da.v2 import talk_pb2 as talk_v2
from maum.m2u.server import pool_pb2 as pool
from google.protobuf import empty_pb2
class DialogAgentRunner(object):
version = '0.8'
exec_cmd = []
respawn = False
chatbot = ''
skill = ''
conf = Config()
child_pid = 0
pipe = None
res_key = None
da_spec = None
    def __init__(self):
        # All state is configured via the set_* methods below.
        pass
    def set_exec(self, exec_cmd):
        # exec_cmd: argv list for the child dialog-agent process.
        self.exec_cmd = exec_cmd
    def set_respawn(self, respawn):
        # respawn: keep the child alive forever when True (see run()).
        self.respawn = respawn
    def set_skill(self, skill):
        # skill: iterable of skill names to register (iterated in do_run).
        self.skill = skill
    def set_chatbot(self, chatbot):
        self.chatbot = chatbot
def set_da_spec(self, da_spec):
if da_spec == 'v1':
self.da_spec = provider.DAP_SPEC_V_1
elif da_spec == 'v2':
self.da_spec = provider.DAP_SPEC_V_2
else:
self.da_spec = provider.DAP_SPEC_UNSPECIFIED
    def do_run_forever(self):
        # Placeholder: the respawning run loop is not implemented yet.
        pass
def run(self):
if self.respawn:
self.do_run_forever()
else:
self.do_run()
    @staticmethod
    def get_local_ip():
        # Determine the outbound-facing local IP by "connecting" a UDP socket
        # to a public address; UDP connect sends no packets, it only selects
        # the local interface, which getsockname() then reports.
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        s.connect(("8.8.8.8", 53))
        # print(s.getsockname()[0])
        ret = s.getsockname()[0]
        s.close()
        return ret
def do_run(self):
cmd = self.exec_cmd
port = find_unused_port()
cmd.append('-p')
cmd.append(str(port))
self.pipe = subprocess.Popen(cmd, close_fds=True,
stdout=sys.stdout,
stderr=sys.stderr)
if not self.pipe.pid > 0:
print '>>>', 'Cannot exec cmd', self.exec_cmd
exit(1)
# TODO(gih2yun): change to admin server..
admin_export = self.conf.get('admin.export')
print '>>>', 'admin endpoint', admin_export
pool_stub = pool.DialogAgentInstancePoolStub(
grpc.insecure_channel(admin_export))
try:
sleep(2)
child_addr = '127.0.0.1:' + str(port)
child_channel = grpc.insecure_channel(child_addr)
if self.da_spec == provider.DAP_SPEC_V_1:
dainst_stub = talk_v1.DialogAgentProviderStub(child_channel)
elif self.da_spec == provider.DAP_SPEC_V_2:
dainst_stub = talk_v2.DialogAgentProviderStub(child_channel)
else:
dainst_stub = talk_v1.DialogAgentProviderStub(child_channel)
empty_obj = empty_pb2.Empty()
status = dainst_stub.IsReady(empty_obj)
if status.state != provider.DIAG_STATE_IDLE:
print '>>>', 'invalid state ', status.state, self.pipe.pid
self.pipe.terminate()
exit(1)
print '>>>', "is ready done!"
for skill in self.skill:
# GET RUNTIME Parameter
run_params = dainst_stub.GetRuntimeParameters(empty_obj)
print '>>>', "get runtime parameter done!"
# Init
init_param = provider.InitParameter()
init_param.chatbot = self.chatbot
init_param.skill = skill
init_param.lang = dialog_pb2.kor
init_param.sds_remote_addr = \
self.conf.get('sds-svcd.export.ip') + ':' + \
self.conf.get('sds-svcd.export.port')
for p in run_params.params:
init_param.params[p.name] = p.default_value
print '>>>', p.name, p.default_value, init_param.params
res = dainst_stub.Init(init_param)
print '>>>', "init done!"
# DA RES
dares = pool.DialogAgentInstanceResource()
dares.name = 'darun-' + res.name + '-' + str(port)
dares.description = res.description
dares.version = res.version
dares.da_spec = self.da_spec
dares.chatbot = self.chatbot
dares.skill = skill
dares.lang = dialog_pb2.kor
# random uuid를 생성한다.
dares.key = str(uuid.uuid1())
dares.server_ip = DialogAgentRunner.get_local_ip()
dares.server_port = port
dares.launch_type = pool.DAL_DARUN
dares.launcher = 'darun ' + self.version
dares.pid = self.pipe.pid
dares.started_at.GetCurrentTime()
dares.param.CopyFrom(res)
self.res_key = pool_stub.Register(dares)
print ">>> register done!", self.res_key, dares.da_spec
# p = select.poll()
# p.register(self.pipe.stderr)
# p.register(self.pipe.stdout)
#
# while self.pipe.poll() is None:
# try:
# r = p.poll(1)
# except select.error, err:
# if err.args[0] != socket.EINTR:
# raise
# r = []
# for fd, flags in r:
# print fd
# if fd == self.pipe.stderr.fileno():
# print 'E ', self.pipe.stderr.readline()
# if fd == self.pipe.stdout.fileno():
# print 'O ', self.pipe.stdout.readline()
# sleep(0.5)
self.pipe.wait()
print '>>>', 'Child process exit with', self.pipe.returncode
self.stop()
except KeyboardInterrupt:
print '>>>', 'Exit'
self.stop()
except grpc.RpcError as e:
print '>>>', e.__doc__
print '>>>', e.message
print '>>>', traceback.format_exc()
self.stop()
except Exception as e:
print '>>>', e.__doc__
print '>>>', e.message
print '>>>', traceback.format_exc()
self.stop()
def stop(self):
if self.pipe is not None:
admin_endpoint = self.conf.get('admin.export')
pool_stub = pool.DialogAgentInstancePoolStub(
grpc.insecure_channel(admin_endpoint))
dares_key = pool.DialogAgentInstanceKey()
dares_key.key = self.res_key.key
stat = pool_stub.Unregister(dares_key)
print stat
self.pipe.terminate()
def find_unused_port():
    """Ask the OS for a currently-free TCP port and return its number."""
    probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    probe.bind(('', 0))  # port 0 -> the kernel picks any free port
    port = probe.getsockname()[1]
    # print 'find unused port', port
    probe.close()
    return port
def run_cmd():
    """Command-line entry point: parse arguments, configure a
    DialogAgentRunner and run it until the child exits or Ctrl-C."""
    conf = Config()
    conf.init('m2u.conf')  # for prod
    # conf.init('/srv/minds/etc/m2u.conf') # for debug
    parser = argparse.ArgumentParser(
        description='DA runner')
    parser.add_argument('cmd',
                        nargs='+',
                        help='Exeutable command')
    parser.add_argument('-c', '--chatbot',
                        nargs='?',
                        dest='chatbot',
                        required=True,
                        help='Specify Chatbot to run')
    parser.add_argument('-s', '--skill',
                        nargs='*',
                        dest='skill',
                        required=True,
                        help='Skill in chatbot')
    parser.add_argument('-r', '--respawn',
                        nargs='?',
                        dest='respawn',
                        required=False,
                        help='Enable respawn')
    # NOTE(review): the default is the enum value DAP_SPEC_UNSPECIFIED while
    # set_da_spec() compares against the strings 'v1'/'v2'; the default
    # therefore always maps to "unspecified" — confirm that is intended.
    parser.add_argument('-v', '--version',
                        nargs='?',
                        dest='da_spec',
                        required=False,
                        default=provider.DAP_SPEC_UNSPECIFIED,
                        help='define da spec')
    args = parser.parse_args()
    print args.cmd
    runner = DialogAgentRunner()
    runner.set_exec(args.cmd)
    runner.set_chatbot(args.chatbot)
    runner.set_skill(args.skill)
    runner.set_da_spec(args.da_spec)
    # Respawn is not implemented, so it is forced off here.
    # runner.set_respawn(args.respawn)
    runner.set_respawn(False)
    try:
        runner.run()
    except KeyboardInterrupt:
        runner.stop()
# Script entry point.
if __name__ == '__main__':
    run_cmd()
|
from torch.utils import data
from abc import ABC, abstractmethod
from abc import ABC, abstractmethod
from torch.utils import data
class BaseDataset(data.Dataset, ABC):
    """Abstract torch Dataset that loads itself on construction.

    Subclasses must implement load_dataset() — presumably populating
    self.X / self.y / self.features there (TODO confirm) — and must
    override __len__ and __getitem__; the placeholders below return None,
    which violates the Dataset contract if left unimplemented.
    """

    def __init__(self, source, target):
        # Identifiers/paths of the raw data; interpretation is left to the
        # concrete subclass.
        self.source = source
        self.target = target
        self.X = None
        self.y = None
        self.features = None
        self.load_dataset()

    # Use TextBlob
    @abstractmethod
    def load_dataset(self):
        """Populate the dataset's contents; implemented by subclasses."""
        pass

    def __len__(self):
        # NOTE(review): placeholder — returns None; subclasses must override.
        pass

    def __getitem__(self, item):
        # NOTE(review): placeholder — returns None; subclasses must override.
        pass
###########################################
# Let's Have Some Fun
# File Name: 123.py
# Author: Weilin Liu
# Mail: liuweilin17@qq.com
# Created Time: Sat Oct 19 17:09:01 2019
###########################################
#coding=utf-8
#!/usr/bin/python
# 123. Best Time to Buy and Sell Stock III
class Solution:
    """LeetCode 123. Best Time to Buy and Sell Stock III
    (max profit with at most two transactions)."""

    # Bug fix: imported here so the `List[int]` annotations below resolve —
    # the module has no top-level typing import, so evaluating the original
    # signatures raised NameError at class-definition time.
    from typing import List

    def maxProfit1(self, prices: List[int]) -> int:
        """O(n)/O(1) state-machine scan tracking the best cost/profit of
        the first and second buy/sell."""
        firstBuy, firstSell = float('inf'), 0
        secondBuy, secondSell = float('inf'), 0
        for p in prices:
            firstBuy = min(firstBuy, p)                  # cheapest first buy
            firstSell = max(firstSell, p - firstBuy)     # best one-sale profit
            secondBuy = min(secondBuy, p - firstSell)    # effective 2nd-buy cost
            secondSell = max(secondSell, p - secondBuy)  # best two-sale profit
        return secondSell

    def maxProfit2(self, prices: List[int]) -> int:
        """Local/global DP generalized to K transactions (K=2 here).

        g[j]: best profit ending with a sale today using j transactions;
        f[j]: best profit overall so far using j transactions.
        """
        K = 2  # number of transactions
        N = len(prices)
        if N == 0 or N == 1:
            return 0
        g = [0 for _ in range(K + 1)]
        f = [0 for _ in range(K + 1)]
        for i in range(1, N):
            tmp = f[0]
            for j in range(1, K + 1):
                g[j] = max(g[j], tmp) + prices[i] - prices[i - 1]
                tmp = f[j]  # all use old f
                f[j] = max(f[j], g[j])
        return f[K]
|
from flask_security.forms import RegisterForm
from wtforms import StringField, validators
# registration form
class RegistrationForm(RegisterForm):
    """Flask-Security registration form extended with profile fields.

    Each extra field only enforces a length range; email/password handling
    is inherited from RegisterForm.
    """
    first_name = StringField('first_name', [validators.length(min=2, max=50)])
    second_name = StringField('second_name', [validators.length(min=2, max=50)])
    phone_number = StringField('phone_number', [validators.length(min=10, max=12)])
    username = StringField('username', [validators.length(min=6, max=30)])
|
import os
# Emit a PyInstaller-style data entry for every PNG under ./img
# (Windows-style path separators, matching the original script).
line_prefix = "program_files += [('"
line_suffix = ", 'DATA')]"

with open("imgScanSpec.txt", "w") as spec_file:
    img_dir = os.path.join(os.getcwd(), "img\\")
    escaped_dir = img_dir.replace("\\", "\\\\")
    for name in os.listdir(img_dir):
        if name.endswith(".png"):
            spec_file.write(
                line_prefix + name + "', '" + escaped_dir + name + "'" + line_suffix + "\n"
            )
|
#TASK-10:
#1)Write a Python program for all the cases which can check a string contains only a certain set of characters (in this case a-z, A-Z and 0-9).
def check(test_str):
    """Validate that *test_str* consists only of a-z, A-Z and 0-9.

    Prints 'valid'/'Invalid' (as before) and additionally returns the
    boolean result so callers can use it programmatically.

    Bug fix: the original used re.search with the pattern [a-zA-Z0-9.],
    which accepted any string containing at least one such character (and
    wrongly allowed '.'); the task requires the WHOLE string to be
    alphanumeric, so an anchored full match is used instead.
    """
    import re
    pattern = r'[a-zA-Z0-9]+'
    ok = re.fullmatch(pattern, test_str) is not None
    if ok:
        print('valid')
    else:
        print('Invalid')
    return ok

check(test_str='raghavi94')
# 2) Write a Python program that matches a word containing 'ab'.
import re

txt = "abrakadabra"
print('Matched' if re.search("ab", txt) else 'Not matched')

# 3) Write a Python program to check for a number at the end of a word/sentence.
import re

string = "BestEnlistDay10"
pattern = '(\d+)$'
x = re.search(pattern, string)
print('String ends with a number' if x else 'String does not end with a number')
# 4) Search for numbers (0-9) of length between 1 and 3 in a given string.
import re

print("Number of length 1 to 3")
for hit in re.finditer(r"([0-9]{1,3})", "123 456 789"):
    print(hit.group(0))

# 5) Match a string that contains only uppercase letters.
import re

string = str(input('Enter valid string'))
pattern = '^[A-Z]+$'
print('Valid string' if re.search(pattern, string) else 'Invalid string')
|
#from conta import Conta
# (Passing an object into a class __init__ works even without importing
# its type into this module.)
class Correntista:
    """Bank-account holder: wraps a CPF, a name and an account object."""

    def __init__(self, nome, cpf, Conta):
        self.__cpf = cpf
        self.__nome = nome
        # The credit limit lives on the account, not on the holder.
        #self.__limite = limite
        self.__conta = Conta

    def getCpf(self):
        return self.__cpf

    def setCpf(self, cpf):
        # Bug fix: 'self' was missing from the signature, so any call
        # raised TypeError (and 'self' inside the body was undefined).
        self.__cpf = cpf

    def getNome(self):
        return self.__nome

    def setNome(self, nome):
        # Bug fix: same missing-'self' defect as setCpf.
        self.__nome = nome

    def addConta(self, conta=None):
        """Attach an account to this holder.

        Generalized to accept the account as an argument.  With no argument
        it keeps the original behavior of instantiating Conta(), which
        requires a module-level Conta (the import above is commented out,
        so the bare call would raise NameError, exactly as before).
        """
        self.__conta = conta if conta is not None else Conta()

    def getConta(self):
        if self.__conta is None:
            raise RuntimeError("Cliente não possui conta no banco")
        return self.__conta
|
# -*- coding: utf-8 -*-
from openerp import models, fields, api
import logging
_logger = logging.getLogger(__name__)
class o2netQuotationItem(models.Model):
    """One quotation line: a price-list item plus a quantity."""
    _name = 'o2net.quotation.item'
    _description = "o2net - Quotation item"

    @api.depends('unit_price', 'quantity')
    def _compute_total_price(self):
        """total_price = unit_price * quantity.

        Bug fix: every record now gets a value assigned — the original
        skipped records with a falsy quantity, leaving their computed
        field unset.
        """
        _logger.debug("_compute_total_price: {0}".format(str(len(self))))
        for line in self:
            if line.quantity:
                line.total_price = line.unit_price * line.quantity
            else:
                line.total_price = 0.0

    @api.depends('pricelist_item_id')
    def _compute_unit_price(self):
        """Copy the unit price from the selected price-list item."""
        _logger.debug("_compute_unit_price: " + str(len(self)))
        for line in self:
            line.unit_price = line.pricelist_item_id.price

    unit_price = fields.Float(compute=_compute_unit_price, string='Unit price', store=True, digits=(10, 2))
    total_price = fields.Float(compute=_compute_total_price, string='Total price', store=True, digits=(10, 2))
    quantity = fields.Float(string='Quantity', digits=(5, 2), required=True)
    quotation_id = fields.Many2one('o2net.quotation', string='Quotation', required=True, ondelete='cascade')
    pricelist_item_id = fields.Many2one('o2net.pricelist.item', string='Price list item', required=True)
    # Bug fix: the field keyword is 'store', not 'stored' — the misspelled
    # kwarg was silently ignored, so 'stored=True' below had no effect.
    # NOTE(review): store=True on item_is_package adds a DB column on the
    # next module upgrade — confirm that matches the original intent.
    item_unit_of_measure = fields.Selection(related='pricelist_item_id.unit_of_measure', string='Measure unit',
                                            store=False)
    item_description = fields.Text(related='pricelist_item_id.description', string='Description', store=False)
    item_is_package = fields.Boolean(related='pricelist_item_id.is_package', string='Package', store=True)
    name = fields.Char(related='pricelist_item_id.name', string='Name')
    code = fields.Char(related='pricelist_item_id.code', string='Code')
    currency_id = fields.Many2one(related='pricelist_item_id.currency_id', string="Currency")
|
import pygauth, sys
from googleapiclient.discovery import build
if len(sys.argv) == 1:
print('Please provide a channel id')
exit()
creds = pygauth.get_user_creds_file('credentials.json', ['youtube'])
youtube = build('youtube', 'v3', credentials=creds)
resp = {'nextPageToken': None}
items = []
x = 0
while 'nextPageToken' in resp:
resp = youtube.search().list(part='id', channelId=sys.argv[1], maxResults=50, pageToken=resp['nextPageToken'], type='video', safeSearch='none').execute()
x += len(resp['items'])
items.extend(resp['items'])
print(f'\r{x} videos found', end='')
print('\nvideo ids:\n')
for item in items:
print(item['id']['videoId']) |
#
# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
#
"""
Simple handshake tests using gnutls-cli
"""
import argparse
import os
import sys
import ssl
import socket
import subprocess
import itertools
import multiprocessing
from os import environ
from multiprocessing.pool import ThreadPool
from s2n_test_constants import *
def try_gnutls_handshake(endpoint, port, priority_str, mfl_extension_test, enter_fips_mode=False):
    """Run one handshake between s2nd and gnutls-cli.

    Returns 0 on success and -1 on failure.  Bug fix: both child processes
    are now terminated on every exit path — the early ``return -1``
    branches previously leaked the running s2nd/gnutls-cli processes.
    """
    # Fire up s2nd
    s2nd_cmd = ["../../bin/s2nd", str(endpoint), str(port)]
    s2nd_ciphers = "test_all"
    if enter_fips_mode == True:
        s2nd_ciphers = "test_all_fips"
        s2nd_cmd.append("--enter-fips-mode")
    s2nd_cmd.append("-c")
    s2nd_cmd.append(s2nd_ciphers)
    if mfl_extension_test:
        s2nd_cmd.append("--enable-mfl")
    s2nd = subprocess.Popen(s2nd_cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    gnutls_cli = None
    try:
        # Make sure it's running
        s2nd.stdout.readline()
        gnutls_cmd = ["gnutls-cli", "--priority=" + priority_str, "--insecure", "-p " + str(port), str(endpoint)]
        if mfl_extension_test:
            gnutls_cmd.append("--recordsize=" + str(mfl_extension_test))
        # Fire up gnutls-cli, use insecure since s2nd is using a dummy cert
        gnutls_cli = subprocess.Popen(gnutls_cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
        # Write the priority str towards s2nd. Prepend with the 's2n' string to
        # make sure we don't accidently match something in the gnutls-cli
        # handshake output.
        written_str = "s2n" + priority_str
        gnutls_cli.stdin.write((written_str + "\n").encode("utf-8"))
        gnutls_cli.stdin.flush()
        # Check the marker made it through to the server...
        if not _expect_line(s2nd.stdout, written_str):
            return -1
        # ...then echo it back and check the client received it.
        s2nd.stdin.write((written_str + "\n").encode("utf-8"))
        s2nd.stdin.flush()
        if not _expect_line(gnutls_cli.stdout, written_str):
            return -1
        return 0
    finally:
        # Always reap both children, regardless of which path we exit on.
        if gnutls_cli is not None:
            gnutls_cli.kill()
            gnutls_cli.wait()
        s2nd.kill()
        s2nd.wait()

def _expect_line(stream, expected, max_lines=50):
    """Read up to *max_lines* lines from *stream*; True if one equals *expected*."""
    for _ in range(max_lines):
        if stream.readline().decode("utf-8").strip() == expected:
            return True
    return False
def handshake(endpoint, port, cipher_name, ssl_version, priority_str, digests, mfl_extension_test, fips_mode):
    """Run one handshake attempt and print a PASSED/FAILED summary line.

    Returns the handshake result (0 on success, -1 on failure).
    """
    ret = try_gnutls_handshake(endpoint, port, priority_str, mfl_extension_test, fips_mode)

    vers_str = S2N_PROTO_VERS_TO_STR[ssl_version]
    if mfl_extension_test:
        prefix = "MFL: %-10s Cipher: %-10s Vers: %-10s ... " % (mfl_extension_test, cipher_name, vers_str)
    elif not digests:
        prefix = "Cipher: %-30s Vers: %-10s ... " % (cipher_name, vers_str)
    else:
        # strip the first nine bytes from each name ("RSA-SIGN-")
        digest_string = ':'.join([x[9:] for x in digests])
        prefix = "Digests: %-40s Vers: %-10s ... " % (digest_string, vers_str)

    passed = (ret == 0)
    if sys.stdout.isatty():
        # Colorize when writing to a terminal.
        suffix = "\033[32;1mPASSED\033[0m" if passed else "\033[31;1mFAILED\033[0m"
    else:
        suffix = "PASSED" if passed else "FAILED"
    print(prefix + suffix)

    return ret
def create_thread_pool():
    """Build and return a ThreadPool sized at twice the CPU count.

    The 2x factor improves throughput slightly on hyperthreaded CPUs.
    """
    size = multiprocessing.cpu_count() * 2
    print("\n\tCreating ThreadPool of size: " + str(size))
    return ThreadPool(processes=size)
def main():
    """Drive the gnutls-cli <-> s2nd integration-test matrix.

    Returns -1 as soon as any handshake batch fails, 0 on full success.
    """
    parser = argparse.ArgumentParser(description='Runs TLS server integration tests against s2nd using gnutls-cli')
    parser.add_argument('host', help='The host for s2nd to bind to')
    parser.add_argument('port', type=int, help='The port for s2nd to bind to')
    parser.add_argument('--libcrypto', default='openssl-1.1.0', choices=['openssl-1.0.2', 'openssl-1.0.2-fips', 'openssl-1.1.0', 'openssl-1.1.x-master', 'libressl'],
                        help="""The Libcrypto that s2n was built with. s2n supports different cipher suites depending on
                        libcrypto version. Defaults to openssl-1.1.0.""")
    args = parser.parse_args()

    # Retrieve the test ciphers to use based on the libcrypto version s2n was built with
    test_ciphers = S2N_LIBCRYPTO_TO_TEST_CIPHERS[args.libcrypto]
    host = args.host
    port = args.port

    fips_mode = False
    if environ.get("S2N_TEST_IN_FIPS_MODE") is not None:
        fips_mode = True
        print("\nRunning s2nd in FIPS mode.")

    print("\nRunning GnuTLS handshake tests with: " + os.popen('gnutls-cli --version | grep -w gnutls-cli').read())

    # Phase 1: every cipher at every protocol version.
    for ssl_version in [S2N_SSLv3, S2N_TLS10, S2N_TLS11, S2N_TLS12]:
        if ssl_version == S2N_SSLv3 and fips_mode == True:
            # FIPS does not permit the use of SSLv3
            continue
        print("\n\tTesting ciphers using client version: " + S2N_PROTO_VERS_TO_STR[ssl_version])
        threadpool = create_thread_pool()
        port_offset = 0
        results = []
        for cipher in test_ciphers:
            # Use the Openssl name for printing
            cipher_name = cipher.openssl_name
            cipher_priority_str = cipher.gnutls_priority_str
            cipher_vers = cipher.min_tls_vers
            if ssl_version < cipher_vers:
                continue
            # Add the SSL version to make the cipher priority string fully qualified
            complete_priority_str = cipher_priority_str + ":+" + S2N_PROTO_VERS_TO_GNUTLS[ssl_version] + ":+SIGN-ALL"
            async_result = threadpool.apply_async(handshake, (host, port + port_offset, cipher_name, ssl_version, complete_priority_str, [], 0, fips_mode))
            port_offset += 1
            results.append(async_result)
        threadpool.close()
        threadpool.join()
        for async_result in results:
            if async_result.get() != 0:
                return -1

    # Phase 2: permutations of every accepted signature algorithm in every possible order.
    signatures = ["SIGN-RSA-SHA1", "SIGN-RSA-SHA224", "SIGN-RSA-SHA256", "SIGN-RSA-SHA384", "SIGN-RSA-SHA512"]
    for size in range(1, len(signatures) + 1):
        print("\n\tTesting ciphers using signature preferences of size: " + str(size))
        threadpool = create_thread_pool()
        port_offset = 0
        results = []
        for permutation in itertools.permutations(signatures, size):
            # Try an ECDHE cipher suite and a DHE one
            for cipher in filter(lambda x: x.openssl_name == "ECDHE-RSA-AES128-GCM-SHA256" or x.openssl_name == "DHE-RSA-AES128-GCM-SHA256", ALL_TEST_CIPHERS):
                complete_priority_str = cipher.gnutls_priority_str + ":+VERS-TLS1.2:+" + ":+".join(permutation)
                async_result = threadpool.apply_async(handshake, (host, port + port_offset, cipher.openssl_name, S2N_TLS12, complete_priority_str, permutation, 0, fips_mode))
                port_offset += 1
                results.append(async_result)
        threadpool.close()
        threadpool.join()
        for async_result in results:
            if async_result.get() != 0:
                return -1

    # Phase 3: Max Fragment Length extension.
    print("\n\tTesting handshakes with Max Fragment Length Extension")
    for ssl_version in [S2N_TLS10, S2N_TLS11, S2N_TLS12]:
        print("\n\tTesting Max Fragment Length Extension using client version: " + S2N_PROTO_VERS_TO_STR[ssl_version])
        threadpool = create_thread_pool()
        port_offset = 0
        results = []
        for mfl_extension_test in [512, 1024, 2048, 4096]:
            cipher = test_ciphers[0]
            # Bug fix: the priority string previously appended
            # ":+".join(permutation) using the stale loop variable left over
            # from the signature tests above; the MFL tests need no signature
            # qualifiers.
            # NOTE(review): the protocol here is pinned to TLS1.0 while the
            # loop announces ssl_version — confirm that is intentional.
            complete_priority_str = cipher.gnutls_priority_str + ":+" + S2N_PROTO_VERS_TO_GNUTLS[S2N_TLS10]
            async_result = threadpool.apply_async(handshake, (host, port + port_offset, cipher.openssl_name, ssl_version, complete_priority_str, [], mfl_extension_test, fips_mode))
            port_offset += 1
            results.append(async_result)
        threadpool.close()
        threadpool.join()
        for async_result in results:
            if async_result.get() != 0:
                return -1

    return 0
|
import unit_generation
import math
def DataRegBank(numUnits):
    """Emit Data_Reg_Bank.v: a bank of numUnits 32-bit output registers.

    Each register can be written individually (writeAddress + address),
    loaded all at once from the in* ports (writeAll), or cleared (reset).

    Fixes: the file handle is now closed deterministically via a
    with-block, and the address width is clamped so numUnits == 1 no
    longer emits an invalid "[-1:0]" range.
    """
    with open('Data_Reg_Bank.v', 'w') as f:
        f.write(unit_generation.header('DataRegBank', 'For '+str(numUnits)+' units'))
        # module declaration
        f.write('module DataRegBank(')
        for i in range(numUnits):
            f.write('in{}, '.format(i))
        f.write('dataIn, address, writeAddress, writeAll, reset, clk')
        for i in range(numUnits):
            f.write(', out{}'.format(i))
        f.write(');\n')
        # input declaration
        f.write('input [31:0] ')
        for i in range(numUnits):
            f.write('in{}, '.format(i))
        f.write('dataIn;\n')
        # ceil(log2(numUnits)) address bits; clamp so the MSB index is >= 0.
        addressSize = max(int(math.ceil(math.log(numUnits)/math.log(2)))-1, 0)
        f.write('input [{}:0] address;\n'.format(addressSize))
        f.write('input writeAddress, writeAll, reset, clk;\n')
        # output declaration
        f.write('output reg [31:0] out0')
        for i in range(1, numUnits):
            f.write(', out{}'.format(i))
        f.write(';\n\n')
        # main clocked block: reset > addressed write > write-all
        f.write('always @ (posedge clk) begin\n')
        f.write('\tif (reset == 1) begin \n')
        for i in range(numUnits):
            f.write('\t\tout{} <= 0;\n'.format(i))
        f.write('\tend\n')
        f.write(' else if(writeAddress == 1) begin\n')
        f.write(' case(address)\n')
        for i in range(numUnits):
            f.write(' {}: begin\n'.format(i))
            for j in range(numUnits):
                if(i==j):
                    f.write(' out{} <= dataIn;\n'.format(j))
                else:
                    f.write(' out{0} <= out{0};\n'.format(j))
            f.write(' end\n')
        f.write(' default: begin\n')
        for i in range(numUnits):
            f.write(' out{0} <= out{0};\n'.format(i))
        f.write(' end\n')
        f.write(' endcase\n')
        f.write(' end else if (writeAll == 1) begin\n')
        for i in range(numUnits):
            f.write(' out{0} <= in{0};\n'.format(i))
        f.write(' end\n')
        f.write('end\n')
        f.write('endmodule\n')
def DataRegBankTB(numUnits):
    """Emit Data_Reg_Bank_TB.v, a test fixture for DataRegBank.

    Bug fix: the generated testbench now declares, initializes and
    connects the module's 'reset' port — it was previously omitted from
    the positional UUT port list, so every signal after it (clk and the
    out* wires) was wired one position off.  The file handle is also
    closed deterministically via a with-block, and the address width is
    clamped to match DataRegBank.
    """
    with open('Data_Reg_Bank_TB.v', 'w') as f:
        f.write('`timescale 1ns / 1ps\n')
        f.write(unit_generation.header('DataRegBank Test Fixture', ' '))
        f.write('module Data_Reg_Bank_TB();\n')
        f.write('reg [31:0] ')
        for i in range(numUnits):
            f.write('in{}, '.format(i))
        f.write('dataIn;\n')
        addressSize = max(int(math.ceil(math.log(numUnits)/math.log(2)))-1, 0)
        f.write('reg [{}:0] address;\n'.format(addressSize))
        f.write('reg writeAddress, writeAll, reset, clk;\n')
        f.write('wire [31:0] out0')
        for i in range(1, numUnits):
            f.write(', out{}'.format(i))
        f.write(';\n\n')
        f.write('DataRegBank UUT(')
        for i in range(numUnits):
            f.write('in{}, '.format(i))
        f.write('dataIn, address, writeAddress, writeAll, reset, clk')
        for i in range(numUnits):
            f.write(', out{}'.format(i))
        f.write(');\n\n')
        f.write('initial begin\n')
        f.write('clk = 0; dataIn = 0; address = 0; writeAddress = 0; reset = 0;\n')
        # Exercise writeAll with all-zero inputs.
        for i in range(numUnits):
            f.write('in{} = 0; '.format(i))
        f.write('writeAll = 0; #5;\n')
        for i in range(numUnits):
            f.write('in{} = 0; '.format(i))
        f.write('writeAll = 1; #5;\n')
        for i in range(numUnits):
            f.write('in{} = 0; '.format(i))
        f.write('writeAll = 0; #5;\n\n')
        # Exercise an addressed write for every register.
        for i in range(numUnits):
            f.write('dataIn = {0}; address = {0}; writeAddress = 0; #5;\n'.format(i))
            f.write('dataIn = {0}; address = {0}; writeAddress = 1; #5;\n'.format(i))
            f.write('dataIn = {0}; address = {0}; writeAddress = 0; #5;\n\n'.format(i))
        # Exercise writeAll with distinct values on every input.
        for i in range(numUnits):
            f.write('in{} = {}; '.format(i, 10+i))
        f.write('writeAll = 0; #5;\n')
        for i in range(numUnits):
            f.write('in{} = {}; '.format(i, 10+i))
        f.write('writeAll = 1; #5;\n')
        for i in range(numUnits):
            f.write('in{} = {}; '.format(i, 10+i))
        f.write('writeAll = 0; #5;\n\n')
        f.write('$stop;\n')
        f.write('end\n\n')
        f.write('always #1 clk = ~clk;\n\n')
        f.write('endmodule')
|
from gym.envs.registration import register
# Register the Plants-vs-Zombies environment variants with Gym so they can
# be instantiated via gym.make('pvz-env-v*').
register(
    id='pvz-env-v0',
    entry_point='gym_pvz.envs:PVZEnv'
)
register(
    id='pvz-env-v1',
    entry_point='gym_pvz.envs:PVZEnv_V1'
)
register(
    id='pvz-env-v01',
    entry_point='gym_pvz.envs:PVZEnv_V01'
)
register(
    id='pvz-env-v2',
    entry_point='gym_pvz.envs:PVZEnv_V2'
)
# Puzzle: count how many bag colours can (transitively) contain a
# "shiny gold" bag, given one containment rule per input line.
with open("puzzle_input.txt") as puzzle:
    a = list(puzzle)  # raw rule lines

master = set()  # every rule line whose bag can reach "shiny gold"

def bag_of_bags(bags):
    """Return rule lines for bags that directly contain any bag in *bags*.

    Only rules not already in *master* are returned, so the outer BFS loop
    never revisits a colour (bug fix: the original could re-enqueue the
    same parent many times, doing exponential redundant work on deep
    inputs; the final count is unchanged because *master* is a set).
    NOTE(review): matching is a plain substring test on the colour name,
    which assumes no colour name is contained in another — confirm for
    the actual input.
    """
    parents = []
    for bag in bags:
        bag_color = bag.split(" bags contain ")[0]
        for g in a:
            if bag_color in g and not g.startswith(bag_color) and g not in master:
                parents.append(g)
    master.update(parents)
    return parents

bags = ["shiny gold"]
while len(bags) != 0:
    bags = bag_of_bags(bags)

print(len(master))
|
import kivy
kivy.require('1.0.6')
from glob import glob
from random import randint
from os.path import join, dirname
from kivy.app import App
from kivy.logger import Logger
from kivy.uix.scatter import Scatter
from kivy.properties import StringProperty
import numpy as np
from PIL import Image
from skimage import io,data,filters
import cv2
# Load two images
img1 = cv2.imread('/Users/amen/data/Learning/PythonTest/images/lean.jpg')
img2 = cv2.imread('/Users/amen/data/Learning/PythonTest/images/logo.png')
# I want to put logo on top-left corner, So I create a ROI
rows,cols,channels = img2.shape
roi = img1[0:rows, 0:cols ]
# Now create a mask of logo and create its inverse mask also
img2gray = cv2.cvtColor(img2,cv2.COLOR_BGR2GRAY)
ret, mask = cv2.threshold(img2gray, 10, 255, cv2.THRESH_BINARY)
mask_inv = cv2.bitwise_not(mask)
# Now black-out the area of logo in ROI
img1_bg = cv2.bitwise_and(roi,roi,mask = mask_inv)
# Take only region of logo from logo image.
img2_fg = cv2.bitwise_and(img2,img2,mask = mask)
# Put logo in ROI and modify the main image
dst = cv2.add(img1_bg,img2_fg)
img1[0:rows, 0:cols ] = dst
# 2D convolution / smoothing experiments (kept for reference):
# image_logo = img2.copy()
# kernel = np.ones((5,5),np.float32)/25
# dst = cv2.filter2D(image_logo,-1,kernel)
# blur = cv2.blur(image_logo,(5,5))
# gauss = cv2.GaussianBlur(image_logo,(5,5),0)
# median = cv2.medianBlur(image_logo,5)
# bilateral = cv2.bilateralFilter(image_logo,9,75,75)
# kernel = np.ones((3,3),np.uint8)
# erosion = cv2.erode(image_logo,kernel,iterations = 1)
# Morphological closing on the threshold test image.
img_th = cv2.imread('/Users/amen/data/Learning/PythonTest/images/th.jpg')
kernel = np.ones((3,3),np.uint8)
closing = cv2.morphologyEx(img_th, cv2.MORPH_CLOSE, kernel)
# cv2.imshow('res12',closing)
# Reference background colour (BGR) and squared-distance threshold used
# by calc_diff / remove_bg below.
bg_color = [197, 102, 6]
threshold = 3000
def calc_diff(pixel, reference=(197, 102, 6)):
    """Squared Euclidean distance between *pixel* and *reference* (BGR).

    Generalized: the comparison colour used to be the hard-coded module
    global bg_color; it is now a parameter whose default equals that
    value, so existing one-argument calls behave identically.
    """
    return ((pixel[0] - reference[0]) ** 2
            + (pixel[1] - reference[1]) ** 2
            + (pixel[2] - reference[2]) ** 2)
def remove_bg():
    """Load th.jpg, make near-background pixels white and fully
    transparent, and return the resulting BGRA image."""
    image_path = '/Users/amen/data/Learning/PythonTest/images/th.jpg'
    logo = cv2.imread(image_path)
    # Convert to BGRA so an alpha channel is available.
    logo = cv2.cvtColor(logo, cv2.COLOR_BGR2BGRA)
    rows, cols = logo.shape[0:2]
    for r in range(rows):
        for c in range(cols):
            if calc_diff(logo[r][c]) < threshold:
                # Background pixel: paint it white, alpha 0 (transparent).
                logo[r][c][0] = 255
                logo[r][c][1] = 255
                logo[r][c][2] = 255
                logo[r][c][3] = 0
    # cv2.imshow('res13',logo)
    return logo
# remove_bg()
# 查找图片轮廓
def preprocess(save_dir, gray):
    """Morphological pipeline turning a grayscale image into a dilated
    binary mask suitable for contour search; intermediates go to save_dir."""
    # 1. Sobel gradient along x.
    sobel = cv2.Sobel(gray, cv2.CV_8U, 1, 0, ksize = 3)
    # 2. Binarize with Otsu's threshold.
    ret, binary = cv2.threshold(sobel, 0, 255, cv2.THRESH_OTSU+cv2.THRESH_BINARY)
    #gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    ret, thresh = cv2.threshold(gray, 190, 255, cv2.THRESH_BINARY)
    cv2.imwrite(save_dir+"thresh.jpg", thresh)
    # 3. Kernels for the dilate/erode steps below.
    wide_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (30, 9))
    narrow_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (24, 6))
    # 4. Dilate once so outlines stand out.
    dilation = cv2.dilate(binary, narrow_kernel, iterations = 1)
    # 5. Erode once to drop fine detail such as vertical table lines.
    erosion = cv2.erode(dilation, wide_kernel, iterations = 1)
    # 6. Dilate again to make the outlines prominent.
    dilation2 = cv2.dilate(erosion, narrow_kernel, iterations = 3)
    # 7. Save the intermediate stages for debugging.
    cv2.imwrite(save_dir+"binary.png", binary)
    cv2.imwrite(save_dir+"dilation.png", dilation)
    cv2.imwrite(save_dir+"erosion.png", erosion)
    cv2.imwrite(save_dir+"dilation2.png", dilation2)
    return dilation2
def findTextRegion(img):
    """Return the corner boxes of large contours in a preprocessed mask."""
    region = []
    # 1. Find contours.
    contours, hierarchy = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # 2. Drop small-area contours, keep the rest.
    for cnt in contours:
        area = cv2.contourArea(cnt)
        if(area < 50000):
            continue
        # Polygon approximation (has little practical effect here).
        epsilon = 0.001 * cv2.arcLength(cnt, True)
        approx = cv2.approxPolyDP(cnt, epsilon, True)
        # Minimum-area (possibly rotated) bounding rectangle.
        rect = cv2.minAreaRect(cnt)
        print("rect is: ")
        print(rect)
        # The four corner points of that rectangle.
        box = cv2.boxPoints(rect)
        box = np.int0(box)
        # Height and width (computed for the filter kept commented below).
        height = abs(box[0][1] - box[2][1])
        width = abs(box[0][0] - box[2][0])
        # # Skip rectangles that are too tall/narrow, keeping flat ones:
        # if(height > width * 1.2):
        #     continue
        region.append(box)
    print("region is: ")
    print(region)
    return region
def detect(save_dir, img):
    """Find text-like regions in *img*, outline them, then show and save
    the annotated image."""
    # 1. Grayscale.
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # 2. Morphological preprocessing that exposes rectangular regions.
    mask = preprocess(save_dir, gray)
    # 3. Locate and filter the candidate text regions.
    boxes = findTextRegion(mask)
    # 4. Outline each found region in blue (BGR 255,0,0).
    for box in boxes:
        cv2.drawContours(img, [box], 0, (255, 0, 0), 2)
    # cv2.namedWindow("img", cv2.WINDOW_NORMAL)
    cv2.imshow("img", img)
    # Save the annotated image.
    cv2.imwrite(save_dir + "contours.png", img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
class Picture(Scatter):
    """Draggable/scalable picture widget; the image file is given by the
    'source' Kivy property (consumed by the pictures.kv template)."""
    source = StringProperty(None)

    def onpress(self):
        print("key=1, val=2")
# 读取文件
imagePath = '/Users/amen/data/Learning/PythonTest/images/lean.jpg'
img = cv2.imread(imagePath)
save_dir = "./PythonTest/images/out/"
result = img.copy()
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
ret,thresh = cv2.threshold(gray,190,255,cv2.THRESH_BINARY)
cv2.imwrite(save_dir+"thresh.jpg", thresh)
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
opened = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
cv2.imwrite(save_dir+"opened.jpg", opened)
eroded = cv2.erode(opened, kernel)
cv2.imwrite(save_dir+"eroded.jpg", eroded)
contours, hierarchy = cv2.findContours(eroded,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
color = (0, 255, 0)
for c in contours:
x, y, w, h = cv2.boundingRect(c)
cv2.rectangle(img, (x, y), (x + w, y + h), color, 1)
temp = result[y:(y + h), x:(x + w)]
# cv2.imwrite("/Users/amen/data/Learning/PythonTest/images/result/" + str(x) + ".jpg", temp)
cv2.imwrite(save_dir+"result.jpg", img)
detect(save_dir,img)
class PicturesApp(App):
    """Kivy application that shows ./images/lean.jpg as a draggable
    Picture widget; the root widget is defined in pictures.kv."""

    def build(self):
        # pil_im = Image.open("/Users/amen/data/Learning/PythonTest/images/th.jpg")
        # print(pil_im.size,pil_im.format,pil_im.mode)
        # pil_im.resize(200,200)
        # pil_im.save("./images/th.jpg")
        # pil_im = Image.open("./images/th.jpg")
        # print(pil_im.size,pil_im.format,pil_im.mode)
        # the root is created in pictures.kv
        root = self.root
        # get any files into images directory
        curdir = dirname(__file__)
        # for filename in glob(join(curdir, 'images', '*')):
        filename = curdir+'/images/lean.jpg'
        try:
            # load the image
            # picture = Picture(source=filename, rotation=randint(-30, 30))
            picture = Picture(source=filename, rotation=0)
            # add to the main field
            root.add_widget(picture)
        except Exception as e:
            Logger.exception('Pictures: Unable to load <%s>' % filename)

# Script entry point: start the Kivy app.
if __name__ == '__main__':
    PicturesApp().run()
|
"""
byceps.services.webhooks.service
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2020 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from typing import Optional, Set
from ...database import db
from .models import OutgoingWebhook as DbOutgoingWebhook
from .transfer.models import OutgoingWebhook, WebhookID
def create_outgoing_webhook(
    scope: str,
    scope_id: Optional[str],
    format: str,
    url: str,
    enabled: bool,
    *,
    text_prefix: Optional[str] = None,
) -> OutgoingWebhook:
    """Create an outgoing webhook, persist it, and return its transfer object."""
    db_webhook = DbOutgoingWebhook(
        scope, scope_id, format, url, enabled, text_prefix=text_prefix
    )
    db.session.add(db_webhook)
    db.session.commit()

    return _db_entity_to_outgoing_webhook(db_webhook)
def delete_outgoing_webhook(webhook_id: WebhookID) -> None:
    """Delete the outgoing webhook."""
    query = db.session.query(DbOutgoingWebhook).filter_by(id=webhook_id)
    query.delete()
    db.session.commit()
def find_enabled_outgoing_webhook(
    scope: str, scope_id: str, format: str
) -> Optional[OutgoingWebhook]:
    """Return the configuration for an enabled outgoing webhook with the
    given scope and format, or None if there is no such webhook.
    """
    webhook = db.session.query(DbOutgoingWebhook) \
        .filter_by(scope=scope) \
        .filter_by(scope_id=scope_id) \
        .filter_by(format=format) \
        .filter_by(enabled=True) \
        .one_or_none()
    # Bug fix: the query previously never filtered on `enabled`, so a
    # disabled webhook could be returned despite the function's name and
    # docstring promising only enabled ones.

    if webhook is None:
        return None

    return _db_entity_to_outgoing_webhook(webhook)
def _db_entity_to_outgoing_webhook(
    webhook: DbOutgoingWebhook,
) -> OutgoingWebhook:
    """Map a database row to the service-layer transfer object."""
    args = (
        webhook.id,
        webhook.scope,
        webhook.scope_id,
        webhook.format,
        webhook.text_prefix,
        webhook.url,
        webhook.enabled,
    )
    return OutgoingWebhook(*args)
|
class Solution:
    def climbStairs(self, n):
        """Number of distinct ways to climb *n* stairs taking 1 or 2 steps
        at a time (iterative Fibonacci-style recurrence).

        :type n: int
        :rtype: int
        """
        # Base cases handled explicitly, exactly as the original did.
        if n in (0, 1, 2):
            return n
        prev, curr = 1, 2
        for _ in range(3, n + 1):
            prev, curr = curr, prev + curr
        return curr
import tensorflow as tf
SCALE = 1.0
def block_cole(layer_num, input, num_kernels, is_train):
    """One conv block: conv3d+ReLU, conv3d, batch-norm+ReLU, 2x2x2 max-pool.

    Layer names are derived from *layer_num* so variables stay addressable
    by checkpoint name.
    """
    conv_a = tf.layers.conv3d(
        inputs=input,
        filters=num_kernels,
        kernel_size=[3, 3, 3],
        strides=[1, 1, 1],
        padding='same',
        use_bias=True,
        activation=tf.nn.relu,
        name='layer{}_aconv'.format(layer_num))
    print(conv_a.shape)

    conv_b = tf.layers.conv3d(
        inputs=conv_a,
        filters=num_kernels,
        kernel_size=[3, 3, 3],
        strides=[1, 1, 1],
        padding='same',
        use_bias=True,
        activation=None,
        name='layer{}_bconv'.format(layer_num))
    print(conv_b.shape)

    # Normalize first, then apply the deferred ReLU.
    normed = tf.nn.relu(tf.layers.batch_normalization(conv_b, training=is_train))
    pooled = tf.layers.max_pooling3d(inputs=normed, pool_size=[2, 2, 2], strides=2,
                                     name='layer{}_pool'.format(layer_num))
    print(pooled.shape)
    return pooled
def conv_net_cole(features):
    """3D CNN: five conv blocks followed by three dense layers.

    Expects features['x'] (input volume), features['y'] (targets, used only
    for the output width) and features['is_train'] (batch-norm/dropout flag).
    """
    with tf.variable_scope('NET'):
        input_layer = features["x"] / SCALE
        output_layer = features["y"]
        is_train = features["is_train"]

        net = input_layer  # tf.reshape(input_layer, [-1, 256, 256, 256, 1])
        # Five blocks with doubling filter counts: 8, 16, 32, 64, 128.
        for layer_num, filters in enumerate([8, 16, 32, 64, 128], start=1):
            net = block_cole(layer_num, net, filters, is_train)

        flat = tf.reshape(net, [-1, 4 * 7 * 7 * 128])
        dropped = tf.layers.dropout(flat, rate=0.4, training=is_train, name='dropout_pool')
        print("now fc...")
        fc1 = tf.layers.dense(inputs=dropped, units=100, use_bias=True, activation=None, name='layer1_fc')
        fc2 = tf.layers.dense(inputs=fc1, units=20, use_bias=True, activation=None, name='layer2_fc')
        fc3 = tf.layers.dense(inputs=fc2, units=output_layer.shape[1], use_bias=True, activation=None, name='layer3_fc')
        return tf.identity(fc3, name='model')
from django.db import models
# Models for the "friendly sites" (blogroll) module
class Category(models.Model):
    """A category used to group friendly-link sites."""
    name = models.CharField(max_length=100,verbose_name='类别名称')
    def __str__(self):
        return self.name
class CoolSite(models.Model):
    """A friendly-link site entry belonging to one Category."""
    category = models.ForeignKey(Category,verbose_name='所属类别',on_delete=models.CASCADE)
    url = models.URLField(verbose_name='站点地址')
    name = models.CharField(max_length=100,verbose_name='站点名称')
    description = models.TextField(blank=True,verbose_name='站点介绍')
    created_on = models.DateTimeField(auto_now_add=True,verbose_name='添加时间')
    def __str__(self):
        return self.name
#!/usr/bin/env python
import unittest
from dominion import Game, Card, Piles
import dominion.Card as Card
###############################################################################
class Card_Patron(Card.Card):
    """Patron (Renaissance): Action-Reaction costing 4.

    On play: +1 Villager and +2 Coin; when revealed: +1 Coffers via the
    hook_revealThisCard hook.
    """
    def __init__(self):
        Card.Card.__init__(self)
        self.cardtype = [Card.CardType.ACTION, Card.CardType.REACTION]
        self.base = Card.CardExpansion.RENAISSANCE
        self.desc = "+1 Villager; +2 Coin. When something causes you to reveal this, +1 Coffers."
        self.name = "Patron"
        self.cost = 4
        self.coin = 2  # +2 Coin granted by the engine from this attribute
    def special(self, game, player):
        # On play: +1 Villager (the coin bonus comes from self.coin above).
        player.villagers.add(1)
    def hook_revealThisCard(self, game, player):
        # Reaction: +1 Coffers whenever this card is revealed.
        player.coffers.add(1)
###############################################################################
class Test_Patron(unittest.TestCase):
    """Unit tests for Patron's play effect and reveal reaction."""
    def setUp(self):
        # One-player test game with Patron in the kingdom; put one in hand.
        self.g = Game.TestGame(numplayers=1, initcards=["Patron"])
        self.g.start_game()
        self.plr = self.g.player_list(0)
        self.card = self.g["Patron"].remove()
        self.plr.add_card(self.card, Piles.HAND)
    def test_play(self):
        """Playing Patron yields +2 coin and +1 villager."""
        self.plr.play_card(self.card)
        self.assertEqual(self.plr.coins.get(), 2)
        self.assertEqual(self.plr.villagers.get(), 1)
    def test_reveal(self):
        """Revealing Patron yields +1 coffers."""
        num = self.plr.coffers.get()
        self.plr.reveal_card(self.card)
        self.assertEqual(self.plr.coffers.get(), num + 1)
###############################################################################
if __name__ == "__main__": # pragma: no cover
unittest.main()
# EOF
|
from pathlib import Path
class PathManager:
    """Central registry of project paths, resolved relative to this file."""
    # Repository root: two directory levels above this file — assumes this
    # module lives two levels deep in the repo; TODO confirm if moved.
    BASE_DIR: Path = Path(__file__).resolve().parents[2]
    DATA: Path = BASE_DIR / "data"
    ROOT_DIR: Path = BASE_DIR / 'punctuator'
    TESTS: Path = ROOT_DIR / "tests"
    SRC: Path = ROOT_DIR / "src"
    CREDENTIALS: Path = ROOT_DIR / "credentials"
    # data directory layout (raw -> interim -> processed pipeline stages)
    RAW: Path = DATA / "raw"
    INTERIM: Path = DATA / "interim"
    PROCESSED: Path = DATA / "processed"
|
'''
Records.Normalize
Records.Normalize.Finance.
Records.Normalize.Finance.BeyondBanking
Records.Normalize.Finance.PaypalFaraja
'''
import os, copy, re
from Database import Database
from utils.CsvObject import *
from utils import Container
#===============================================================================
# Records.Normalize
#===============================================================================
class Normalize(Database):
    """Combine the original per-folder CSV files, then write a normalized
    'Derived.csv' with the useful columns plus derived columns.

    NOTE(review): this is Python 2 code (print statements below). Helper
    methods such as originalFields()/usefulFields()/derivedFields()/dialect()/
    skipFirstRecord()/getOrderedDay()/determineDerivedFields() are presumably
    provided by Database or subclasses — not visible here; verify.
    """
    #===========================================================================
    # implementations
    #===========================================================================
    # No-op stubs documenting the expected interface; they are shadowed by the
    # full definitions further down in this class.
    def combineOriginals(self): pass
    def normalizeOriginals(self): pass
    def recordsDerived(self): pass
    #===========================================================================
    # constructor
    #===========================================================================
    def __init__(self, website, camperid, day, dest, page, year, refresh):
        # Combine and normalize immediately on construction; the result is
        # kept on self.csv_object.
        Database.__init__(self, website, camperid, day, dest, page, year, refresh)
        csv_combined = self.combineOriginals(self.folderIn(), self.folderOut())
        self.csv_object = self.normalizeOriginals(csv_combined, self.folderOut())
        self.website = website
        return None
    def doThis(self, row_in, rows_in, rows_out):
        # Map one input row to an output row: copy the shared columns, then
        # fill in the derived ordering/date columns and yes/no conversions.
        row_out = dict()
        for field in rows_in.fieldnames:
            if field in rows_out.fieldnames:
                row_out[field] = row_in[field]
        row_out['ordered_day'] = self.getOrderedDay(row_in['day'])
        row_out['ordered_time'] = self.getOrderedTime(row_in['time'])
        # YEAR_OF_CAMP presumably comes from the star import of
        # utils.CsvObject — verify.
        row_out['date'] = self.getDate(row_in['day'], YEAR_OF_CAMP)
        # Convert 1/0 flags to yes/no for readability.
        convert = re.sub('1','yes', row_in['HS-dispenses'])
        convert = re.sub('0','no', convert)
        row_out['HS-dispenses'] = convert
        convert = re.sub('1','yes', row_in['la-bus'])
        convert = re.sub('0','no', convert)
        row_out['la-bus'] = convert
        return row_out
    #===========================================================================
    # combine original files with original format
    #===========================================================================
    def combineOriginals(self, folder_in, folder_out):
        # Empty the output folder, then merge every CSV in folder_in into a
        # single 'OriginalsCombined.csv' with the original column layout.
        Container.clearFolder(folder_out)
        file_out = folder_out+'OriginalsCombined.csv'
        csv_folder = CsvFolder( folder_in,
                                self.originalFields(),
                                self.dialect(),
                                self.skipFirstRecord() )
        csv_objects = FolderOfCsvObjects(file_out, csv_folder)
        csv_originals = csv_objects.combineIntoOneCsvObject()
        return csv_originals
    #===========================================================================
    # original columns appended with derived columns
    #===========================================================================
    def normalizeOriginalsOld(self, csv_combined_originals, folder_out):
        # Superseded variant kept for reference; uses recordsDerivedOld's flow.
        normalized = self.recordsDerived(csv_combined_originals, folder_out)
        if False: print "\n Normalized records are at", normalized.filename
        return normalized
    def normalizeOriginals(self, csv_combined_originals, folder_out):
        # Build 'Derived.csv' with the useful + derived columns.
        rows_in = csv_combined_originals
        fieldnames = self.usefulFields() + self.derivedFields()
        rows_out = CsvObject(folder_out+'Derived.csv', fieldnames)
        normalized = self.recordsDerived(rows_in, rows_out)
        #normalized = self.sql.forEachRow(rows_in, rows_out)
        if False: print "\n Normalized records are at", normalized.filename
        return normalized
    #===========================================================================
    # recordsDerived
    #===========================================================================
    def recordsDerivedOld(self, rows_in, folder_out):
        # Older per-field variant; superseded by recordsDerived below.
        fieldnames = self.usefulFields() + self.derivedFields()
        rows_out = CsvObject(folder_out+'Derived.csv', fieldnames)
        rows_in.openRead(); rows_out.openWrite()
        for row in rows_in.reader:
            derived = dict()
            for x in rows_out.fieldnames:
                derived[x] = self.determineDerivedFields(x, row, derived)
            rows_out.writer.writerow([derived[x] for x in rows_out.fieldnames])
        rows_in.closeRead(); rows_out.closeWrite()
        return rows_out
    def recordsDerived(self, rows_in, rows_out):
        # Stream every input row through determineDerivedFields and write the
        # resulting values in rows_out's column order.
        rows_in.openRead(); rows_out.openWrite()
        for row in rows_in.reader:
            derived = self.determineDerivedFields(row, rows_in, rows_out)
            rows_out.writer.writerow([derived[x] for x in rows_out.fieldnames])
        rows_in.closeRead(); rows_out.closeWrite()
        return rows_out
|
# STRAND SORT
# It is a recursive comparison based sorting technique which sorts in increasing order.
# It works by repeatedly pulling sorted sub-lists out of the list to be sorted and merging them
# with a result array.
# Algorithm:
# Create an empty strand (list), pop the first element from the input array and append it to the strand
# Compare this element with the rest of elements of the input array
# if a greater element is found then pop and append it to strand otherwise skip
# Now merge this array to the final output array
# Recur for remaining items in strand and input array.
# Utility Function to merge two arrays
def merge(arr1, arr2):
    """Merge two sorted lists into a new sorted list.

    Index-based rewrite: the original popped from the front of each list,
    which is O(n) per pop (O(n^2) overall) and mutated the inputs; no caller
    relies on that mutation. Runs in O(len(arr1) + len(arr2)).
    """
    i = j = 0
    merged_list = []
    # Take the smaller head element while both lists have items left.
    while i < len(arr1) and j < len(arr2):
        if arr1[i] < arr2[j]:
            merged_list.append(arr1[i])
            i += 1
        else:
            merged_list.append(arr2[j])
            j += 1
    # At most one of these tails is non-empty; both are already sorted.
    merged_list.extend(arr1[i:])
    merged_list.extend(arr2[j:])
    return merged_list
# Function to return the strand (sorted sub-list)
def strand(arr):
    """Pop an increasing 'strand' (sorted sub-list) out of *arr* and return it.

    Starts with arr's first element, then repeatedly pulls out any later
    element greater than the strand's current tail. Mutates *arr* in place.

    Bug fix: the loop condition was ``while i > len(arr)``, which is false on
    entry, so the loop never ran and every strand was a single element.
    """
    s = [arr.pop(0)]
    i = 0
    while i < len(arr):
        if arr[i] > s[-1]:
            # Greater than the strand's tail: move it into the strand.
            s.append(arr.pop(i))
        else:
            # Leave it in arr for a later strand.
            i += 1
    return s
# Strand Sort Function
def strand_sort(arr):
    """Sort *arr* in increasing order via strand sort.

    Repeatedly extracts sorted strands from *arr* (which is consumed) and
    merges them into the running result.
    """
    result = strand(arr)
    while arr:
        result = merge(result, strand(arr))
    return result
# Driver Code — guarded so importing this module doesn't run the demo.
if __name__ == "__main__":
    # NOTE: strand_sort consumes its input list.
    arr = [1, 6, 3, 8, 2, 0, 9]
    print(strand_sort(arr))
# Time Complexity : O(n^2) [Worst]
#                   O(n*log(n)) [Average]
# Space Complexity : O(n)
# Stable : Yes
# Inplace : No
|
import string
def verify(isbn):
    """Return True if *isbn* is a valid ISBN-10 string, else False.

    Dashes are ignored. The first nine characters must be digits; the tenth
    may be a digit or 'X' (value 10). Valid when
    sum((10 - i) * value_i) % 11 == 0.

    Bug fix: the previous version computed the checksum eagerly with int(c),
    so lowercase letters, spaces, or a wrong length raised ValueError instead
    of returning False, and an 'X' anywhere contributed a flat 10 regardless
    of position.
    """
    digits = isbn.replace("-", "")
    if len(digits) != 10:
        return False
    # First nine must be ASCII digits (str.isdigit also accepts e.g. '²').
    if not all(c in "0123456789" for c in digits[:9]):
        return False
    last = digits[9]
    if last != "X" and last not in "0123456789":
        return False
    values = [int(c) for c in digits[:9]]
    values.append(10 if last == "X" else int(last))
    return sum((10 - i) * v for i, v in enumerate(values)) % 11 == 0
|
"""Degree-5 polynomial ridge regression on 7 points via batch gradient descent."""
import matplotlib.pyplot as plt
import numpy as np

plt.figure(1)
# Sample x values
x = np.array([-0.99768,
              -0.69574,
              -0.40373,
              -0.10236,
              0.22024,
              0.47742,
              0.82229])
m = len(x)
# Observed y values
y = np.array([2.0885,
              1.1646,
              0.3287,
              0.46013,
              0.44808,
              0.10013,
              -0.32952])
plt.scatter(x, y, marker=r'$\clubsuit$')
# Fix: plt.hold(True) was removed in matplotlib 3.0 (it raised
# AttributeError); "hold" behavior is the default, so the call is dropped.

# Design matrix: rows are the features 1, x, x^2, ..., x^5
x_train = np.array([np.ones(len(x)), x, np.power(x, 2), np.power(x, 3), np.power(x, 4), np.power(x, 5)])
y_train = (y[:, np.newaxis])
# h = theta0 + theta1*x + ... + theta5*x^5
theta = np.array(np.zeros((x_train.shape[0], 1)))
i = 0
alpha = 0.001  # learning rate
lamda = 10     # L2 regularization strength (bias term excluded)
preJ = 0
while (True):
    i = i + 1
    h = theta.T.dot(x_train)
    error = h.T - y_train
    # Regularized cost. Fix: divide by (2*m) — the old '/2*m' divided by 2
    # and then *multiplied* by m. The gradient step and the scale-invariant
    # preJ < J stopping test are unaffected; only the reported J changes.
    J = (error.T.dot(error) + lamda*theta[1::, 0].T.dot(theta[1::, 0])) / (2*m)
    print(J)
    if (preJ == 0):
        preJ = J
    if (preJ < J):
        # Stop as soon as the cost increases.
        break
    else:
        preJ = J
    tmp = alpha*x_train.dot(error)/m
    # Regularization applied to every coefficient except the bias.
    tmp2 = tmp[1::, 0] + alpha*lamda*theta[1::, 0]/m
    theta[0, 0::] = theta[0, 0::] - tmp[0, 0::]
    theta[1::, 0] = theta[1::, 0] - tmp2
print(theta)
y_pred = theta.T.dot(x_train)
plt.plot(x, y_pred.T)
plt.show()
|
from scrapy.http import Request
from scrapy.selector import Selector
from scrapy.spider import Spider
from yaome.items import YaomeItem
class Yaome_spider(Spider):
    """Spider for yaohuo.me: authenticates via hard-coded session cookies,
    follows the second link in the front page's 7th div, then yields
    (link, name) items for each anchor on the target page."""
    name = 'yaome'
    allowed_domains = ['yaohuo.me']
    # NOTE(review): hard-coded session cookies — they will expire and should
    # not live in source control; move to settings/environment.
    cookies = {
        'GUID':'9571651818064021',
        'ASP.NET_SessionId':'adkuv1yppgpfhkuuyg2nwn55',
        'GET35188':'',
        'sidyaohuo':'08F942A33923B98_C88_05187_30480_31001-2-0-0-0-0'
    }
    def start_requests(self):
        # Start at the front page with the auth cookies attached.
        return [Request('http://www.yaohuo.me/',cookies=self.cookies,callback=self.parse)]
    def parse(self,response):
        # Extract the href of the second anchor in the 7th body div and
        # follow it (site-layout dependent — verify if the markup changes).
        sel = Selector(response)
        ne = sel.xpath('/html/body/div[7]/a[2]/@href').extract()[0]
        yield Request('http://www.yaohuo.me{}'.format(ne),cookies=self.cookies,callback=self.tw_parse)
    def tw_parse(self,response):
        # One item per first anchor of each div: its href and link text.
        sel = Selector(response)
        links = sel.xpath('/html/body/div/a[1]')
        for le in links:
            item = YaomeItem()
            item['link'] = le.xpath('.//@href').extract()[0]
            item['name'] = le.xpath('.//text()').extract()[0]
            yield item
|
# -*- coding: utf-8
"""Integration tests for command response handling."""
# pylint: disable=missing-docstring,too-few-public-methods
from itertools import imap
from textwrap import dedent
from twisted.internet.defer import inlineCallbacks, fail, succeed
from twisted.trial.unittest import TestCase
from ...connection import MAX_REPLY_LENGTH
from ...message import Message, MessageType, collapse
from ...plugin import EventPlugin, UserVisibleError
from ...plugins.more import Default as More
from ..helpers import ConnectionTestMixin, CommandTestMixin, OutgoingPlugin
class CommandMonitorMixin(CommandTestMixin):
    """CommandTestMixin variant that enables the '!' command prefix, an
    outgoing-message monitor plugin, and a helper for issuing 'more'
    buffer-continuation requests."""
    def setUp(self):
        super(CommandMonitorMixin, self).setUp()
        self.connection.settings.set('command_prefixes', ['!'])
        self.outgoing = self.connection.settings.enable(
            OutgoingPlugin.name, [])
        self.connection.joined('#foo')
    def more(self, **kwargs):
        # Issue a 'more' command on behalf of the first simulated user and
        # return the reply Deferred; errors become reply messages.
        request = self.command_message(
            '', subaction='more', target=self.other_users[0].nick, **kwargs)
        deferred = self.connection.buffer_and_reply(
            More().on_command(request), request)
        deferred.addErrback(self.connection.reply_from_error, request)
        return deferred
#
# Basic commands
#
class BasicCommand(EventPlugin):
    """Test command whose behavior is steered by its arguments: 'defer' wraps
    the result in a Deferred, 'failure' raises/fails, and 'visible' makes the
    error a UserVisibleError; otherwise it replies with the long quote."""
    quote = collapse("""Deliquatue volut pulvinar feugiat eleifend
        quisque suspendisse faccummy etuerci; vullandigna praestie hac
        consectem ipisim esequi. Facidui augiam proin nisit diamet ing.
        Incinim iliquipisl ero alit amconsecte adionse loborer
        odionsequip sagittis, iuscipit hent dipiscipit. Molore proin
        consecte min amcommo; lobortio platea loboreet il consequis. Lan
        ullut corem esectem vercilisit delent exer, feu inciduipit feum
        in augait vullam. Tortor augait dignissim.""")
    def on_command(self, msg):
        args = msg.content.split()
        # Error class depends on whether the failure should be user-visible.
        exc_class = UserVisibleError if 'visible' in args else Exception
        exc = exc_class('Lorem ipsum.')
        if 'defer' in args:
            if 'failure' in args:
                return fail(exc)
            return succeed(self.quote)
        if 'failure' in args:
            raise exc
        return self.quote
class BasicCommandTestCase(CommandMonitorMixin, TestCase):
    """End-to-end tests of BasicCommand replies: success (public/private),
    hidden vs. user-visible errors, synchronous vs. Deferred results, and
    'more' continuation of buffered output."""
    command_class = BasicCommand
    def assert_success(self, deferred_result=None):
        # Public reply goes to the channel, truncated with a '(+1 more)' tail.
        self.assertEqual(self.outgoing.last_seen.action, MessageType.privmsg)
        self.assertEqual(self.outgoing.last_seen.venue, '#foo')
        self.assertEqual(self.outgoing.last_seen.content, collapse("""
            \x0314party3: Deliquatue volut pulvinar feugiat eleifend
            quisque suspendisse faccummy etuerci; vullandigna praestie
            hac consectem ipisim esequi. Facidui augiam proin nisit
            diamet ing. Incinim iliquipisl ero alit amconsecte adionse
            loborer odionsequip sagittis, (+1 more)"""))
        self.more(venue='#foo')
        self.assertEqual(self.outgoing.last_seen.content, collapse("""
            \x0314{}: iuscipit hent dipiscipit. Molore proin consecte
            min amcommo; lobortio platea loboreet il consequis. Lan
            ullut corem esectem vercilisit delent exer, feu inciduipit
            feum in augait vullam. Tortor augait dignissim."""
            .format(self.other_users[0].nick)))
    def assert_success_private(self):
        # Private replies use NOTICE to the requesting user, no nick prefix.
        self.assertEqual(self.outgoing.last_seen.action, MessageType.notice)
        self.assertEqual(self.outgoing.last_seen.venue,
                         self.other_users[0].nick)
        self.assertEqual(self.outgoing.last_seen.content, collapse("""
            Deliquatue volut pulvinar feugiat eleifend quisque
            suspendisse faccummy etuerci; vullandigna praestie hac
            consectem ipisim esequi. Facidui augiam proin nisit diamet
            ing. Incinim iliquipisl ero alit amconsecte adionse loborer
            odionsequip sagittis, (+1 more)"""))
        self.more(venue=self.connection.nickname)
        self.assertEqual(self.outgoing.last_seen.content, collapse("""
            iuscipit hent dipiscipit. Molore proin consecte min amcommo;
            lobortio platea loboreet il consequis. Lan ullut corem
            esectem vercilisit delent exer, feu inciduipit feum in
            augait vullam. Tortor augait dignissim."""))
    def assert_hidden_error(self, deferred_result=None):
        # Generic errors are masked with a stock message and logged.
        self.assertEqual(self.outgoing.last_seen.action, MessageType.privmsg)
        self.assertEqual(self.outgoing.last_seen.venue, '#foo')
        self.assertEqual(self.outgoing.last_seen.content, collapse("""
            \x0314{}: Command \x02basiccommand\x02 encountered an error.
            """.format(self.other_users[0].nick)))
        self.assertLoggedErrors(1)
    def assert_visible_error(self, deferred_result=None):
        # UserVisibleError messages are relayed verbatim to the venue.
        self.assertEqual(self.outgoing.last_seen.action, MessageType.privmsg)
        self.assertEqual(self.outgoing.last_seen.venue, '#foo')
        self.assertEqual(self.outgoing.last_seen.content, collapse("""
            \x0314{}: Lorem ipsum.""".format(self.other_users[0].nick)))
    def test_empty_buffer(self):
        # 'more' with nothing buffered reports "No results."
        self.more(venue='#foo')
        self.assertEqual(self.outgoing.last_seen.action, MessageType.privmsg)
        self.assertEqual(self.outgoing.last_seen.venue, '#foo')
        self.assertEqual(self.outgoing.last_seen.content, collapse("""
            \x0314{}: No results.""".format(self.other_users[0].nick)))
    def test_synchronous_success(self):
        self.receive('PRIVMSG #foo :!basiccommand > party3')
        self.assert_success()
    def test_synchronous_success_private(self):
        self.receive('PRIVMSG {} :basiccommand > party3'.format(
            self.connection.nickname))
        self.assert_success_private()
    def test_synchronous_success_private_with_prefix(self):
        self.receive('PRIVMSG {} :!basiccommand > party3'.format(
            self.connection.nickname))
        self.assert_success_private()
    def test_synchronous_hidden_error(self):
        self.receive('PRIVMSG #foo :!basiccommand failure > party3')
        self.assert_hidden_error()
    def test_synchronous_visible_error(self):
        self.receive('PRIVMSG #foo :!basiccommand failure visible > party3')
        self.assert_visible_error()
    def test_deferred_success(self):
        d = self.receive('PRIVMSG #foo :!basiccommand defer > party3')
        d.addCallback(self.assert_success)
        return d
    def test_deferred_hidden_error(self):
        d = self.receive('PRIVMSG #foo :!basiccommand defer failure > party3')
        d.addCallback(self.assert_hidden_error)
        return d
    def test_deferred_visible_error(self):
        d = self.receive(
            'PRIVMSG #foo :!basiccommand defer failure visible > party3')
        d.addCallback(self.assert_visible_error)
        return d
#
# Commands returning iterators
#
class DeferredIterator(object):
    """Python 2 iterator yielding succeed('0'), succeed('1'), ... up to
    *maximum* results, with a failed Deferred injected at index *raise_on*."""
    def __init__(self, maximum, raise_on):
        self.count = -1
        self.maximum = maximum
        self.raise_on = raise_on
    def __iter__(self):
        return self
    def next(self):
        # Python 2 iterator protocol (would be __next__ in Python 3).
        self.count += 1
        if self.count >= self.maximum:
            raise StopIteration
        if self.count == self.raise_on:
            return fail(Exception())
        return succeed(str(self.count))
class IteratorCommand(EventPlugin):
    """Test command returning an iterator of two replies; 'defer N' switches
    to Deferred results with a failure injected at index N."""
    def on_command(self, msg):
        args = msg.content.split()
        if args and args[0] == 'defer':
            # raise_on defaults to -2 (never fires) when no index is given.
            return DeferredIterator(2, raise_on=int(''.join(args[1:]) or -2))
        return imap(str, xrange(2))
class IteratorCommandTestCase(CommandMonitorMixin, TestCase):
    """Tests that iterator-returning commands are drained one item per 'more',
    including Deferred items and mid-iteration failures."""
    command_class = IteratorCommand
    def test_synchronous(self):
        # Each reply yields one item; 'more' advances the iterator.
        self.receive('PRIVMSG #foo :!iteratorcommand > party3')
        self.assertEqual(self.outgoing.last_seen.action, MessageType.privmsg)
        self.assertEqual(self.outgoing.last_seen.venue, '#foo')
        self.assertEqual(self.outgoing.last_seen.content,
                         '\x0314party3: 0')
        self.more(venue='#foo')
        self.assertEqual(self.outgoing.last_seen.content,
                         '\x0314{}: 1'.format(self.other_users[0].nick))
        self.more(venue='#foo')
        self.assertEqual(self.outgoing.last_seen.content,
                         '\x0314{}: No results.'
                         .format(self.other_users[0].nick))
    @inlineCallbacks
    def test_deferred(self):
        # Same as above, but every item arrives as a Deferred.
        yield self.receive('PRIVMSG #foo :!iteratorcommand defer > party3')
        self.assertEqual(self.outgoing.last_seen.action, MessageType.privmsg)
        self.assertEqual(self.outgoing.last_seen.venue, '#foo')
        self.assertEqual(self.outgoing.last_seen.content,
                         '\x0314party3: 0')
        yield self.more(venue='#foo')
        self.assertEqual(self.outgoing.last_seen.content,
                         '\x0314{}: 1'.format(self.other_users[0].nick))
        yield self.more(venue='#foo')
        self.assertEqual(self.outgoing.last_seen.content,
                         '\x0314{}: No results.'
                         .format(self.other_users[0].nick))
    @inlineCallbacks
    def test_deferred_error(self):
        # A failure at index 1 surfaces as a 'more' error, then iteration ends.
        yield self.receive('PRIVMSG #foo :!iteratorcommand defer 1 > party3')
        self.assertEqual(self.outgoing.last_seen.action, MessageType.privmsg)
        self.assertEqual(self.outgoing.last_seen.venue, '#foo')
        self.assertEqual(self.outgoing.last_seen.content,
                         '\x0314party3: 0')
        yield self.more(venue='#foo')
        self.assertEqual(self.outgoing.last_seen.content, collapse("""
            \x0314{}: Command \x02more\x02 encountered an error."""
            .format(self.other_users[0].nick)))
        yield self.more(venue='#foo')
        self.assertEqual(self.outgoing.last_seen.content,
                         '\x0314{}: No results.'
                         .format(self.other_users[0].nick))
        self.assertLoggedErrors(1)
#
# Unicode replies
#
class UnicodeCommand(EventPlugin):
    """Test command replying with two snowman (U+2603) strings to exercise
    Unicode encoding of replies."""
    def on_command(self, msg):
        return [u'☃'] * 2
class UnicodeReplyTestCase(CommandMonitorMixin, TestCase):
    """Verifies Unicode replies arrive UTF-8 encoded (the asserted byte
    sequence \\xe2\\x98\\x83 is U+2603 SNOWMAN in UTF-8)."""
    command_class = UnicodeCommand
    @inlineCallbacks
    def test_synchronous(self):
        self.receive('PRIVMSG #foo :!unicodecommand')
        self.assertEqual(self.outgoing.last_seen.action, MessageType.privmsg)
        self.assertEqual(self.outgoing.last_seen.venue, '#foo')
        self.assertEqual(self.outgoing.last_seen.content,
                         '\x0314{}: \xe2\x98\x83 (+1 more)'
                         .format(self.other_users[0].nick))
        yield self.more(venue='#foo')
        self.assertEqual(self.outgoing.last_seen.content,
                         '\x0314{}: \xe2\x98\x83'
                         .format(self.other_users[0].nick))
#
# Individual reply truncation
#
class ReplyTruncationTestCase(ConnectionTestMixin, TestCase):
    """Verifies individual replies are truncated with a '...' tail, for both
    byte strings and multi-line Unicode text (lines joined with ' / ')."""
    def setUp(self):
        super(ReplyTruncationTestCase, self).setUp()
        self.outgoing = self.connection.settings.enable(OutgoingPlugin.name)
        self.request = Message(self.connection, False, 'command',
            actor=self.other_users[0], subaction='spam',
            venue=self.connection.nickname, target=self.other_users[0].nick)
    def test_str(self):
        # Overlong byte-string reply is cut at the length limit with '...'.
        self.connection.reply(collapse("""\
            Iliquat dictum patin rilit aciduipis, sectem nummolorem
            esequat. Alisis nummolorem ros quatuer iuscing iure nonsequ,
            ad commy congue, faccummy aut esequat quisi. Eugiam velis
            odipsumsan ate a sismolore. Magniat vero sociosqu, mauris
            quamconsequi irilit urna niscidu consequis, magniamet
            aciduipis utet. Justo consequipis os, dolummy tempor nulla
            vel corem adignim, sociis ate verostin."""), self.request)
        self.assertEqual(self.outgoing.last_seen.content, collapse("""\
            Iliquat dictum patin rilit aciduipis, sectem nummolorem
            esequat. Alisis nummolorem ros quatuer iuscing iure nonsequ,
            ad commy congue, faccummy aut esequat quisi. Eugiam velis
            odipsumsan ate a sismolore. Magniat vero sociosqu, mauris
            quamconsequi irilit urna niscidu consequis, magniamet a...
            """))
    def test_unicode(self):
        # Multi-line Unicode reply: newlines become ' / ', truncation respects
        # character boundaries, and the result is UTF-8 encoded.
        self.connection.reply(dedent(u"""\
            《施氏食狮史》
            石室诗士施氏,嗜狮,誓食十狮。
            氏时时适市视狮。
            十时,适十狮适市。
            是时,适施氏适市。
            氏视是十狮,恃矢势,使是十狮逝世。
            氏拾是十狮尸,适石室。
            石室湿,氏使侍拭石室。
            石室拭,氏始试食是十狮。
            食时,始识是十狮尸,实十石狮尸。
            试释是事。"""), self.request)
        self.assertEqual(self.outgoing.last_seen.content, collapse(u"""\
            《施氏食狮史》 /
            石室诗士施氏,嗜狮,誓食十狮。 /
            氏时时适市视狮。 /
            十时,适十狮适市。 /
            是时,适施氏适市。 /
            氏视是十狮,恃矢势,使是十狮逝世。 /
            氏拾是十狮尸,适石室。 /
            石室湿,氏使侍拭石室。 /
            石...""").encode('utf-8'))
class LongReplyWithMoreCommand(EventPlugin):
    """Test command producing 999 oversized replies to exercise truncation
    combined with the '(+N more)' buffer tail."""
    def on_command(self, msg):
        return ['*' * 999] * 999
class LongReplyWithMoreTestCase(CommandMonitorMixin, TestCase):
    """Verifies the '(+N more)' tail stays visible after truncation and the
    remaining-count decrements with each 'more'."""
    command_class = LongReplyWithMoreCommand
    def test_more_tag_visible(self):
        self.receive('PRIVMSG {} :longreplywithmorecommand'.format(
            self.connection.nickname))
        self.assertEqual(self.outgoing.last_seen.content,
                         '*' * MAX_REPLY_LENGTH + '... (+998 more)')
        self.more()
        self.assertEqual(self.outgoing.last_seen.content,
                         '*' * MAX_REPLY_LENGTH + '... (+997 more)')
|
# Generated by Django 3.1.4 on 2020-12-11 22:47
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration for the services app (Django 3.1.4):
    creates ServiceCategory, Services, ServiceBooking and BookedServices.
    Do not hand-edit field definitions after this has been applied."""
    initial = True
    dependencies = [
        ('facility', '0001_initial'),
        ('members', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='ServiceCategory',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                ('icon', models.ImageField(upload_to='service_icon')),
                ('is_billable', models.BooleanField()),
                ('is_printable', models.BooleanField()),
                ('is_remarkable', models.BooleanField(default=False)),
            ],
        ),
        migrations.CreateModel(
            name='Services',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                ('amount', models.IntegerField(blank=True, null=True)),
                ('cat_name', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='services.servicecategory')),
            ],
        ),
        migrations.CreateModel(
            name='ServiceBooking',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('guest_name', models.CharField(blank=True, max_length=150, null=True)),
                ('guest_phone', models.CharField(blank=True, max_length=11, null=True)),
                ('guest_email', models.EmailField(blank=True, max_length=254, null=True)),
                ('payment_status', models.BooleanField()),
                ('is_completed', models.BooleanField()),
                ('date', models.DateTimeField(auto_now_add=True)),
                ('payment_date', models.DateTimeField(blank=True, null=True)),
                ('complete_date', models.DateTimeField(blank=True, null=True)),
                ('printable', models.FileField(blank=True, null=True, upload_to='')),
                ('Desc', models.TextField(blank=True, null=True)),
                ('facility', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='facility.facility')),
                ('member', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='members.member')),
                ('service_cat', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='services.servicecategory')),
            ],
        ),
        migrations.CreateModel(
            name='BookedServices',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('quantity', models.IntegerField(default=0)),
                ('amount_payable', models.IntegerField()),
                ('service', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='services.services')),
                ('service_booking', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='services.servicebooking')),
            ],
        ),
    ]
|
import csv
import xml.dom.minidom
import sys
def GPSCoord(row):
    """Return the row's position as a 'longitude,latitude' string (KML order)."""
    return '{},{}'.format(row['longitude'], row['latitude'])
def createPlacemark(kmlDoc, row, order):
    """Build and return a <Placemark> element for one CSV row (a dict).

    The placemark gets a <name> from row['ID'], an <ExtendedData> block with
    one <Data> entry per non-empty field listed in *order*, a <styleUrl>
    pointing at '#caseStyle', and a <Point> with the row's coordinates.
    """
    def text_element(tag, text):
        # Helper: build <tag>text</tag> without attaching it anywhere.
        element = kmlDoc.createElement(tag)
        element.appendChild(kmlDoc.createTextNode(text))
        return element

    placemark = kmlDoc.createElement('Placemark')
    # Change 'ID' to the CSV column heading holding the desired labels.
    placemark.appendChild(text_element('name', row['ID']))
    extended = kmlDoc.createElement('ExtendedData')
    placemark.appendChild(extended)
    placemark.appendChild(text_element('styleUrl', '#caseStyle'))
    # One <Data name="..."><value>...</value></Data> per field with a value.
    for key in order:
        if row[key]:
            data = kmlDoc.createElement('Data')
            data.setAttribute('name', key)
            data.appendChild(text_element('value', row[key]))
            extended.appendChild(data)
    point = kmlDoc.createElement('Point')
    placemark.appendChild(point)
    point.appendChild(text_element('coordinates', GPSCoord(row)))
    return placemark
def createKML(csvReader, fileName, order):
    """Construct a KML document from CSV rows and write it to *fileName*.

    Fixes: per the KML schema, 'id' is an XML *attribute* on <Style> and
    <StyleMap>, not a child element — the previous <id> child elements meant
    the '#NormIconID'/'#HoverIconID'/'#caseStyle' styleUrl references never
    resolved. The normal style's <Icon> is now wrapped in <IconStyle> as the
    schema requires, and the output file is closed via a context manager.
    """
    kmlDoc = xml.dom.minidom.Document()
    kmlElement = kmlDoc.createElementNS('http://earth.google.com/kml/2.2', 'kml')
    kmlElement.setAttribute('xmlns', 'http://earth.google.com/kml/2.2')
    kmlElement = kmlDoc.appendChild(kmlElement)
    documentElement = kmlDoc.createElement('Document')
    documentElement = kmlElement.appendChild(documentElement)

    def _text_child(parent, tag, text):
        # Append <tag>text</tag> under parent and return the new element.
        el = kmlDoc.createElement(tag)
        el.appendChild(kmlDoc.createTextNode(text))
        parent.appendChild(el)
        return el

    def _add_style(style_id, icon_href, icon_scale, label_scale):
        # Build <Style id="..."> with IconStyle/Icon/href and LabelStyle/scale.
        styleElement = kmlDoc.createElement('Style')
        styleElement.setAttribute('id', style_id)  # id must be an attribute
        documentElement.appendChild(styleElement)
        iconStyle = kmlDoc.createElement('IconStyle')
        styleElement.appendChild(iconStyle)
        icon = kmlDoc.createElement('Icon')
        iconStyle.appendChild(icon)
        # Change 'blu-diamond.png' to a web address or any image in the same
        # directory as the CSV + application; it can be embedded when the KML
        # is converted to KMZ (in Google Earth).
        _text_child(icon, 'href', icon_href)
        if icon_scale is not None:
            # Change place-marker scaling to the desired value.
            _text_child(iconStyle, 'scale', icon_scale)
        labelStyle = kmlDoc.createElement('LabelStyle')
        styleElement.appendChild(labelStyle)
        # Change label scaling to the desired value.
        _text_child(labelStyle, 'scale', label_scale)

    # Normal and hovered place-marker styles (same scales as before).
    _add_style('NormIconID', 'blu-diamond.png', None, '0.4')
    _add_style('HoverIconID', 'blu-diamond.png', '1.2', '0.44')

    # StyleMap pairing the normal/highlight styles; placemarks reference it
    # through '#caseStyle'.
    styleMapElement = kmlDoc.createElement('StyleMap')
    styleMapElement.setAttribute('id', 'caseStyle')
    documentElement.appendChild(styleMapElement)
    for key, url in (('normal', '#NormIconID'), ('highlight', '#HoverIconID')):
        pairElement = kmlDoc.createElement('Pair')
        styleMapElement.appendChild(pairElement)
        _text_child(pairElement, 'key', key)
        _text_child(pairElement, 'styleUrl', url)

    # Skip the header line (DictReader was given explicit fieldnames, so the
    # first yielded row would otherwise be the header itself).
    next(csvReader)
    for row in csvReader:
        documentElement.appendChild(createPlacemark(kmlDoc, row, order))
    with open(fileName, 'wb') as kmlFile:
        kmlFile.write(kmlDoc.toprettyxml(' ', newl='\n', encoding='utf-8'))
def main():
    """Convert 'data.csv' into 'Output.kml'.

    If a CLI argument is given it is split on commas and used as the column
    order; otherwise the sample's default order is used.
    """
    if len(sys.argv) > 1:
        order = sys.argv[1].split(',')
    else:
        # Change to match the columns in your CSV, in any order you like.
        order = ['latitude', 'longitude', 'ID', 'Column3', 'column9', 'column5', 'columnN']
    # Change 'data.csv' to your input file name (same folder as the script).
    csvreader = csv.DictReader(open('data.csv'), order)
    # Change 'Output.kml' to the desired output name.
    kml = createKML(csvreader, 'Output.kml', order)


if __name__ == '__main__':
    main()
__author__ = 'Danyang'
class TreeNode:
    """Binary-tree node holding a value and optional left/right children."""
    def __init__(self, x):
        self.val = x
        self.left = self.right = None
class Solution:
    def sumNumbers(self, root):
        """Sum every root-to-leaf number, where each path's digits are the
        node values read from root to leaf."""
        paths = []
        self.dfs(root, "", paths)
        return sum(int(number) for number in paths)

    def dfs(self, root, cur, result):
        """Collect each root-to-leaf digit string into *result*.

        *cur* is an immutable string, so each recursive branch carries its
        own prefix without interference.
        """
        if root is None:
            return
        cur += str(root.val)
        if root.left is None and root.right is None:
            result.append(cur)
            return
        for child in (root.left, root.right):
            if child:
                self.dfs(child, cur, result)

    def dfs_error(self, root, cur, result):
        """Broken variant kept for illustration: *cur* is a mutable list
        shared between branches, so sibling paths corrupt each other."""
        if root is None:
            return
        cur.append(root.val)
        if root.left is None and root.right is None:
            result.append(cur)
            return
        for child in (root.left, root.right):
            if child:
                self.dfs_error(child, cur, result)
if __name__=="__main__":
    # Smoke test: tree 0 -> (1, 3) yields paths "01" and "03" (sum 4).
    nodes = [TreeNode(0), TreeNode(1), TreeNode(3)]
    nodes[0].left = nodes[1]
    nodes[0].right = nodes[2]
    Solution().sumNumbers(nodes[0])
"""
Merge Sort
Invented by John Von Neumann:
http://en.wikipedia.org/wiki/Merge_sort
"""
import unittest
from random import sample
class TestMergeSort(unittest.TestCase):
    """Sanity checks for mergesort()."""

    def test_1(self):
        N = 1000
        # Strictly decreasing input.
        v = list(reversed(range(N)))
        self.assertEqual(mergesort(v), list(range(N)))
        # Random input, compared against the built-in sort.
        v = sample(range(10000000), N)
        # BUG FIX: the original aliased vsort to v and sorted it in
        # place, so mergesort() was fed already-sorted data and the
        # random case tested nothing.  Sort a copy instead.
        vsort = sorted(v)
        self.assertEqual(vsort, mergesort(v))
        # Edge cases: empty and single-element lists.
        self.assertEqual([], mergesort([]))
        self.assertEqual([10], mergesort([10]))
def mergesort(A):
    """Sort the list *A* with top-down merge sort; returns a sorted list."""
    if len(A) <= 1:
        # Zero or one element: already sorted.
        return A
    mid = len(A) // 2
    left, right = A[:mid], A[mid:]
    return merge(mergesort(left), mergesort(right))
def merge(a, b):
    """Merge two sorted lists into one sorted list.

    Runs in O(len(a) + len(b)).  The original popped from the front of
    each list (list.pop(0) is O(n), making the whole merge quadratic)
    and emptied its arguments as a side effect; this version walks two
    index pointers and leaves *a* and *b* untouched.  mergesort() only
    ever passes throwaway slices, so callers are unaffected.
    """
    res = []
    i = j = 0
    while i < len(a) and j < len(b):
        if a[i] < b[j]:
            res.append(a[i])
            i += 1
        else:
            res.append(b[j])
            j += 1
    # At most one of these extends; the other slice is empty.
    res.extend(a[i:])
    res.extend(b[j:])
    return res
if __name__ == "__main__":
    # Run the unit tests when executed directly.
    unittest.main()
|
import numpy as np
import json
import math
from PreProcess import current_milli_time, getProcessedConcepts, data_dir
concepts, raw = getProcessedConcepts(path_to_concept_file=data_dir + "AllConcepts.txt")

# Load the MS-COCO training captions (annotations -> caption).
with open(data_dir + "captions_train2014.json", "r", encoding="utf-8") as f:
    dataStore = json.load(f)

# Build the vocabulary.  VOCAB maps each word to the number of captions
# it appears in (words are deduped per caption via set()), so this is a
# document frequency rather than a raw occurrence count.
VOCAB = {}
for annotation in dataStore['annotations']:
    caption = annotation['caption'].replace(",", "").lower()
    for word in set(caption.split(" ")):
        VOCAB[word] = VOCAB.get(word, 0) + 1
def idf(word):
    """Inverse document frequency of *word* over the caption vocabulary.

    Unknown words receive the maximum weight log(len(VOCAB)), i.e. they
    are treated as if they occurred exactly once.
    """
    count = VOCAB.get(word)
    if count is None:
        # Rare/unseen word: maximal weight.
        return math.log(len(VOCAB))
    return math.log(len(VOCAB) / count)
def match(inputQuery):
    """Score every known concept against *inputQuery*.

    Returns a numpy array aligned with `concepts`; entry i is the summed
    idf() of the query words appearing in concept i (0 when none match).
    Generic person words are skipped because they match almost anything.
    Matching concepts and their score*match_count are logged.
    """
    print("Finding best match for " + inputQuery)
    st = current_milli_time()
    arr = np.zeros(len(concepts))
    foundMatches = []
    scores = []
    query_words = inputQuery.split(" ")
    for idx, concept in enumerate(concepts):
        concept_set = set(concept.split(" "))
        score = 0
        count = 0
        for word in query_words:
            # Skip overly generic person words.
            if "person" in word or "man" in word or "people" in word:
                continue
            if word in concept_set:
                count += 1
                score += idf(word)
        arr[idx] = score
        if score > 0:
            foundMatches.append(concept)
            scores.append(score * count)
    print("Found matches: {}".format(str(foundMatches)))
    print("Scores of the match: {}".format(str(scores)))
    print("Found in {} milli secs".format(current_milli_time() - st))
    return arr
|
"""
Copyright (c) 2014 Sandia Corporation.
Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
the U.S. Government retains certain rights in this software.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
# CLI entry point for the Oxide shell (Python 2 -- note the print
# statement below).  With -r/--remote ip:port it attaches a remote
# shell; otherwise it starts a local one.
import optparse, sys
parser = optparse.OptionParser()
parser.add_option("-r", "--remote", action="store", type=str, dest="remote",
                  help="The server ip:port I'm connecting to")
(options, args) = parser.parse_args()
if __name__ == "__main__":
    if options.remote:
        # The argument must be exactly "ip:port".
        if len(options.remote.split(":")) != 2:
            parser.print_help()
            exit(1)
        ip = str(options.remote.split(":")[0])
        port = int(options.remote.split(":")[1])
        print " - Connecting to remote server %s:%s" % (ip, port)
        # Drop our own CLI args so the shell's cmdloop doesn't see them.
        sys.argv = sys.argv[:1]
        import core.rshell as rshell
        rs = rshell.RemoteOxideShell(ip, port)
        rs.cmdloop()
    else:
        import core.oshell as oshell
        oshell.OxideShell().cmdloop()
|
#!/usr/bin/python3
def main():
    """Interactively remove a key from a demo dictionary.

    Prints the dictionary, reads a key from the user, pops it when
    present, and prints the resulting dictionary.
    """
    colors = {'1': "green", '2': "black", '3': "white", '4': "red"}
    print(colors)
    key = input("Enter key to remove from dictionary=")
    if key in colors:
        colors.pop(key)
    print(colors)
if __name__=='__main__':
    # Script entry point.
    main()
|
import os
import torch
import numpy as np
import random
def set_seed(seed):
    """Seed every RNG used by this project for reproducible runs.

    Covers Python's hash seed, the stdlib `random` module, NumPy, and
    PyTorch on CPU and all GPUs; also forces deterministic cuDNN kernels.

    :param seed: integer seed applied to every source of randomness
    """
    os.environ['PYTHONHASHSEED'] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)           # CPU RNG
    torch.cuda.manual_seed(seed)      # current GPU (no-op without CUDA)
    torch.cuda.manual_seed_all(seed)  # every GPU
    # Trade speed for reproducibility in cuDNN convolutions.
    torch.backends.cudnn.deterministic = True
# set_seed(1)
# print('#randam:\t', torch.rand(5))
|
# Two-car incident-dispatch DP (comments translated from Korean).
# Author's notes on possible speed-ups:
#  - dis() is recomputed for the same pairs; it could be memoized too.
#  - If, for every dp[col][col-1], we stored which row it came from,
#    the backtracking pass would not need to re-scan rows.
#  - mv = 2*W*N is recomputed often and could be cached.
N=int(input())
W=int(input())
# wl holds the two starting positions (1,1) and (N,N) followed by the
# W incident coordinates, so incident k lives at index k+2.
wl=[[1,1],[N,N]]
for i in range(W):
    wl.append(list(map(int,input().split())))
def dis(x,y):
    # Manhattan distance between stored positions x and y.
    return abs(wl[x][0]-wl[y][0])+abs(wl[x][1] - wl[y][1])
dp=[[0]*(W+2) for i in range(W+2)]
car=[[-1]*(W+2) for i in range(W+2)]
car[1][0]=1 # 0 1
# Store, per route, the minimum distance and which car moved (DP).
# dp[i][j]: incident i-2 has just been handled (the car that moved last
# is there) while the other car sits at incident j-2's position.
# The initial (1,1) and (N,N) positions are prepended -- hence the +2
# index offset.
for i in range(2,W+2):
    mv = 2 * N * W
    for j in range(i-1):
        dp[i][j]=dp[i-1][j]+dis(i,i-1)
        car[i][j]=car[i-1][j]
        if mv>dp[i-1][j]+dis(i,j):
            mv=dp[i-1][j]+dis(i,j)
            car[i][i-1]=(car[i-1][j]+1)%2
    dp[i][i-1]=mv
# The minimum total distance is the smallest entry of the last row;
# remember its column (r) for the backtracking pass.
mv = 2 * N * W
for i in range(W+1):
    if mv>dp[W+1][i]:
        mv=dp[W+1][i]
        r=i
c=W+1
print(mv)
# Backtrack to collect which car handled each incident.
li=[(c,r)]
while 2<c:
    if r==c-1:
        for j in range(c-1):
            if dp[c - 1][j] + dis(c, j) == dp[c][r]:
                r=j
                break
    c-=1
    li.append((c, r))
# The list was filled in reverse order, so print it backwards.
for i in range(W-1,-1,-1):
    print((car[li[i][0]][li[i][1]]+1))
class Rectangle:
    """Axis-aligned rectangle with basic geometry helpers."""

    def __init__(self, width, height):
        self.width = width
        self.height = height

    def set_width(self, width):
        self.width = width

    def set_height(self, height):
        self.height = height

    def get_area(self):
        """Return width * height."""
        return self.width * self.height

    def get_perimeter(self):
        """Return the perimeter 2 * (width + height)."""
        return 2 * (self.width + self.height)

    def get_diagonal(self):
        """Return the diagonal length (Pythagorean theorem)."""
        return (self.width ** 2 + self.height ** 2) ** 0.5

    def get_picture(self):
        """Return an ASCII picture: `height` rows of `width` asterisks.

        Rectangles wider or taller than 50 are refused with a message.
        """
        if self.width > 50 or self.height > 50:
            return "Too big for picture."
        row = '*' * self.width + '\n'
        return row * self.height

    def get_amount_inside(self, shape):
        """Return how many copies of *shape* tile into this rectangle
        without rotation or overlap."""
        return (self.width // shape.width) * (self.height // shape.height)

    def __str__(self):
        return f'Rectangle(width={self.width}, height={self.height})'
class Square(Rectangle):
    """A Rectangle constrained to equal sides.

    Every setter keeps width and height in lock-step so the shape stays
    square no matter which attribute callers adjust.
    """

    def __init__(self, length):
        self.width = length
        self.height = length

    def set_side(self, side):
        self.width = self.height = side

    def set_width(self, width):
        self.width = self.height = width

    def set_height(self, height):
        self.width = self.height = height

    def __str__(self):
        return f'Square(side={self.width})'
# Demo: exercise the Rectangle and Square APIs.
rect = Rectangle(10, 5)
print(rect.get_area())
rect.set_height(3)
print(rect.get_perimeter())
print(rect)
print(rect.get_picture())
sq = Square(9)
print(sq.get_area())
sq.set_side(4)
print(sq.get_diagonal())
print(sq)
print(sq.get_picture())
rect.set_height(8)
rect.set_width(16)
# A 16x8 rectangle fits eight 4x4 squares.
print(rect.get_amount_inside(sq))
import PIL,os
from PIL import Image
import numpy as np
from scipy import linalg as LA
class PoolImages():
    """Eigenfaces-style training pool.

    Loads every image under *directory* as a flat grayscale vector,
    computes the mean image, per-image differences from the mean, the
    eigenvectors of the small matrix L = D D^T, and each training
    image's projection onto those eigenvectors.  Assumes all images
    share the same dimensions.
    """

    def __init__(self,directory):
        #set of images to compare with the testimage
        self.full_file_paths = self.get_filepaths(directory)#"./photos/training"
        print('number of training examples:', len(self.full_file_paths))
        self.images = []
        self.SetImagesFromFolder()
        #determine size each vector (all images assumed equal size)
        self.imsize = len(self.images[0])
        #calculate the average
        self.Nimages = len(self.images)
        self.Avimage = []
        self.CalcAverage()
        #calculate diff
        self.diffimages = [0 for i in range(self.Nimages)]
        self.CalcDiffImagesAv()
        #compute the eigenvectors of the C=L^T
        self.uvecs = [[0.0 for j in range(self.imsize)] for i in range(self.Nimages)]
        self.CalcEigenvectors()
        #calc images projections:
        self.projdiffimages = [[0 for j in range(self.Nimages)] for i in range(self.Nimages)]
        self.CalcProjectionImages()

    def get_filepaths(self,directory):
        """
        This function will generate the file names in a directory
        tree by walking the tree either top-down or bottom-up. For each
        directory in the tree rooted at directory top (including top itself),
        it yields a 3-tuple (dirpath, dirnames, filenames).
        """
        file_paths = [] # List which will store all of the full filepaths.
        # Walk the tree.
        for root, directories, files in os.walk(directory):
            for filename in files:
                # Join the two strings in order to form the full filepath.
                filepath = os.path.join(root, filename)
                file_paths.append(filepath) # Add it to the list.
        return file_paths # Self-explanatory.

    def SetImagesFromFolder(self):
        # Load each file as grayscale and flatten it (x outer, y inner)
        # into a plain list of pixel values.
        for f in self.full_file_paths:
            im = Image.open(f).convert('L')
            pix = im.load()
            w=im.size[0]
            h=im.size[1]
            imagetmp = []
            for i in range(w):
                for j in range(h):
                    imagetmp.append(pix[i,j])
            self.images.append(imagetmp)

    def CalcAverage(self):
        # Pixel-wise mean over all training images.
        for i in range(self.imsize):
            tmpvalue = sum(row[i] for row in self.images)
            self.Avimage.append(tmpvalue/self.Nimages)

    def DumpAverage(self):
        #see the av output: write the mean image to average.jpg, reusing
        #the first training file only for its dimensions.
        img=Image.open(self.full_file_paths[0]).convert('L')# grayscale
        pix=img.load()
        k=0
        for i in range(img.size[0]):
            for j in range(img.size[1]):
                pix[i,j] = int(self.Avimage[k])
                k=k+1
        img.save("average.jpg")

    def CalcDiffImagesAv(self):
        # diffimages[i] = images[i] - Avimage (element-wise, as floats).
        for i in range(self.Nimages):
            tmpdiff = []
            for j in range(self.imsize):
                tmpdiff.append(float(-self.Avimage[j]+self.images[i][j]))
            #diffimages.append(tmpdiff)
            self.diffimages[i] = tmpdiff

    def CalcEigenvectors(self):
        #calculate transponse covariance matrix L[i][j] = <diff_i, diff_j>
        L = [[0 for j in range(self.Nimages)] for i in range(self.Nimages)]
        for i in range(self.Nimages):
            for j in range(self.Nimages):
                L[i][j] = np.dot(self.diffimages[i],self.diffimages[j])
        #calc eigenvalues/vectors
        evals, evecs = LA.eig(L)
        # NOTE(review): scipy.linalg.eig returns eigenvectors as the
        # COLUMNS of evecs, but evecs[i][j] below indexes row i -- this
        # looks like it mixes components across eigenvectors; confirm
        # whether evecs[j][i] was intended.
        for i in range(self.Nimages):
            for j in range(self.Nimages):
                self.uvecs[i] = list(map(lambda x,y:x+evecs[i][j]*y, self.uvecs[i],self.diffimages[j]))

    def CalcProjectionImages(self):
        # projdiffimages[i][j] = <u_j, diff_i>: coordinates of training
        # image i in the eigen-subspace.
        for i in range(self.Nimages):
            for j in range(self.Nimages):
                self.projdiffimages[i][j] = np.dot(self.uvecs[j],self.diffimages[i])
class CheckImage():
    """Project a test image into the eigen-space built from a training
    pool and find the closest training image."""

    def __init__(self,FILENAME,directory):
        self.imagetest = []
        self.GetImage(FILENAME)
        self.poolims= PoolImages(directory)
        #calc diff and projection into subspace
        self.utest = [0 for i in range(self.poolims.Nimages)]
        self.uvecs = self.poolims.uvecs
        self.Avim = self.poolims.Avimage
        self.CalcProjectionIm()
        self.projectionims = self.poolims.projdiffimages
        self.poolims.DumpAverage()
        #self.DeterminePoolImage

    def GetImage(self,filename):
        # Load the test image as grayscale and flatten it the same way
        # PoolImages flattens the training images (x outer, y inner).
        imtest=Image.open(filename).convert('L')# grayscale
        pixtest = imtest.load()
        w=imtest.size[0]
        h=imtest.size[1]
        for i in range(w):
            for j in range(h):
                self.imagetest.append(pixtest[i,j])

    def CalcProjectionIm(self):
        # utest[i] = <u_i, testimage - average>: coordinates of the test
        # image in the eigen-subspace.
        for i in range(self.poolims.Nimages):
            self.utest[i] = sum(map(lambda x,y,z: x*(y-z),self.uvecs[i],self.imagetest,self.Avim))

    def DeterminePoolImage(self):
        #calc distances to every training projection, keep the closest.
        # NOTE(review): the initial 1000 is always overwritten at i == 0,
        # so it is effectively unused.
        mindist = 1000
        indx=0
        for i in range(self.poolims.Nimages):
            if i==0:
                mindist= self.dist(self.utest,self.projectionims[i])
            else:
                if self.dist(self.utest,self.projectionims[i])<mindist:
                    mindist = self.dist(self.utest,self.projectionims[i])
                    indx =i
        print('index=',indx,'mindist=',mindist)
        #show closer image
        imgresult=Image.open(self.poolims.full_file_paths[indx]).convert('L')
        imgresult.save("result.jpg")

    def dist(self,v1,v2):
        '''
        calculate the distance between 2 vectors
        '''
        # Squared Euclidean distance (no sqrt -- ordering is unchanged).
        d=0
        for x in range(len(v1)):
            d = d+(v1[x]-v2[x])*(v1[x]-v2[x])
        return d
#pool images
folder = "./photos/training"
#test image here:
FILENAME='testimage.jpg' #image can be in gif jpeg or png format
#run script: project the test image into the training eigen-space and
#save the closest training image as result.jpg (mean image -> average.jpg).
test = CheckImage(FILENAME,folder)
test.DeterminePoolImage()
|
from funcy.flow import silent
from funcy.funcs import complement
from funcy.seqs import take, first
from .account import Account
from .instance import shared_steemd_instance
from .post import Post
from .utils import is_comment
class Blog:
    """Iterate over the blog posts (or comments) of a single account.

    Args:
        account_name (str): Name of the account.
        comments_only (bool): (Default False). Toggle between posts and
            comments.
        steemd_instance (Steemd): Steemd instance overload.

    Posts come back in reverse chronological order.  Use ``take(n)``
    for a batch, ``all()`` for everything, or iterate directly:

    ::

        b = Blog('furion')
        posts = b.take(5)
        for post in b:
            ...
    """

    def __init__(self, account_name: str, comments_only=False, steemd_instance=None):
        self.steem = steemd_instance or shared_steemd_instance()
        self.comments_only = comments_only
        self.account = Account(account_name)
        # Lazy reverse-chronological stream of comment-type operations;
        # shared by every take() call so each batch continues where the
        # previous one stopped.
        self.history = self.account.history_reverse(filter_by='comment')
        # Permlinks already yielded (post edits re-appear in history).
        self.seen_items = set()

    def take(self, limit=5):
        """Return the next batch of up to *limit* posts/comments.

        Repeated calls keep consuming the history stream; once it is
        exhausted, an empty list is returned.
        """
        wanted = is_comment if self.comments_only else complement(is_comment)

        def unseen(item):
            # Keep only the first sighting of each permlink.
            if item['permlink'] in self.seen_items:
                return False
            self.seen_items.add(item['permlink'])
            return True

        candidates = (
            item for item in self.history
            if wanted(item)                          # posts vs comments
            and item['author'] == self.account.name  # drop reblogs
            and unseen(item)                         # drop edits
        )
        # silent(Post) maps failures to None; filter(bool) drops them.
        return take(limit, filter(bool, map(silent(Post), candidates)))

    def all(self):
        """ A generator that will return ALL of account history. """
        while True:
            batch = self.take(10)
            if not batch:
                break
            yield from iter(batch)

    def __iter__(self):
        return self

    def __next__(self):
        item = first(self.take(1))
        if not item:
            raise StopIteration
        return item
|
"""
Class that will contain a document (Basic unit -- "Primary Data-Structure)
"""
from util import *
from sentence import *
class Document:
    """Primary data structure: one document as an ordered list of
    sentences plus message metadata (to/from, date, fwd/reply flags)."""

    def __init__(self, toInfo = None, fromInfo = None, data=None):
        # NOTE(review): `data` is accepted but never used -- possibly a
        # typo for a `date` argument; kept for interface compatibility.
        self.__sentences = []
        self.__sCount = -1  # Number of sentences
        self.__toInfo = toInfo  # Who was the document to
        self.__fromInfo = fromInfo  # Who was the document from
        # BUG FIX: the original assigned the undefined name `date`,
        # raising NameError on every construction.  Start unset and use
        # setDate() once implemented.
        self.__date = None
        self.__fwd = False
        self.__reply = False

    def __getitem__(self, index):
        return self.__sentences[index]

    def __setitem__(self, index, value):
        # BUG FIX: the original wrote to self.__sentence (missing 's'),
        # raising AttributeError instead of updating the list.
        self.__sentences[index] = value

    def getSCount(self):
        #fill me (we should not have to ever set sCount)
        pass

    def setToInfo(self, value):
        #fill me
        pass

    def getToInfo(self):
        #fill me
        pass

    def setFromInfo(self, value):
        #fill me
        pass

    def getFromInfo(self):
        #fill me
        pass

    def setDate(self, year, month, day):
        #should use date object in python datetime package
        # mydate = datetime.date(year,month, day)
        #fill me
        pass

    def getDate(self):
        #returns year,month,day
        pass

    def setFwd(self, value):
        #fill me
        pass

    def getFwd(self):
        #fill me
        pass

    def setReply(self,value):
        #fill me
        pass

    def getReply(self):
        #fill me
        pass
def testDocument():
    """
    Used to test your Document Class
    """
    # TODO: exercise construction, indexing, and the metadata accessors.
    pass
if __name__ == "__main__":
    # Run the (stub) self-test when executed directly.
    testDocument()
|
from sklearn import linear_model
import numpy as np
import language_check
import pandas as pd
import random
"""
Compute evaluation matrix.
"""
def weights_matrix(predicted, actual, score_max):
    """Quadratic-penalty weight matrix for QWK.

    Entry [i][j] is (i - j)^2 divided by a constant; the constant
    cancels in the final kappa ratio, so it does not change QWK().
    `predicted` and `actual` are unused but kept so the signature
    matches the other *_matrix helpers (the original also converted
    `actual` to an array without using it -- removed).
    """
    size = score_max + 1
    denom = (score_max - 1) * (score_max - 1)
    return [[(i - j) * (i - j) / denom for j in range(size)]
            for i in range(size)]
def observed_matrix(predicted, actual, score_max):
    """Confusion-matrix histogram of (predicted, actual) rating pairs.

    Entry [p][a] counts how many items were predicted p and rated a.

    BUG FIX: the original iterated the full cross product of the two
    lists and indexed with the entire `actual` array
    (`weights[pred][actual] += 1`), bumping whole rows and miscounting.
    The observed matrix must pair prediction i with actual i.
    """
    matrix = np.zeros([score_max + 1, score_max + 1])
    for pred, actu in zip(predicted, actual):
        matrix[pred][actu] += 1
    return matrix
def expected_matrix(predicted, actual, score_max):
    """Expected rating matrix under independence of the two raters.

    Outer product of the observed matrix's marginals, normalised by the
    total count so its sum matches the observed matrix's sum.
    """
    observed = observed_matrix(predicted, actual, score_max)
    col_totals = np.sum(observed, axis=0)
    row_totals = np.sum(observed, axis=1)
    return np.multiply.outer(col_totals, row_totals) / np.sum(observed)
def QWK(predicted, actual, score_max):
    """Quadratic Weighted Kappa between predicted and actual ratings.

    k = 1 - sum(W * O) / sum(W * E), where W is the quadratic weight
    matrix and O/E the observed/expected matrices.

    BUG FIX: the original printed k but fell off the end, returning
    None; the computed kappa is now returned.  The weight matrix is
    also computed once instead of twice.
    """
    weights = np.asarray(weights_matrix(predicted, actual, score_max))
    WO = weights * observed_matrix(predicted, actual, score_max)
    WE = weights * expected_matrix(predicted, actual, score_max)
    k = 1 - np.sum(WO) / np.sum(WE)
    print("QWK is ", k)
    return k
|
# -*- coding: utf-8 -*-
"""
© Copyright 2014. Joon Yang & Jaemin Cheun. All rights reserved.
Significantly cuts down the computation by not evaluating a QValue
when we discover that it is worse than a previously examined Q value
"""
import progress
class ExpectipruneAgent:
    """Expectimax agent with pruning: evaluation of a question's Q-value
    is abandoned early once it provably cannot beat the best value found
    so far at the parent max node."""

    def __init__(self, depth = '1'):
        self.index = 0 # Computer is agent 0
        # Depth arrives as a string (e.g. from a CLI); normalise to int.
        self.depth = int(depth)

    def getPolicy(self, initialState):
        """
        Returns the expectimax action using self.depth
        """
        def getReward(state):
            # Heuristic payoff of a state.
            return state.getAverage()
        def terminalTest(state, depth):
            # Search stops when the depth budget is exhausted.
            return depth == 0
        def ExpectipruneDecision(state):
            """returns action that maximizes minValue"""
            # base case: action = None
            max_value, policy = -float('inf'), None
            # get all possible actions of computer, i.e. all possible questions
            actions = state.getLegalActions("computer")
            for act in actions:
                # Pass the best value so far as the pruning bound.
                new_value = playerNode(state.generateSuccessor("computer", act), self.depth - 1, max_value)
                if max_value < new_value:
                    max_value, policy = new_value, act
            return policy
        #player Move (chance node over the human's yes/no answer)
        def playerNode(state, depth, alpha):
            """Nodes where player makes the move"""
            if terminalTest(state,depth):
                return getReward(state)
            QValue = getReward(state)
            # "Yes" branch, weighted by its probability.
            QValue += state.getProbability() * MaxValue(state.generateSuccessor("human", 1), depth)
            #if the highest possible Q value after calculating the first child is less than alpha, we prune the branch
            if (QValue + (1- state.getProbability()) * depth) < alpha:
                return QValue
            # "No" branch, weighted by the complementary probability.
            QValue += (1 - state.getProbability()) * MaxValue(state.generateSuccessor("human", 0), depth)
            return QValue
        #Computer Move
        def MaxValue(state, depth):
            """Nodes where computer asks a question"""
            max_value = -float('inf')
            # get all possible actions of computer
            actions = state.getLegalActions("computer")
            for act in actions:
                new_value = playerNode(state.generateSuccessor("computer", act), depth - 1,max_value)
                if max_value < new_value:
                    max_value = new_value
            return max_value
        # return the result of expectimax algorithm
        return ExpectipruneDecision(initialState)
|
#!/user/bin/python
# Author: Yi Xing, Date: 05/28/2015
# Course: CS6200 Information Retrieval Summer 2015
# This script get each query from elasticsearch,
# pass the query to models class
import os
import re
import models
from nltk.stem.porter import *
from nltk.corpus import stopwords
# English stopword list shared by removeStopWords().
STOPLIST = stopwords.words('english')
# print STOPLIST
# Porter stemmer shared by stemWords().
stemmer = PorterStemmer()
# QUERYFILE='/Users/yixing/Documents/CS6200/AP_DATA/query_desc.51-100.short.txt'
# QUERYFILE='/Users/yixing/Documents/CS6200/Homework2/NewQueries.txt'
QUERYFILE='/Users/yixing/Documents/CS6200/Homework2/query_desc.51-100.short_v2.txt'
QUERYPATH = '/Users/yixing/Documents/CS6200/Homework2/temp'
def main():
    # Run the retrieval models plus the proximity-search model against
    # the query file, using stemmed + stopword-removed queries (type 4).
    type = 4 # the index type
    m = models.QueryModel()
    getIRmodels(m,type) # run 5 function models
    getProximityModel(m) # run proximity search
def getIRmodels(m,type):
    """
    get the query number list query dictionary, for each query use the model function to calculate it
    :param m: QueryModel class
    :param type: index type
    :return: None
    """
    query_numbers, querys = getQuerys(type)
    print query_numbers
    print querys
    for query_num in query_numbers:
        if query_num in querys.keys():
            query = querys[query_num]
            print query
            # Run all retrieval models for this query.
            m.models(query, query_num)
def getProximityModel(m):
    """
    Run the proximity model over every query read from QUERYPATH.
    :param m: QueryModel class
    :return: None
    """
    for num, query in getProximityQueries().items():
        m.proximity_model(query, num)
def getQuerys(type):
    """
    read the query file and get the query number and useful content, tokenize them by different index type
    :param type: index type
    :return: list of query number and a query content dictionary
    """
    fh=open(QUERYFILE,"r")
    querys=fh.readlines()
    new_querys = {}
    query_numbers = []
    for query in querys:
        # Skip blank/near-empty lines.
        if len(query) < 3:
            continue
        # The query number precedes the first ".".
        queryno = query.split(".")[0]
        query_numbers.append(queryno)
        print query.split(" ",6)[6]
        # Everything after the sixth space is the query text proper.
        new_query = createIndex(query.split(" ", 6)[6], type)
        new_querys[queryno] = new_query
    fh.close()
    # Dump the processed queries for inspection; new_querys[num] is a
    # term->frequency map, so join() writes its term keys.
    f2 = open("/Users/yixing/Documents/CS6200/Homework2/quer_stem", "w")
    for num in new_querys:
        tmp = (" ").join(new_querys[num])
        f2.write('%s %s\n'%(num, tmp))
    f2.close()
    return query_numbers, new_querys
def createIndex(string,type):
    """
    index the query content by different type
    :param string: query string
    :param type: index type (1 raw, 2 stemmed, 3 stopped, 4 stopped then stemmed)
    :return: query dictionary with key is query term and value is frequency
    """
    query = tokenizer(string)
    if type == 1:
        query_list = query
    elif type == 2:
        query_list = stemWords(query)
    elif type == 3:
        query_list = removeStopWords(query)
    elif type == 4:
        # Remove stopwords first, then stem the survivors.
        query_list = stemWords(removeStopWords(query))
    print query_list
    query_map = createMap(query_list)
    return query_map
def removeStopWords(text):
    """Return the tokens of *text* with English stopwords removed.

    STOPLIST is a plain list, so the original scanned it once per token
    (O(len(STOPLIST)) each); building a set first makes every
    membership test O(1).  Order and duplicates of kept tokens are
    preserved.
    """
    stopset = set(STOPLIST)
    return [word for word in text if word not in stopset]
def stemWords(text):
    """Porter-stem every token in *text*, preserving order."""
    return [stemmer.stem(word) for word in text]
def tokenizer(text):
    # Lowercase, then split on word boundaries; the pattern keeps
    # dotted tokens such as "u.s.a" together as one token.
    textlist = re.findall(r"\w+(?:\.?\w+)*",text.lower())
    print textlist
    return textlist
def createMap(query_list):
    """Build a term-frequency dictionary for the given token list.

    :param query_list: list of query terms
    :return: dict mapping term -> occurrence count

    The original named its accumulator `map`, shadowing the builtin;
    renamed and simplified with dict.get.
    """
    freqs = {}
    for word in query_list:
        freqs[word] = freqs.get(word, 0) + 1
    return freqs
def getProximityQueries():
    """
    get the proximity query dictionary, with key is the query number and value is query term list
    :return: query dictionary
    """
    # NOTE(review): fh is never closed; consider a with-block.
    fh = open(QUERYPATH, "r")
    query_dict = {}
    for line in fh.readlines():
        # Skip blank/near-empty lines.
        if len(line) < 3:
            continue
        # The query number precedes the first ".".
        query_no = line.split(".")[0]
        # print query_no
        # Drop the leading query number and the final token
        # (presumably the newline-bearing last word -- confirm).
        query = line.split(" ")[1:][:-1]
        # print query
        query_dict[query_no] = query
    return query_dict
if __name__ == '__main__':
    # Script entry point.
    main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.