text stringlengths 38 1.54M |
|---|
import pytest
from fcm_django.models import FCMDeviceQuerySet
from tests.factories import ReviewFactory
from jobadvisor.notifications.models import Message
@pytest.mark.django_db
def test_review_signal_create(mocker, company) -> None:
    """Creating a review must trigger exactly one bulk_create of messages."""
    bulk_create_spy = mocker.spy(Message.objects, "bulk_create")
    ReviewFactory(company=company)
    assert bulk_create_spy.call_count == 1
|
# -*- coding: utf-8 -*-
"""
Updated Jan 21, 2018
The primary goal of this file is to demonstrate a simple unittest implementation
@author: jrr
@author: rk
"""
import unittest
from Triangle import classifyTriangle
# This code implements the unit test functionality
# https://docs.python.org/3/library/unittest.html has a nice description of the framework
class TestTriangle(unittest.TestCase):
    """Exercises classifyTriangle across every classification and error case."""

    def check(self, a, b, c, expected, msg):
        # Shared assertion helper keeping each test to a single line.
        self.assertEqual(classifyTriangle(a, b, c), expected, msg)

    def testRightTriangleA(self):
        self.check(3, 4, 5, 'Right', '3,4,5 is a Right triangle')

    def testRightTriangleB(self):
        self.check(5, 3, 4, 'Right', '5,3,4 is a Right triangle')

    def testEquilateralTrianglesA(self):
        self.check(1, 1, 1, 'Equilateral', '1,1,1 should be equilateral')

    def testEquilateralTrianglesB(self):
        self.check(100, 100, 100, 'Equilateral', '100,100,100 should be equilateral')

    def testIsocelesTriangleA(self):
        self.check(5, 5, 4, 'Isoceles', '5,5,4 is a Isoceles triangle')

    def testIsocelesTriangleB(self):
        self.check(100, 100, 50, 'Isoceles', '100,100,50 is a Isoceles triangle')

    def testScaleneTriangleA(self):
        self.check(100, 150, 60, 'Scalene', '100,150,60 is a Scalene triangle')

    def testScaleneTriangleB(self):
        self.check(110, 123, 50, 'Scalene', '110,123,50 is a Scalene triangle')

    def testScaleneTriangleC(self):
        self.check(2, 4, 3, 'Scalene', '2,4,3 is a Scalene triangle')

    def testNotTriangleA(self):
        self.check(150, 100, 50, 'NotATriangle', '150,100,50 cannot form a triangle')

    def testNotTriangleB(self):
        self.check(150, 100, 40, 'NotATriangle', '150,100,40 cannot form a triangle')

    def testInvalidInputA(self):
        self.check(250, 100, 150, 'InvalidInput', '250,100,150 is an invalid input as values greater than 200')

    def testInvalidInputA1(self):
        self.check(300, 100, 250, 'InvalidInput', '300,100,250 is an invalid input as values greater than 200')

    def testInvalidInputB(self):
        self.check(0, 23, 56, 'InvalidInput', '0,23.56 is an invalid input as ONE value is 0 ')

    def testInvalidInputB1(self):
        self.check(0, 0, 56, 'InvalidInput', '0,0,56 is an invalid input as TWO value is 0 ')

    def testInvalidInputC(self):
        self.check(1.2, 5, 6, 'InvalidInput', '1.2,5,6 is an invalid input as all the values are not integer ')

    def testInvalidInputC1(self):
        self.check(1.2, 5.4, 6, 'InvalidInput', '1.2,5.4,6 is an invalid input as all the values are not integer ')
# Run the test suite only when executed directly (removes stray trailing
# '|' residue that made the last line a syntax error).
if __name__ == '__main__':
    print('Running unit tests')
    unittest.main()
from z3 import *

# Solve 16*x^2 + 145*x + 9 == 0 over the integers.
s = Solver()
x = Int('x')
s.add(16*x*x + 145*x + 9 == 0)
print(s.check())   # fix: Python 3 print() instead of the Python 2 statement
print(s.model())

# Find an 8-bit y such that y ^ 0x77 == 0.
y = BitVec('y', 8)
s.add(y ^ 0x77 == 0)
print(s.check())   # fix: re-check after adding the new constraint —
print(s.model())   # querying model() without a fresh check() is an error
|
import logging
from interfaces.LTS import LTS
from module_generation.lts_to_verilog import lts_to_verilog
from module_generation.verilog_to_aiger_via_yosys import verilog_to_aiger
def lts_to_aiger(lts: LTS) -> str:
    """Convert an LTS into AIGER format via an intermediate Verilog model."""
    verilog_src = lts_to_verilog(lts, 'model')
    logging.debug('verilog output is \n' + verilog_src)
    return verilog_to_aiger(verilog_src)
|
# -*- coding: utf-8 -*-
import django
from django.http import HttpResponseRedirect
from django.utils.http import urlencode
from allauth.account.adapter import get_adapter
from allauth.account.utils import get_next_redirect_url
from allauth.socialaccount import providers
from allauth.socialaccount.helpers import (
complete_social_login, render_authentication_error,
)
from allauth.socialaccount.models import SocialLogin
import cas
from . import CAS_PROVIDER_SESSION_KEY
from .exceptions import CASAuthenticationError
if django.VERSION >= (1, 10):
from django.urls import reverse
else:
from django.core.urlresolvers import reverse
class AuthAction(object):
    # Enumerates the authentication actions a CAS flow can perform.
    AUTHENTICATE = 'authenticate'
    REAUTHENTICATE = 'reauthenticate'
    DEAUTHENTICATE = 'deauthenticate'
class CASAdapter(object):
    """Bridges allauth's provider machinery with a CAS client configuration."""

    def __init__(self, request):
        self.request = request

    @property
    def renew(self):
        """Whether CAS single sign-on must be bypassed.

        A user already authenticated on Django may already be connected to
        CAS but still want to use another CAS account; we detect previous
        CAS usage through the CAS session key and in that case ask the
        server for a fresh authentication instead of single sign-on.
        """
        return CAS_PROVIDER_SESSION_KEY in self.request.session

    def get_provider(self):
        """Return the provider instance bound to the current request."""
        return providers.registry.by_id(self.provider_id, self.request)

    def complete_login(self, request, response):
        """Build the SocialLogin state after a successful CAS authentication.

        Executed by the callback view once the CAS server has validated the
        ticket.
        """
        provider = self.get_provider()
        return provider.sociallogin_from_response(request, response)

    def get_service_url(self, request):
        """Return the service url handed to the CAS client.

        Per the CAS specification the service url both receives the
        post-login redirect and must match the url the ticket was issued
        for, so the callback url is always used. Any "next" redirect found
        on the request is carried as a query parameter for the callback
        view to honour later.
        """
        next_url = get_next_redirect_url(request)
        extra = {'next': next_url} if next_url else {}
        callback = self.get_callback_url(request, **extra)
        return request.build_absolute_uri(callback)

    def get_callback_url(self, request, **kwargs):
        """Return the provider callback url, kwargs encoded as query string."""
        url = reverse(self.provider_id + '_callback')
        return url + '?' + urlencode(kwargs) if kwargs else url
class CASView(object):
    """Base class wiring a CAS adapter and shared error handling into views."""

    @classmethod
    def adapter_view(cls, adapter, **kwargs):
        """Build a function view bound to *adapter*, akin to Django's as_view().

        The given adapter is used by the view internals, and any
        CASAuthenticationError raised during dispatch renders the
        authentication error page. Typical usage::

            class MyAdapter(CASAdapter):
                url = 'https://my.cas.url'

            login = views.CASLoginView.adapter_view(MyAdapter)
            callback = views.CASCallbackView.adapter_view(MyAdapter)
            logout = views.CASLogoutView.adapter_view(MyAdapter)
        """
        def view(request, *args, **kwargs):
            # Instantiate and prime the class-based view by hand.
            instance = cls()
            instance.request = request
            instance.args = args
            instance.kwargs = kwargs
            # The adapter is stored as a view attribute.
            instance.adapter = adapter(request)
            instance.provider = instance.adapter.get_provider()
            try:
                return instance.dispatch(request, *args, **kwargs)
            except CASAuthenticationError:
                return instance.render_error()
        return view

    def get_client(self, request, action=AuthAction.AUTHENTICATE):
        """Return a CAS client configured from the adapter and request."""
        return cas.CASClient(
            service_url=self.adapter.get_service_url(request),
            server_url=self.adapter.url,
            version=self.adapter.version,
            renew=self.adapter.renew,
            extra_login_params=self.provider.get_auth_params(request, action),
        )

    def render_error(self):
        """Render the page shown on authentication failure."""
        return render_authentication_error(self.request, self.provider.id)
class CASLoginView(CASView):

    def dispatch(self, request):
        """Redirect the user to the CAS server login page."""
        SocialLogin.stash_state(request)
        action = request.GET.get('action', AuthAction.AUTHENTICATE)
        client = self.get_client(request, action=action)
        return HttpResponseRedirect(client.get_login_url())
class CASCallbackView(CASView):

    def dispatch(self, request):
        """Handle the redirect back from the CAS server.

        The server appends a ticket whose validity is verified here; on
        success it may also return extra attributes about the user.
        """
        client = self.get_client(request)
        # The CAS server must have attached a ticket to the redirect.
        ticket = request.GET.get('ticket')
        if ticket is None:
            raise CASAuthenticationError(
                "CAS server didn't respond with a ticket."
            )
        # verify_ticket returns (username, attributes, pgtiou) on success
        # and (None, {}, None) on error.
        response = client.verify_ticket(ticket)
        if not response[0]:
            raise CASAuthenticationError(
                "CAS server doesn't validate the ticket."
            )
        # Remember which CAS provider was used so logout can offer to
        # disconnect from it as well.
        request.session[CAS_PROVIDER_SESSION_KEY] = self.provider.id
        # Finish the login flow.
        login = self.adapter.complete_login(request, response)
        login.state = SocialLogin.unstash_state(request)
        return complete_social_login(request, login)
class CASLogoutView(CASView):

    def dispatch(self, request, next_page=None):
        """Redirect to the CAS server logout page.

        *next_page* lets the CAS server send the user back; when empty, the
        redirect url is derived from request data.
        """
        redirect_to = request.build_absolute_uri(
            next_page or self.get_redirect_url()
        )
        client = self.get_client(request, action=AuthAction.DEAUTHENTICATE)
        return HttpResponseRedirect(client.get_logout_url(redirect_to))

    def get_redirect_url(self):
        """Resolve the post-logout redirect url from the current request."""
        request = self.request
        url = get_next_redirect_url(request)
        if not url:
            url = get_adapter(request).get_logout_redirect_url(request)
        return url
|
import json
import uuid
from bson.objectid import ObjectId
from lib.custom_except import duplicateError
from models import db
def get_all():
    """Return every item and combo grouped under its type document.

    Each type document carries its products in a "content" list; ObjectIds
    are stringified for serialisation and the type's "name" field is
    renamed to "type".
    """
    # Join each item-category type with the items referencing it.
    item_result = list(
        db.TYPE_COLLECTION.aggregate(
            [
                {"$match": {"category": "item"}},
                {
                    "$lookup": {
                        "from": "item",
                        "localField": "_id",
                        "foreignField": "type",
                        "as": "content",
                    }
                },
                {"$project": {"_id": 0, "content.type": 0}},
            ]
        )
    )
    # Same join for combo-category types; nested content ids are hidden too.
    combo_result = list(
        db.TYPE_COLLECTION.aggregate(
            [
                {"$match": {"category": "combo"}},
                {
                    "$lookup": {
                        "from": "combo",
                        "localField": "_id",
                        "foreignField": "type",
                        "as": "content",
                    }
                },
                {
                    "$project": {
                        "_id": 0,
                        "content.type": 0,
                        "content.content.id": 0,
                    }
                },
            ]
        )
    )
    # Stringify remaining ObjectIds and rename "name" -> "type" in place.
    for item in item_result:
        for content in item["content"]:
            content["_id"] = str(content["_id"])
        item["type"] = item.pop("name")
    for combo in combo_result:
        for content in combo["content"]:
            content["_id"] = str(content["_id"])
        combo["type"] = combo.pop("name")
    return item_result + combo_result
def get_item_by_id(data, detail=False):
    """Fetch items by id.

    Parameters:
        data: iterable of item id strings.
        detail: when False, only "name" and "price" are projected;
            otherwise the full documents are returned.

    ObjectId fields are stringified in the result.
    """
    # Renamed from `id`, which shadowed the builtin.
    object_ids = [ObjectId(i) for i in data]
    pipeline = [
        {"$match": {"_id": {"$in": object_ids}}},
        {
            "$addFields": {
                "_id": {"$toString": "$_id"},
                "type": {"$toString": "$type"},
            }
        },
    ]
    if detail is False:
        pipeline.append({"$project": {"name": 1, "price": 1}})
    return list(db.ITEM_COLLECTION.aggregate(pipeline))
def get_combo_by_id(data, detail=False):
    """Fetch combos by id.

    Parameters:
        data: iterable of combo id strings.
        detail: when False, only "name" and "price" are projected;
            otherwise the full documents (nested content ids stringified)
            are returned.
    """
    # Renamed from `id`, which shadowed the builtin.
    object_ids = [ObjectId(i) for i in data]
    pipeline = [
        {"$match": {"_id": {"$in": object_ids}}},
        {
            "$addFields": {
                "_id": {"$toString": "$_id"},
                "type": {"$toString": "$type"},
                # Stringify the id of every entry in the combo's content.
                "content": {
                    "$map": {
                        "input": "$content",
                        "as": "t",
                        "in": {
                            "id": {"$toString": "$$t.id"},
                            "quantity": "$$t.quantity",
                            "name": "$$t.name",
                        },
                    }
                },
            }
        },
    ]
    if detail is False:
        pipeline.append({"$project": {"name": 1, "price": 1}})
    return list(db.COMBO_COLLECTION.aggregate(pipeline))
def get_type(item=False, combo=False):
    """Return type documents for the requested categories, ids stringified."""
    categories = [
        name for name, wanted in (("item", item), ("combo", combo)) if wanted
    ]
    pipeline = [
        {"$match": {"category": {"$in": categories}}},
        {"$project": {"_id": {"$toString": "$_id"}, "name": 1}},
    ]
    return list(db.TYPE_COLLECTION.aggregate(pipeline))
def add_item(data, pic):
    """Insert a new item and its picture; duplicate names are rejected."""
    if db.ITEM_COLLECTION.find_one({"name": data.get("name")}):
        raise duplicateError
    # The picture is stored separately, keyed by a fresh uuid.
    pic_id = str(uuid.uuid4())
    db.ITEM_COLLECTION.insert_one(
        {
            "type": ObjectId(data.get("type")),
            "name": data.get("name"),
            "picture": pic_id,
            "price": int(data.get("price")),
            "description": data.get("description"),
        }
    )
    db.IMAGE_COLLECTION.insert_one({"uuid": pic_id, "picture": pic})
def add_combo(data, pic):
    """Insert a new combo and its picture; duplicate names are rejected."""
    if db.COMBO_COLLECTION.find_one({"name": data.get("name")}):
        raise duplicateError
    pic_id = str(uuid.uuid4())
    # Convert the posted JSON content: ids to ObjectId, item names
    # denormalised from the item collection.
    content = json.loads(data.get("content"))
    for entry in content:
        entry["id"] = ObjectId(entry["id"])
        entry["name"] = db.ITEM_COLLECTION.find_one(
            {"_id": entry["id"]}, {"name": 1}
        )["name"]
    db.COMBO_COLLECTION.insert_one(
        {
            "type": ObjectId(data.get("type")),
            "name": data.get("name"),
            "picture": pic_id,
            "price": int(data.get("price")),
            "description": data.get("description"),
            "content": content,
        }
    )
    db.IMAGE_COLLECTION.insert_one({"uuid": pic_id, "picture": pic})
def add_type(data):
    """Insert a new type; a duplicate (category, name) pair is rejected."""
    document = {"category": data["category"], "name": data["type"]}
    if db.TYPE_COLLECTION.find_one(document):
        raise duplicateError
    db.TYPE_COLLECTION.insert_one(document)
def delete_item(id):
    """Delete an item, its stored picture, and its occurrences in combos."""
    object_id = ObjectId(id)
    item = db.ITEM_COLLECTION.find_one({"_id": object_id}, {"picture": 1})
    # Guard: find_one returns None for an unknown id; the original then
    # crashed with TypeError on item["picture"].
    if item is None:
        return
    db.ITEM_COLLECTION.delete_one({"_id": object_id})
    db.IMAGE_COLLECTION.delete_one({"uuid": item["picture"]})
    # Remove the deleted item from every combo that contains it.
    db.COMBO_COLLECTION.update_many(
        {"content.id": object_id}, {"$pull": {"content": {"id": object_id}}}
    )
def delete_combo(id):
    """Delete a combo and its stored picture."""
    object_id = ObjectId(id)
    combo = db.COMBO_COLLECTION.find_one({"_id": object_id}, {"picture": 1})
    # Guard against an unknown id (find_one -> None) before subscripting.
    if combo is None:
        return
    # Reuse object_id instead of re-constructing ObjectId(id), matching
    # delete_item's style.
    db.COMBO_COLLECTION.delete_one({"_id": object_id})
    db.IMAGE_COLLECTION.delete_one({"uuid": combo["picture"]})
def delete_type(id):
    """Delete a type, re-pointing its items/combos at the fallback type."""
    object_id = ObjectId(id)
    doomed = db.TYPE_COLLECTION.find_one({"_id": object_id})
    if doomed["category"] == "item":
        # Reassign orphaned items to the "uncategorised (item)" type.
        fallback_id = db.TYPE_COLLECTION.find_one({"name": "未分類(單品)"})["_id"]
        db.ITEM_COLLECTION.update_many(
            {"type": object_id}, {"$set": {"type": fallback_id}}
        )
    elif doomed["category"] == "combo":
        # Reassign orphaned combos to the "uncategorised (combo)" type.
        fallback_id = db.TYPE_COLLECTION.find_one({"name": "未分類(套餐)"})["_id"]
        db.COMBO_COLLECTION.update_many(
            {"type": object_id}, {"$set": {"type": fallback_id}}
        )
    db.TYPE_COLLECTION.delete_one({"_id": object_id})
def update_item(data, pic):
    """Update an item's fields and optionally its picture.

    A name already used by a different item is rejected.
    """
    item_id = ObjectId(data["id"])
    if db.ITEM_COLLECTION.find_one(
        {"_id": {"$ne": item_id}, "name": data["name"]}
    ):
        raise duplicateError
    pic_id = db.ITEM_COLLECTION.find_one({"_id": item_id}, {"picture": 1})[
        "picture"
    ]
    db.ITEM_COLLECTION.update_one(
        {"_id": item_id},
        {
            "$set": {
                "type": ObjectId(data.get("type")),
                "name": data.get("name"),
                "price": int(data.get("price")),
                "description": data.get("description"),
            }
        },
    )
    # Propagate the rename into every combo referencing this item.
    db.COMBO_COLLECTION.update_many(
        {"content.id": item_id},
        {"$set": {"content.$.name": data.get("name")}},
    )
    # An empty pic means "keep the current picture".
    if pic != b"":
        db.IMAGE_COLLECTION.update_one(
            {"uuid": pic_id}, {"$set": {"picture": pic}}
        )
def update_combo(data, pic):
    """Update a combo's fields/content and optionally its picture.

    A name already used by a different combo is rejected.
    """
    combo_id = ObjectId(data["id"])
    if db.COMBO_COLLECTION.find_one(
        {"_id": {"$ne": combo_id}, "name": data["name"]}
    ):
        raise duplicateError
    pic_id = db.COMBO_COLLECTION.find_one({"_id": combo_id}, {"picture": 1})[
        "picture"
    ]
    # Convert posted JSON content: ids to ObjectId, item names denormalised
    # from the item collection.
    content = json.loads(data.get("content"))
    for entry in content:
        entry["id"] = ObjectId(entry["id"])
        entry["name"] = db.ITEM_COLLECTION.find_one(
            {"_id": entry["id"]}, {"name": 1}
        )["name"]
    db.COMBO_COLLECTION.update_one(
        {"_id": combo_id},
        {
            "$set": {
                "type": ObjectId(data.get("type")),
                "name": data.get("name"),
                "price": int(data.get("price")),
                "description": data.get("description"),
                "content": content,
            }
        },
    )
    # An empty pic means "keep the current picture".
    if pic != b"":
        db.IMAGE_COLLECTION.update_one(
            {"uuid": pic_id}, {"$set": {"picture": pic}}
        )
def update_type(data):
    """Rename a type; a name used by a *different* type is rejected.

    Fix: exclude the document being renamed from the duplicate check,
    mirroring update_item/update_combo — otherwise saving a type under its
    current name always raised duplicateError.
    """
    if db.TYPE_COLLECTION.find_one(
        {"_id": {"$ne": ObjectId(data["id"])}, "name": data["type"]}
    ):
        raise duplicateError
    db.TYPE_COLLECTION.update_one(
        {"_id": ObjectId(data["id"])}, {"$set": {"name": data["type"]}}
    )
|
def solution(phone_book):
    """Return False when any number is a prefix of another, else True.

    Fixes: leftover debug print() calls removed; the O(n^2) scan replaced
    by the adjacent-pair check — after lexicographic sorting, every string
    sharing a given prefix is contiguous, so if any later number started
    with phone_book[i], the immediately following one would too.

    Note: sorts *phone_book* in place, as the original did.
    """
    phone_book.sort()
    for shorter, longer in zip(phone_book, phone_book[1:]):
        if longer.startswith(shorter):
            return False
    return True
# Demo invocation (stray trailing '|' residue removed — it was a syntax error).
phone_book = ["119", "97674223", "1195524421"]
solution(phone_book)
import os
import re
import sys
import io
sys.stdout = io.TextIOWrapper(sys.stdout.buffer,encoding='utf-8')
# Decide a file's kind from its extension(s).
def endWith(s, *endstring):
    """Return True when *s* ends with any of the given suffixes."""
    # str.endswith accepts a tuple of suffixes directly — no need to map()
    # over them and probe the iterator for True.
    return s.endswith(endstring)
# Save the generated content to the result file.
def writeResultLog(dirname, luaTableStr):
    """Write the lua author map to <dirname>/script/FileAuthorMap.lua.

    Fixes: removed the unused os.linesep variable and the redundant
    explicit close() inside the with-block.
    """
    logfilename = dirname + "/script/FileAuthorMap.lua"
    with open(logfilename, 'w', encoding='utf-8') as fobj:
        fobj.writelines(luaTableStr)
# Filter out generated/configuration files that need no author lookup.
def isConfigureFile(filename):
    """Return True for design/config lua files (DB tables, copies, i18n).

    Fixes: explicit False instead of an implicit None on no-match, and
    escaped the literal dots before "lua" (a bare '.' matched any char).
    """
    patterns = (
        r'^DB_.*',          # DB tables
        r'^copy_.*',
        r'^config_.*',
        r'^.*copy\d+\.lua',
        r'^i18n_.*\.lua',   # internationalisation tables
    )
    return any(re.match(p, filename) for p in patterns)
# When no author name matched, look up the owner by module name instead.
def getTelByModel(filename):
    """Map a lua file name to its owner's phone number by module pattern.

    Patterns are tried in order; falls back to "isAtAll" when none match.
    """
    pattern_to_tel = (
        # Sun Yunpeng's modules
        (r'^SBList.*', '18612697503'),
        (r'^Partner.*', '18612697503'),
        (r'.*Drop.*', '18612697503'),
        (r'.*Cell.*', '18612697503'),
        (r'.*trea.*', '18612697503'),
        (r'.*Trea.*', '18612697503'),
        (r'.*MainTitle.*', '18612697503'),
        (r'.*Partner.*', '18612697503'),
        (r'.*Patner.*', '18612697503'),
        (r'.*Bag.*', '18612697503'),
        (r'.*HeroFightUtil.*', '18612697503'),
        # Yu Cong's modules
        (r'.*WorldBoss.*', '18511452622'),
        (r'.*Formation.*', '18511452622'),
        (r'.*Buzhen.*', '18511452622'),
        (r'.*SkyPiea.*', '18511452622'),
        # Lv Nanchun's modules
        (r'.*copy.*', '18810465863'),
        (r'.*Copy.*', '18810465863'),
        (r'.*ShipLibrary.*', '18511452622'),
        (r'.*ShipInfo.*', '18511452622'),
        # Zhang Qi's modules
        (r'.*LevelWelfare.*', '13810677600'),
        (r'.*GroupPurchase.*', '13810677600'),
        (r'.*BuyBox.*', '13810677600'),
        (r'.*Recharge.*', '13810677600'),
        (r'.*Impel.*', '13810677600'),
        # Yang Na's modules
        (r'.*MainRegist.*', '18600262413'),
        (r'.*MainRewardInfo.*', '18600262413'),
    )
    for pattern, tel in pattern_to_tel:
        if re.match(pattern, filename):
            return tel
    return "isAtAll"
# Look up a phone number by the author's name.
def getTelByName(name):
    """Map an author name (Chinese or pinyin, known typos included) to a
    phone number; returns None for unknown authors."""
    lowered = name.lower()
    directory = (
        ('孙云鹏', ('sunyunpeng', 'sunyupeng', 'sunyunoeng'), '18612697503'),
        ('虞淙', ('yucong', 'yucng'), '18511452622'),
        ('张琦', ('zhangqi',), '13810677600'),
        ('吕南春', ('lvnanchun',), '18810465863'),
        ('胡晓周', ('huxiaozhou',), '18611560890'),
        ('杨娜', ('yangna',), '18600262413'),
        ('郭春昊', ('guochunhao',), '17184094012'),
    )
    for cn_name, pinyin_aliases, tel in directory:
        if name == cn_name or lowered in pinyin_aliases:
            return tel
    return None
# Walk every file below dirname, extract "Author:" tags from .lua sources,
# and emit a lua table mapping file name -> owner's phone number.
def searchFilesContent(dirname):
    luaTableStr = '''module("FileAuthorMap", package.seeall)
'''
    configureFileList = [] # generated/config tables (no single owner)
    conformFileAuthorList = [] # files whose author resolved to a number
    noConformFileAuthorList = [] # files resolved by module fallback
    for root, dirs, files in os.walk(dirname):
        for file in files:
            if endWith(file,'.lua'): # only search .lua files
                if (isConfigureFile(file)) : # design/config table
                    configureFileList.append(' ["' +file + '"]' + ' = ' + '"' + "isAtAll" + '"')
                else:
                    filename = root + os.sep + file # absolute path
                    # Double the backslashes so the path survives escaping;
                    # "\\" is one backslash, "\\\\" is two in the source.
                    filename = filename.replace("\\","\\\\")
                    with open(filename,'r', encoding = 'utf-8') as fobj:
                        # Scan each line of the file for an Author tag.
                        for fileLine in fobj:
                            # pattern = re.compile(r'Author')
                            matchObj = re.match(r'(.*)(Author: )(.*)', fileLine)
                            if (matchObj != None) :
                                author = matchObj[3]
                                # Build the lua table entry for this file.
                                telNum = getTelByName(author)
                                if (telNum) :
                                    conformFileAuthorList.append(' ["' + file + '"]' + ' = ' + '"' + telNum + '"')
                                else :
                                    # Unknown author: fall back to the
                                    # module-pattern lookup.
                                    telNum = getTelByModel(file)
                                    noConformFileAuthorList.append(' ["' + file + '"]' + ' = ' + '"' + telNum + '"')
    fileList = conformFileAuthorList + noConformFileAuthorList + configureFileList
    # Assemble the lua table source.
    luaTableStr = luaTableStr + '''
local authorMap = {
'''
    for luaStr in fileList:
        luaTableStr = luaTableStr + luaStr + ',' +'\n'
    luaTableStr = luaTableStr + "}" + '''
-- 根据文件名字查找电话号码
function getTelByFileName( fileName )
    return authorMap[fileName]
end
'''
    writeResultLog(dirname, luaTableStr)
    print("search file succed")
# Run the search only when this module is executed directly, not imported.
if __name__ == '__main__':
    searchFilesContent(sys.argv[1])
|
__author__ = 'wing2048'
import time
import math
import serial
from constants import *
class Servo():
    """Tracks the commanded state of a single hardware servo channel."""

    def __init__(self, servo_id):
        self.id = servo_id
        self.velocity = 0
        self.angle = 0

    def set_angle(self, angle):
        """Record *angle* as the servo's target position."""
        self.angle = angle
class Tool():
    """Tracks the on/off state of the robot's end-effector tool."""

    def __init__(self):
        self.is_on = False

    def toggle(self):
        """Flip the tool state and report the new state on stdout."""
        self.is_on = not self.is_on
        print('TOOL ON' if self.is_on else 'TOOL OFF')

    def on(self):
        self.is_on = True

    def off(self):
        self.is_on = False
def tiny_wait():
    # 0.1 s pause between robot commands.
    time.sleep(0.1)
def short_wait():
    # 0.5 s pause.
    time.sleep(0.5)
def wait():
    # 1 s pause.
    time.sleep(1)
def long_wait():
    # 2 s pause.
    time.sleep(2)
class Robot():
def __init__(self, port, tool_pin):
self.port = port
self.online = True
g_file = open('gestures.db')
ready = False
self.gestures = {}
name = False
for line in g_file:
if line[0] == ':':
name = ' '.join(line.split()[1:])
ready = True
elif line[0] == '$':
if ready:
self.gestures[name] = ' '.join(line.split()[1:])
ready = False
g_file.close()
try:
self.serial = serial.Serial(port)
self.send("import pyb")
self.send("tool_pin = pyb.Pin(" + tool_pin + ")")
self.send('tool_led = pyb.LED(2)')
self.send("yaw_servo = pyb.Servo(1)")
self.send("bottom_pitch_servo = pyb.Servo(2)")
self.send("middle_pitch_servo = pyb.Servo(3)")
self.send("top_pitch_servo = pyb.Servo(4)")
except serial.SerialException:
print('ROBOT OFFLINE')
self.online = False
self.servos = []
self.yaw_servo = Servo(1)
self.bottom_pitch_servo = Servo(2)
self.middle_pitch_servo = Servo(3)
self.top_pitch_servo = Servo(4)
self.tool_servo = Servo(5)
self.tool = Tool()
self.center()
self.joints = []
self.command = {
'q': self.up,
'a': self.down,
'w': self.middle_up,
's': self.middle_down,
'e': self.top_up,
'd': self.top_down,
'z': self.left,
'x': self.right,
'Q': self.up,
'A': self.down,
'W': self.middle_up,
'S': self.middle_down,
'E': self.top_up,
'D': self.top_down,
'Z': self.left,
'X': self.right,
'c': self.center,
't': self.tool.on,
'T': self.tool.off,
'v': wait,
'V': long_wait,
'g': short_wait,
'G': tiny_wait,
}
self.top_joint = pygame.Rect((0, 0, 0, 0))
self.middle_joint = pygame.Rect((0, 0, 0, 0))
self.bottom_joint = pygame.Rect((0, 0, 0, 0))
self.yaw_joint = pygame.Rect((0, 0, 0, 0))
def send(self, comm):
self.serial.write((comm + '\r\t\n').encode())
self.flush()
    def flush(self):
        # Drop any pending serial data in both directions.
        self.serial.flushInput()
        self.serial.flushOutput()
def right(self, amount=move_amount):
self.yaw_servo.angle += amount
if self.yaw_servo.angle > 90:
self.yaw_servo.angle = 90
def left(self, amount=move_amount):
# self.send('yaw_servo.angle()')
self.yaw_servo.angle -= amount
if self.yaw_servo.angle < -90:
self.yaw_servo.angle = -90
def up(self, amount=move_amount):
self.bottom_pitch_servo.angle += amount
if self.bottom_pitch_servo.angle > 90:
self.bottom_pitch_servo.angle = 90
def down(self, amount=move_amount):
self.bottom_pitch_servo.angle -= amount
if self.bottom_pitch_servo.angle < -90:
self.bottom_pitch_servo.angle = -90
def middle_up(self, amount=move_amount):
self.middle_pitch_servo.angle += amount
if self.middle_pitch_servo.angle > 90:
self.middle_pitch_servo.angle = 90
def middle_down(self, amount=move_amount):
self.middle_pitch_servo.angle -= amount
if self.middle_pitch_servo.angle < -90:
self.middle_pitch_servo.angle = -90
def top_up(self, amount=move_amount):
self.top_pitch_servo.angle += amount
if self.top_pitch_servo.angle > 90:
self.top_pitch_servo.angle = 90
def top_down(self, amount=move_amount):
self.top_pitch_servo.angle -= amount
if self.top_pitch_servo.angle < -90:
self.top_pitch_servo.angle = -90
def tool_up(self, amount=move_amount):
self.tool_servo.angle += amount
if self.tool_servo.angle < -90:
self.tool_servo.angle = -90
def tool_down(self, amount=move_amount):
self.tool_servo.angle -= amount
if self.tool_servo.angle < -90:
self.tool_servo.angle = -90
def center(self):
self.yaw_servo.angle = 0
self.yaw_servo.set_angle(0)
self.bottom_pitch_servo.set_angle(0)
self.middle_pitch_servo.set_angle(0)
self.top_pitch_servo.set_angle(0)
self.tool_servo.set_angle(0)
def force_center(self):
self.send('yaw_servo.angle(0)')
self.send('bottom_pitch_servo.angle(0)')
self.send('middle_pitch_servo.angle(0)')
self.send('top_pitch_servo.angle(0)')
    def test_led(self):
        # Toggle the board LED as a connectivity sanity check.
        self.send('LED1.toggle()')
# noinspection PyShadowingNames
def interpret(self, c_str, screen):
for letter in c_str:
if letter in 'QAWSEDZX':
self.command[letter](10)
elif letter in 'vVgG':
self.update(screen)
pygame.display.flip()
self.command[letter]()
else:
self.command[letter]()
self.update(screen)
pygame.display.flip()
def gesture_interpret(self, gesture, screen):
print('EXECUTING MOVEMENT')
gesture_lib = {
'y': self.yaw_servo.set_angle,
'b': self.bottom_pitch_servo.set_angle,
'm': self.middle_pitch_servo.set_angle,
't': self.top_pitch_servo.set_angle,
}
for g_set in gesture.split('+'):
for action in g_set.split():
gesture_lib[action[0]](float(action[1:]))
self.update(screen)
short_wait()
# noinspection PyShadowingNames
def update(self, screen):
if self.online:
if self.tool.is_on:
self.send('tool_pin.high()')
self.send('tool_led.on()')
else:
self.send('tool_pin.low()')
self.send('tool_led.off()')
self.send('yaw_servo.angle(' + str(self.yaw_servo.angle) + ')')
self.send('bottom_pitch_servo.angle(' + str(self.bottom_pitch_servo.angle) + ')')
self.send('middle_pitch_servo.angle(' + str(self.middle_pitch_servo.angle / 2) + ')')
self.send('top_pitch_servo.angle(' + str(self.top_pitch_servo.angle / 2) + ')')
screen.fill((255, 255, 255))
# TODO: Fix ground boundaries
yaw_line_end = (
round(YAW_INDICATOR_CENTER[0] + 100 * math.cos(math.radians(
-1 * (self.yaw_servo.angle + 90)))),
round(YAW_INDICATOR_CENTER[1] + 100 * math.sin(math.radians(
-1 * (self.yaw_servo.angle + 90)))))
yaw_line_end = (yaw_line_end[0], YAW_INDICATOR_CENTER[1] * 2 - yaw_line_end[1])
bp_line_end = (
round(PITCH_INDICATOR_CENTER[0] + BOTTOM_ARM_LENGTH * math.cos(math.radians(
self.bottom_pitch_servo.angle - 90))),
round(PITCH_INDICATOR_CENTER[1] + BOTTOM_ARM_LENGTH * math.sin(math.radians(
self.bottom_pitch_servo.angle - 90))))
mp_line_end = (
round(bp_line_end[0] + MIDDLE_ARM_LENGTH * math.cos(math.radians(
self.middle_pitch_servo.angle - 90 + self.bottom_pitch_servo.angle))),
round(bp_line_end[1] + MIDDLE_ARM_LENGTH * math.sin(math.radians(
self.middle_pitch_servo.angle - 90 + self.bottom_pitch_servo.angle))))
tp_line_end = (
round(mp_line_end[0] + TOP_ARM_LENGTH * math.cos(math.radians(
self.top_pitch_servo.angle - 90 + self.middle_pitch_servo.angle + self.bottom_pitch_servo.angle))),
round(mp_line_end[1] + TOP_ARM_LENGTH * math.sin(math.radians(
self.top_pitch_servo.angle - 90 + self.middle_pitch_servo.angle + self.bottom_pitch_servo.angle))))
if mp_line_end[1] > PITCH_INDICATOR_CENTER[1]:
mp_line_end = (mp_line_end[0], PITCH_INDICATOR_CENTER[1])
if tp_line_end[1] > PITCH_INDICATOR_CENTER[1]:
tp_line_end = (tp_line_end[0], PITCH_INDICATOR_CENTER[1])
compass = pygame.image.load('protractor.jpg')
pygame.draw.rect(screen, (0, 0, 0), (screen.get_rect().width / 2 - 50, 200, 100, 50), 5)
pygame.draw.circle(screen, (0, 0, 0), PITCH_INDICATOR_CENTER, 3)
pygame.draw.circle(screen, (0, 0, 0), bp_line_end, 3)
pygame.draw.circle(screen, (0, 0, 0), mp_line_end, 3)
compensation = \
self.top_pitch_servo.angle + \
self.middle_pitch_servo.angle + \
self.bottom_pitch_servo.angle + \
self.tool_servo.angle - 90
if self.tool.is_on:
pygame.draw.polygon(screen, (255, 255, 0), (tp_line_end,
(tp_line_end[0] + 30 * math.cos(
math.radians(LIGHT_ANGLE + compensation)),
tp_line_end[1] + 30 * math.sin(
math.radians(LIGHT_ANGLE + compensation))),
(tp_line_end[0] + 30 * math.cos(
math.radians(-LIGHT_ANGLE + compensation)),
tp_line_end[1] + 30 * math.sin(
math.radians(-LIGHT_ANGLE + compensation)))))
pygame.draw.rect(screen, (0, 255, 0), (screen.get_rect().width / 2 - 45, 205, 90, 40))
pygame.draw.rect(screen, (0, 0, 0),
(tp_line_end[0] - 2, tp_line_end[1] - 2, 5, 5))
else:
pygame.draw.rect(screen, (255, 0, 0), (screen.get_rect().width / 2 - 45, 205, 90, 40))
pygame.draw.circle(screen, (0, 0, 0), tp_line_end, 3)
pygame.draw.rect(screen, (0, 0, 0), (screen.get_rect().width / 2 + 100, 200, 100, 50), 5)
pygame.draw.rect(screen, (150, 150, 255), (screen.get_rect().width / 2 + 105, 205, 90, 40))
tool_text = font.render('TOOL', 25, (0, 0, 0))
cntr_text = font.render('CNTR', 25, (0, 0, 0))
screen.blit(cntr_text, (screen.get_rect().centerx + 120, 260))
pitch_text = font.render('ARM PITCH', 25, (0, 0, 0))
yaw_text = font.render('YAW', 25, (0, 0, 0))
screen.blit(pygame.transform.smoothscale(compass, (201, 110)),
(YAW_INDICATOR_CENTER[0] - 100, YAW_INDICATOR_CENTER[1] - 5))
pygame.draw.circle(screen, (0, 0, 0), yaw_line_end, 3)
screen.blit(tool_text, (screen.get_rect().centerx - tool_text.get_rect().width / 2, 170))
screen.blit(pitch_text,
(PITCH_INDICATOR_CENTER[0] - pitch_text.get_rect().width / 2, PITCH_INDICATOR_CENTER[1] + 10))
screen.blit(yaw_text, (YAW_INDICATOR_CENTER[0] - yaw_text.get_rect().width / 2, YAW_INDICATOR_CENTER[1] + 110))
pygame.draw.aaline(screen, (0, 0, 0), yaw_line_end, YAW_INDICATOR_CENTER)
pygame.draw.aaline(screen, (0, 0, 0), (PITCH_INDICATOR_CENTER[0] - 50, PITCH_INDICATOR_CENTER[1]),
(PITCH_INDICATOR_CENTER[0] + 50, PITCH_INDICATOR_CENTER[1]), 2)
pygame.draw.aaline(screen, (0, 0, 0), bp_line_end, PITCH_INDICATOR_CENTER)
pygame.draw.aaline(screen, (0, 0, 0), mp_line_end, bp_line_end)
pygame.draw.aaline(screen, (0, 0, 0), tp_line_end, mp_line_end, 4)
self.bottom_joint = pygame.Rect(
bp_line_end[0] - TOUCH_ACCURACY, bp_line_end[1] - TOUCH_ACCURACY, TOUCH_ACCURACY * 2, TOUCH_ACCURACY * 2)
self.middle_joint = pygame.Rect(
mp_line_end[0] - TOUCH_ACCURACY, mp_line_end[1] - TOUCH_ACCURACY, TOUCH_ACCURACY * 2, TOUCH_ACCURACY * 2)
self.top_joint = pygame.Rect(
tp_line_end[0] - TOUCH_ACCURACY, tp_line_end[1] - TOUCH_ACCURACY, TOUCH_ACCURACY * 2, TOUCH_ACCURACY * 2)
self.yaw_joint = pygame.Rect(
yaw_line_end[0] - TOUCH_ACCURACY, yaw_line_end[1] - TOUCH_ACCURACY, TOUCH_ACCURACY * 2, TOUCH_ACCURACY * 2)
self.joints = {
'bottom': self.bottom_joint,
'middle': self.middle_joint,
'top': self.top_joint,
'yaw': self.yaw_joint
}
pygame.display.flip() |
from django.db import models
import datetime as dt
# Create your models here.
class Category(models.Model):
    """A selectable image category."""

    CATEGORIES = (
        ("SpaceX", "SpaceX"),
        ("Blue Origin", "Blue Origin"),
        ("Virgin Atlantic", "Virgin Atlantic"),
    )
    image_category = models.CharField(max_length=40, choices=CATEGORIES,)

    def save_category(self):
        """Persist this category."""
        self.save()

    def delete_category(self):
        """Remove this category."""
        self.delete()

    @classmethod
    def update_category(cls, category, update):
        """Rename every category matching *category* to *update*."""
        return cls.objects.filter(image_category=category).update(
            image_category=update
        )

    def __str__(self):
        return self.image_category
class Location(models.Model):
    """Image location drawn from a closed set of choices."""

    LOCATIONS = (("USA", "USA"), ("Canada", "Canada"), ("Britain", "Britain"))
    image_location = models.CharField(max_length=40, choices=LOCATIONS,)

    def save_location(self):
        """Persist this location row."""
        self.save()

    def delete_location(self):
        """Remove this location row from the database."""
        self.delete()

    @classmethod
    def update_location(cls, location, update):
        """Rename every row matching *location* to *update*; returns the count
        of rows changed (Django's ``QuerySet.update`` return value)."""
        return cls.objects.filter(image_location=location).update(image_location=update)

    def __str__(self):
        return self.image_location
class Image(models.Model):
    """An uploaded image together with its category and location."""

    image_name = models.CharField(max_length=30)
    image_description = models.TextField()
    image_date = models.DateTimeField(auto_now_add=True)  # set once at creation
    image = models.ImageField(upload_to='images/',default='nothing')
    # on_delete is mandatory from Django 2.0 onward; CASCADE matches the
    # implicit default of earlier Django versions, so behavior is unchanged.
    category = models.ForeignKey(Category, on_delete=models.CASCADE)
    location = models.ForeignKey(Location, on_delete=models.CASCADE)

    def __str__(self):
        return self.image_name

    def __unicode__(self):
        # Bug fix: this previously returned the related Category *instance*;
        # __unicode__ must return text.
        return self.image_name

    class Meta:
        ordering = ['image_name']

    def save_image(self):
        """Persist this image row."""
        self.save()

    @classmethod
    def search_by_title(cls, search_term):
        """Images whose name contains *search_term* (case-insensitive)."""
        return cls.objects.filter(image_name__icontains=search_term)

    @classmethod
    def filter_location(cls, location):
        """Images whose location name starts with *location* (case-insensitive)."""
        return cls.objects.filter(location__image_location__istartswith=location)

    @classmethod
    def filter_category(cls, category):
        """Images whose category name starts with *category* (case-insensitive)."""
        return cls.objects.filter(category__image_category__istartswith=category)
|
import numpy as np
import torch
from torch.utils.data import TensorDataset
from torch.utils.data import DataLoader
import logging
from datetime import datetime
import time
import utils_.helpers as helpers
def get_loss(model, loss_func, data_loader):
    """Average per-sample loss of *model* over *data_loader*, weighting each
    batch's mean loss by its batch size."""
    batch_losses = []
    batch_sizes = []
    for batch in data_loader:
        loss_value, count = loss_batch(model, loss_func, None, batch[-1], batch[:-1])
        batch_losses.append(loss_value)
        batch_sizes.append(count)
    return np.sum(np.multiply(batch_losses, batch_sizes)) / np.sum(batch_sizes)
def fit(epochs, model, loss_func, opt, train_dl, valid_dl):
    """Train *model* for *epochs* epochs with optimizer *opt*.

    After every epoch the (full) train and validation losses are recomputed
    under ``no_grad`` and logged.  Returns the final ``(train_loss, val_loss)``.

    Bug fix: both losses are now initialized, so calling with ``epochs == 0``
    returns ``(None, None)`` instead of raising NameError at the return.
    """
    train_loss = val_loss = None
    for epoch in range(epochs):
        start_time = time.time()
        model.train()
        for batch in train_dl:
            # batch layout: inputs first, target last
            loss_batch(model, loss_func, opt, batch[-1], batch[:-1])
        model.eval()
        with torch.no_grad():
            train_loss = get_loss(model, loss_func, train_dl)
            val_loss = get_loss(model, loss_func, valid_dl)
        logging.info(f"[{helpers.get_timestamp()}] E[{helpers.parse_execution_time(int(time.time() - start_time))}] Epoch {epoch}: {train_loss} ({val_loss})")
    return train_loss, val_loss
def get_data(train_ds, valid_ds, batch_size):
    """Wrap the two datasets in DataLoaders.

    The validation loader uses double the batch size (no gradients are kept
    during evaluation, so larger batches fit in memory)."""
    train_dl = DataLoader(train_ds, batch_size=batch_size, shuffle=False)
    valid_dl = DataLoader(valid_ds, batch_size=batch_size * 2, shuffle=False)
    return train_dl, valid_dl
def loss_batch(model, loss_func, opt, yb, xb):
    """Loss of *model* on one batch; takes an optimizer step when *opt* is given.

    *xb* is a tuple of input tensors, *yb* the target.  Returns
    ``(loss_value, batch_size)``."""
    predictions = model(*xb)
    batch_loss = loss_func(predictions, yb)
    training = opt is not None
    if training:
        batch_loss.backward()
        opt.step()
        opt.zero_grad()
    return batch_loss.item(), len(xb[0])
|
import tkinter as tk
from tkinter.scrolledtext import ScrolledText
from Ciphers.Nomenclator import nomenclator, PrintCodes
from Ciphers.UtilityFunctions import preptext
import webbrowser
# Create the window
root = tk.Tk()
# Don't let the user change the window size (min == max pins it at 800x800)
root.maxsize(800,800)
root.minsize(800,800)
# Title of the window
root.title("The Overkill Cipher")
# Text boxes: plaintext, key, ciphertext, and the scrollable code-group list
ptext = tk.Text(root,height=16,width=40)  # plaintext input
key = tk.Text(root,height=1,width=16)  # numeric key
ctext = tk.Text(root,height=16,width=40)  # ciphertext output
cdgrps = ScrolledText(root,height=16,width=32)  # code groups display
# Exit Button
def qExit():
    # Close the main window, ending the Tk event loop.
    root.destroy()
# Reset Button
def Reset():
    """Blank the ciphertext, plaintext, and key boxes (in that order)."""
    for box in (ctext, ptext, key):
        box.delete("1.0","end")
def link(event):
    # Open the project repository in the default browser (bound to the link label).
    webbrowser.open_new(r"https://github.com/SymmetricChaos/ClassicCrypto")
# Make Tab move focus between widgets instead of inserting a tab character
def focus_next_widget(event):
    event.widget.tk_focusNext().focus()
    # Returning "break" stops Tk's default Tab handling.
    return("break")
# Encrypt function
def enc():
    """Encrypt the plaintext box contents; show ciphertext and code groups."""
    # Get the text from the ptext box (drop the trailing newline Tk appends)
    T = ptext.get("1.0","end")[:-1]
    T = preptext(T)
    # Get the key from the key box
    K = key.get("1.0","end")[:-1]
    # Blank the ctext and code groups boxes
    ctext.delete("1.0","end")
    cdgrps.delete("1.0","end")
    # Try encrypting
    try:
        tx = nomenclator(T,int(K),decode=False)
    except Exception as e:
        ctext.insert("insert",str(e))
        # Bug fix: bail out here — `tx` was never assigned, so falling
        # through used to raise NameError on the insert below.
        return
    ctext.insert("insert",tx)
    for i in PrintCodes(int(K)):
        cdgrps.insert("insert",i)
        cdgrps.insert("insert","\n")
# Decrypt function
def dec():
    """Decrypt the plaintext box contents; show plaintext and code groups."""
    # Get the text from the ptext box (drop the trailing newline Tk appends)
    T = ptext.get("1.0","end")[:-1]
    # Get the key from the key box
    K = key.get("1.0","end")[:-1]
    # Blank the ctext and code groups boxes
    ctext.delete("1.0","end")
    cdgrps.delete("1.0","end")
    # Try decrypting
    try:
        tx = nomenclator(T,int(K),decode=True)
    except Exception as e:
        ctext.insert("insert",str(e))
        # Bug fix: bail out here — `tx` was never assigned, so falling
        # through used to raise NameError on the insert below.
        return
    ctext.insert("insert",tx)
    for i in PrintCodes(int(K),decode=True):
        cdgrps.insert("insert",i)
        cdgrps.insert("insert","\n")
# Button to run cipher in encrypt mode
encryptbutton = tk.Button(root, text="Encrypt", command = enc,
                          bg = 'lightblue', font = ('arial',14,'bold'))
# Button to run cipher in decrypt mode
decryptbutton = tk.Button(root, text="Decrypt", command = dec,
                          bg = 'lightgreen', font = ('arial',14,'bold'))
# Button to clear everything
resetbutton = tk.Button(root, text="Clear", command = Reset,
                        bg = 'lightslateblue', font = ('arial',14,'bold'))
# Button to close the window
exitbutton = tk.Button(root, text="Exit", command = qExit,
                       bg = 'salmon', font = ('arial',14,'bold'))
# Labels
ptextLab = tk.Label(root,text="Input:",font = ('arial',14))
ctextLab = tk.Label(root,text="Output:",font = ('arial',14))
keywordLab = tk.Label(root,text="Key:",font = ('arial',14))
dictLab = tk.Label(root,text="Code Groups:",font = ('arial',14))
explainLab = tk.Label(root,
                      text="All symbols except letters from the standard English alphabet will be removed.",
                      font = ('arial',12),
                      wraplength=200,
                      relief=tk.GROOVE,
                      padx = 10, pady = 10)
linkLab = tk.Label(root, text="See The Code",
                   font = ('courier',12),
                   relief=tk.GROOVE,
                   padx = 5, pady = 5,
                   fg="blue", cursor="hand2")
# Tab control: Tab moves focus to the next text box
ptext.bind("<Tab>", focus_next_widget)
key.bind("<Tab>", focus_next_widget)
ctext.bind("<Tab>", focus_next_widget)
# Put everything in position (absolute placement on the fixed 800x800 window)
linkLab.place(x=600,y=730)
linkLab.bind("<Button-1>", link)  # clicking the label opens the repository
explainLab.place(x=530,y=100)
ptext.place(x=130,y=30)
ptextLab.place(x=40,y=30)
key.place(x=130,y=300)
keywordLab.place(x=40,y=300)
encryptbutton.place(x=130,y=330)
decryptbutton.place(x=230,y=330)
resetbutton.place(x=380,y=330)
ctext.place(x=130,y=380)
ctextLab.place(x=30,y=380)
dictLab.place(x=500,y=350)
cdgrps.place(x=500,y=380)
exitbutton.place(x=150,y=700)
root.mainloop()
import json
import math
class Agent:
    """A person record whose attributes are set dynamically from keyword args."""

    def say_hello(self, first_name):
        # NOTE: the original parameter was misspelled `sef`; it is positional,
        # so renaming it to the conventional `self` is caller-safe.
        return "Bien le bonjour"+first_name+" !"

    def __init__(self, position=None, **agent_attributes):
        # Bug fix: callers pass a Possition positionally (Agent(position, **attrs));
        # the original signature rejected it.  Default None keeps old calls working.
        self.position = position
        for attr_name, attr_value in agent_attributes.items():
            # Bug fix: the original stored the attribute *name* as its value
            # (setattr(self, attr_name, attr_name)).
            setattr(self, attr_name, attr_value)
class Possition:
    """A geographic coordinate pair.

    (The class name keeps the original spelling because callers use it.)"""

    def __init__(self, longitude, latitude):
        self.longitude = longitude
        self.latitude = latitude
class Zone:
    """A 1°x1° rectangle on the globe that accumulates inhabitants."""

    MIN_LONGITUDE_DEGREES = -180
    MAX_LONGITUDE_DEGREES = 180
    MIN_LATITUDE_DEGREES = -90
    MAX_LATITUDE_DEGREES = 90
    WIDTH_DEGREES = 1  # degrees of longitude
    HEIGHT_DEGREES = 1  # degrees of latitude
    ZONES = []  # flat grid filled row-by-row (latitude outer) by _initialize_zones()
    EARTH_RADIUS_KILOMETERS = 6371

    def __init__(self, corner1, corner2):
        self.corner1 = corner1
        self.corner2 = corner2
        # Bug fix: this was `0` (an int); add_inhabitants() appends, so it
        # must be a list.  Also removed a dead local `longitude` assignment.
        self.inhabitants = []

    def add_inhabitants(self, inhabitant):
        self.inhabitants.append(inhabitant)

    @property
    def population(self):
        return len(self.inhabitants)

    @property
    def width(self):
        return abs(self.corner1.longitude - self.corner2.longitude) * self.EARTH_RADIUS_KILOMETERS

    @property
    def height(self):
        # Bug fix: this previously measured longitude again (copy/paste error).
        return abs(self.corner1.latitude - self.corner2.latitude) * self.EARTH_RADIUS_KILOMETERS

    @property
    def area(self):
        return self.height * self.width

    def population_density(self):
        return self.population / self.area

    def average_agreeableness(self):
        if not self.inhabitants:
            return 0
        return sum(inhabitant.agreeableness for inhabitant in self.inhabitants) / self.population

    @classmethod
    def find_zone_that_contains(cls, position):
        """Return the zone whose rectangle contains *position*.

        Added because main() calls it but it was never defined.  Relies on the
        row-by-row ordering that _initialize_zones() produces."""
        if not cls.ZONES:
            cls._initialize_zones()
        longitude_index = int((position.longitude - cls.MIN_LONGITUDE_DEGREES) / cls.WIDTH_DEGREES)
        latitude_index = int((position.latitude - cls.MIN_LATITUDE_DEGREES) / cls.HEIGHT_DEGREES)
        longitude_bins = int((cls.MAX_LONGITUDE_DEGREES - cls.MIN_LONGITUDE_DEGREES) / cls.WIDTH_DEGREES)
        return cls.ZONES[latitude_index * longitude_bins + longitude_index]

    @classmethod
    def initialize_zones(cls):
        """Public alias for the module-level call `Zone.initialize_zones()`,
        which otherwise raised AttributeError."""
        cls._initialize_zones()

    @classmethod
    def _initialize_zones(cls):
        for latitude in range(cls.MIN_LATITUDE_DEGREES, cls.MAX_LATITUDE_DEGREES, cls.HEIGHT_DEGREES):
            for longitude in range(cls.MIN_LONGITUDE_DEGREES, cls.MAX_LONGITUDE_DEGREES, cls.WIDTH_DEGREES):
                # Bug fix: the class is spelled Possition; `Position` was a NameError.
                bottom_left_corner = Possition(longitude, latitude)
                top_right_corner = Possition(longitude + cls.WIDTH_DEGREES, latitude + cls.HEIGHT_DEGREES)
                zone = Zone(bottom_left_corner, top_right_corner)
                cls.ZONES.append(zone)
        print(len(cls.ZONES))
# Bug fix: the classmethod is named `_initialize_zones`; `initialize_zones`
# did not exist and raised AttributeError at import time.
Zone._initialize_zones()


def main():
    """Load agents from JSON, place each in its zone, and print that zone's
    running average agreeableness."""
    for agent_attributes in json.load(open("agent-100k.json")):
        latitude = agent_attributes.pop('latitude')
        longitude = agent_attributes.pop('longitude')
        position = Possition(longitude, latitude)
        agent = Agent(position, **agent_attributes)
        zone = Zone.find_zone_that_contains(position)
        zone.add_inhabitants(agent)
        print(zone.average_agreeableness())


main()
|
from tests.integration.integration_test_case import IntegrationTestCase
from tests.integration.it_utils import submit_transaction_async, test_async_and_sync
from tests.integration.reusable_values import WALLET
from xrpl.models.response import ResponseStatus
from xrpl.models.transactions import EscrowCancel
# Transaction constants shared by the test below.
ACCOUNT = WALLET.classic_address  # the funded test wallet's address
OWNER = "rf1BiGeXwwQoi8Z2ueFYTEXSwuJYfV2Jpn"  # escrow creator; account does not exist on the test network
OFFER_SEQUENCE = 7  # sequence number of the escrow-creating transaction


class TestEscrowCancel(IntegrationTestCase):
    """Integration test for submitting an EscrowCancel transaction."""

    @test_async_and_sync(globals())
    async def test_all_fields(self, client):
        # Build an EscrowCancel with every field populated.
        escrow_cancel = EscrowCancel(
            account=ACCOUNT,
            sequence=WALLET.sequence,
            owner=OWNER,
            offer_sequence=OFFER_SEQUENCE,
        )
        response = await submit_transaction_async(escrow_cancel, WALLET)
        # Actual engine_result is `tecNO_TARGET` since the OWNER account doesn't
        # exist; the submission itself still reports SUCCESS.
        self.assertEqual(response.status, ResponseStatus.SUCCESS)
|
1. Assert: _O_ is an Object that has a [[ViewedArrayBuffer]] internal slot.
1. Assert: The [[ViewedArrayBuffer]] internal slot of _O_ is *undefined*.
1. Assert: _length_ ≥ 0.
1. Let _constructorName_ be the String value of _O_'s [[TypedArrayName]] internal slot.
1. Let _elementSize_ be the Element Size value in <emu-xref href="#table-49"></emu-xref> for _constructorName_.
1. Let _byteLength_ be _elementSize_ × _length_.
1. Let _data_ be ? AllocateArrayBuffer(%ArrayBuffer%, _byteLength_).
1. Set _O_'s [[ViewedArrayBuffer]] internal slot to _data_.
1. Set _O_'s [[ByteLength]] internal slot to _byteLength_.
1. Set _O_'s [[ByteOffset]] internal slot to 0.
1. Set _O_'s [[ArrayLength]] internal slot to _length_.
1. Return _O_. |
# -*- coding: utf-8 -*-
from Tkinter import *
CANVAS_WIDTH = 600  # canvas size in pixels
CANVAS_HEIGHT = 600
CELL_X = CELL_Y = 35  # grid cell size in pixels (square cells)
def paint_grid(canvas):
    """Draw vertical and horizontal grid lines across the whole canvas.

    Bug fix: the loop bounds previously mixed CELL_X and CELL_Y (x stepped by
    CELL_Y, y started at CELL_X) — harmless while the two are equal, wrong the
    moment cells stop being square.
    """
    for x in range(CELL_X, CANVAS_WIDTH, CELL_X):
        canvas.create_line(x, 0, x, CANVAS_HEIGHT, fill="black")
    for y in range(CELL_Y, CANVAS_HEIGHT, CELL_Y):
        canvas.create_line(0, y, CANVAS_WIDTH, y, fill="black")
def ufill_cell(canvas, x, y, fill):
    """Fill the grid cell whose top-left corner is (x, y) with color *fill*.

    Bug fix: the original ignored every parameter, drew a fixed 10x10 blue
    rectangle, and immediately deleted it — a complete no-op.  Returns the
    canvas item id so callers can delete the fill later.
    """
    return canvas.create_rectangle(x, y, x + CELL_X, y + CELL_Y, fill=fill)
def main():
    """Build the window, draw the grid, fill a demo cell, then run the event loop."""
    tinker = Tk()
    wind = Canvas(tinker, width=CANVAS_WIDTH, height=CANVAS_HEIGHT)
    wind.pack()
    paint_grid(wind)
    # Bug fix: this call used to come after mainloop(), so it only executed
    # once the window had already been closed.
    ufill_cell(wind, 10, 10, "blue")
    mainloop()


if __name__ == '__main__':
    main()
|
from django.db import models
from phonenumber_field.modelfields import PhoneNumberField
# Create your models here.
class Cliente(models.Model):
    """Restaurant customer identified by a unique phone number."""

    nombre = models.CharField(max_length=250)
    telefono = PhoneNumberField(unique=True)
    notas = models.TextField(blank=True)

    def ultima_visita_con_reserva(self):
        """Date of the most recent reservation, or None when there is none.

        Bug fix: ``.last()`` returns None for a customer without reservations,
        so the original raised AttributeError on ``.fecha``.
        """
        ultima = self.reservas.all().order_by('fecha').last()
        return ultima.fecha if ultima is not None else None

    def numero_de_visitas(self):
        """Total number of reservations this customer has made."""
        return self.reservas.all().count()

    def __str__(self):
        return self.nombre
class Reserva(models.Model):
    # One row per visit; deleting the customer cascades to their reservations,
    # and Cliente reaches them via the `reservas` related name.
    cliente = models.ForeignKey(to=Cliente,on_delete=models.CASCADE,related_name="reservas")
    fecha = models.DateTimeField(verbose_name="Fecha de la reserva")
    comensales = models.IntegerField(verbose_name="Numero de comensales")
import os
import numpy as np
import torch
import torch.nn as nn
import torch.utils.data as Data
import torch.optim as optim
import torch.nn.functional as F
data_csv_path = ["SM_HighBP","SM_Normal","SM_pneumonia","SM_SARS"]
def data_preprocessing(data_path,num_feature=23):
    """Build train and test DataLoaders from the CSV files named in *data_path*.

    Each CSV under ./sars-cov-1/ becomes one class, labelled 0..len(data_path)-1
    in list order.  The first 923 rows of a file go to training and the rest to
    test — assumes every file has more than 923 rows (TODO confirm against the
    dataset).  Only the first *num_feature* columns of each row are kept.
    """
    path = os.path.join("sars-cov-1")
    datasets = []
    test_datasets = []
    label = int(-1)
    for category in data_path:
        # set label (0 for the first file, 1 for the second, ...)
        label+=1
        current_path = os.path.join(path,category+".csv")
        with open(current_path) as f:
            idx = 0
            data = []
            test_data = []
            for line in f.readlines():
                idx+=1
                single_data_list = line.strip().split(',')
                single_data = np.array(single_data_list,dtype=np.float32)[:num_feature]
                if idx < 924:
                    data.append(single_data)
                else:
                    test_data.append(single_data)
            data = torch.from_numpy(np.array(data))
            test_data = torch.from_numpy(np.array(test_data))
            # labels are column vectors (N, 1); consumers squeeze them
            labels = torch.full((data.shape[0],1),label,dtype=torch.long)
            test_labels = torch.full((test_data.shape[0],1),label,dtype=torch.long)
            current_dataset = Data.TensorDataset(data,labels)
            current_test_dataset = Data.TensorDataset(test_data,test_labels)
            datasets.append(current_dataset)
            test_datasets.append(current_test_dataset)
            # print(f"there are {idx} datapoints under label {label}.")
    ans_dataset = Data.ConcatDataset(datasets)
    ans_test_dataset = Data.ConcatDataset(test_datasets)
    ans_dataloader = Data.DataLoader(ans_dataset,shuffle=True,batch_size=20)
    test_dataloader = Data.DataLoader(ans_test_dataset,shuffle=True,batch_size=10)
    return ans_dataloader,test_dataloader
def correlation_loss(predictions, labels):
    """Correlation-style score between *predictions* and *labels*.

    Both tensors are mean-centered; the centered product's mean is divided by
    the product of standard deviations.  NOTE: torch.std is unbiased (n-1)
    while the numerator's mean divides by n, so this is a scaled — not exact —
    Pearson coefficient; kept as-is to preserve behavior.
    """
    centered_p = predictions - predictions.mean()
    centered_l = labels - labels.mean()
    return (centered_p * centered_l).mean() / (predictions.std() * labels.std())
# Constructive Cascade Neural Network
class Cascade_Network(nn.Module):
    """Cascade-correlation-style network: a direct input->output linear layer
    plus single-unit hidden layers that are added one at a time during training.

    The three ModuleDicts are owned by the caller and grown by add_neuron():
      input2hidden_layers[str(i)]  : input -> i-th hidden unit
      hidden2hidden_layers[str(j)] : an earlier hidden unit -> a later one
      hidden2output_layers[str(i)] : i-th hidden unit -> output
    """
    def __init__(self, input_size, num_classes, input2hidden_layers, hidden2hidden_layers, hidden2output_layers):
        super().__init__()
        self.num_hiddens = 0  # number of hidden units currently installed
        self.input_size = input_size
        self.num_classes = num_classes
        self.initial_input_layer = nn.Linear(input_size, num_classes)
        # module dict for different layers
        self.input2hidden_layers = input2hidden_layers
        self.hidden2hidden_layers = hidden2hidden_layers
        self.hidden2output_layers = hidden2output_layers
        # for correlation GD: output contribution of the newest hidden unit,
        # cached by forward() and read by optimize_correlation()
        self.latest_hidden_out = None
    def forward(self, x):
        """Sum of the direct input->output path and every hidden unit's path."""
        input_out = self.initial_input_layer(x) # input directly to output
        if self.num_hiddens == 0:
            return input_out
        # store all outputs from input layer which are also the input for any hidden layers
        H_in = list()
        # store the first output from input layer (between input and first hidden unit)
        H_in.append(F.leaky_relu(self.input2hidden_layers['0'](x)))
        if self.num_hiddens == 1:
            # if only one hidden layer inserted
            out2 = self.hidden2output_layers['0'](H_in[0])
            self.latest_hidden_out = out2
            return input_out + out2
        # if num_hiddens>1, do the following iteration
        hidden_idx = 0 # record the index of hidden2hidden_layers
        for i in range(1, self.num_hiddens):
            # build the current hidden unit, init with self.input_hidden
            current_hidden_unit = F.leaky_relu(self.input2hidden_layers[str(i)](x))
            for h in H_in:
                current_hidden_unit += F.leaky_relu(self.hidden2hidden_layers[str(hidden_idx)](h))
                hidden_idx += 1
            H_in.append(current_hidden_unit)
        # Connect hidden layer to output
        final_out = input_out
        # record the index of hidden2output_layers
        hidden2out_idx = 0
        for h in H_in:
            final_out = final_out + self.hidden2output_layers[str(hidden2out_idx)](h)
            hidden2out_idx += 1
        self.latest_hidden_out = self.hidden2output_layers[str(self.num_hiddens-1)](H_in[-1])
        return final_out
    def add_neuron(self):
        """
        the network will add one more neuron/layer into hidden layer.
        NOTICE: call optimize_correlation and freeze_neuron afterwards to get the optimized frozen weights and freeze
        the input2hidden and hidden2hidden layer.
        """
        self.num_hiddens += 1
        self.input2hidden_layers[str(len(self.input2hidden_layers))] = nn.Linear(self.input_size, 1, bias=False)
        # one incoming 1->1 connection from every previously existing hidden unit
        for n_connection in range(self.num_hiddens - 1):
            self.hidden2hidden_layers[str(len(self.hidden2hidden_layers))] = nn.Linear(1, 1,bias=False)
        self.hidden2output_layers[str(len(self.hidden2output_layers))] = nn.Linear(1, self.num_classes, bias=False)
        return
    def optimize_correlation(self,dataloader,num_epochs=10, optimizer=None):
        """
        optimize the correlation between final output error of labels and latest internal output by new neuron.
        :param optimizer: use specified optimizer. default SGD
        :param num_epochs: number of sub-epochs. default 10
        :param dataloader: sub-data loader to train new input2hidden and hidden2hidden.
        :return: used optimizer
        """
        print(" Start Correlation optimizing...")
        if optimizer is None:
            optimizer = optim.SGD(self.parameters(),lr=0.001,momentum=0.9)
        loss_sub_log = []
        for epoch in range(num_epochs):
            current_loss = float(0)
            # batch_num = 0
            for batch_idx, batch_data in enumerate(dataloader,start=0):
                data, labels = batch_data
                optimizer.zero_grad()
                forward_correlation_result = F.softmax(self.forward(data),dim=1)
                # labels_extended = labels.expand(forward_correlation_result.shape)
                # one-hot encode the integer labels
                labels_extended = torch.zeros(forward_correlation_result.shape)
                for idx, label in enumerate(labels):
                    labels_extended[idx][label] = 1
                error = forward_correlation_result-labels_extended
                # maximize the correlation by minimizing its negation
                loss = -correlation_loss(self.latest_hidden_out,error)
                loss.backward()
                optimizer.step()
                current_loss = loss.item()
                # batch_num+=1
            loss_sub_log.append(current_loss)
            print(f" sub epoch {epoch} correlation loss: {-current_loss}")
        return optimizer
    def freeze_neuron(self,optimizer):
        """
        freeze the previous and current weight.
        :param optimizer: optimizer params to be frozen
        :return: optimizer: frozen optimizer
        """
        n_neurons = self.num_hiddens
        params = []
        # lr=0 freezes a parameter group while keeping it registered with SGD
        for i in range(n_neurons):
            params.append(
                # input2hidden
                {'params': self.input2hidden_layers[str(i)].parameters(), 'lr': 0},
            )
            params.append(
                # hidden2output
                {'params': self.hidden2output_layers[str(i)].parameters(), 'lr': 0.001},
            )
        if n_neurons > 1:
            # n*(n-1)/2 hidden-to-hidden connections exist for n hidden units
            for i in range(int(n_neurons*(n_neurons-1)/2)):
                params.append(
                    # hidden2hidden
                    {'params': self.hidden2hidden_layers[str(i)].parameters(), 'lr': 0},
                )
        optimizer = torch.optim.SGD(params, momentum=0.9,lr=0.001)
        return optimizer
def test_accuracy(dataloader,network):
# final test set
true_postive = 0
total = 0
for batch_idx, batch_data in enumerate(dataloader, start=0):
data, labels = batch_data
prediction = F.softmax(network(data), dim=1)
# print(prediction.shape)
ans = torch.tensor([np.argmax(each.detach().numpy()) for each in prediction])
# print("answer vs label")
# print(ans)
# print(labels.squeeze())
labels = labels.squeeze()
for i in range(ans.shape[0]):
if ans[i] == labels[i]:
true_postive += 1
total += 1
return true_postive, total
if __name__ == "__main__":
    # Hyper-parameters for the constructive training run.
    num_feature = 12
    n_epochs = 90
    max_hidden = 10
    train_dataloader, test_dataloader = data_preprocessing(data_csv_path,num_feature=num_feature)
    sample = train_dataloader.dataset
    # print(train_dataloader.dataset.__len__())
    print(np.array(list(enumerate(train_dataloader.dataset))).shape)
    # The ModuleDicts are created empty and grown by add_neuron() during training.
    input_hidden_layers = nn.ModuleDict()
    hidden_hidden_layers = nn.ModuleDict()
    hidden_output_layers = nn.ModuleDict()
    cascade_network = Cascade_Network(num_feature,4,input_hidden_layers,hidden_hidden_layers,hidden_output_layers)
    print(cascade_network)
    loss_CE=nn.CrossEntropyLoss()
    optimizer = optim.SGD(
        cascade_network.parameters(),
        lr=0.001,
        momentum=0.9)
    loss_epoch_log = []
    hidden_neuron_num = 0
    # limit the frequency of additional neurons
    add_neuron_counter = 0
    previous_loss = float('inf')
    for epoch in range(n_epochs):
        current_loss = float(0)
        epoch_loss = float(0)
        for batch_idx, batch_data in enumerate(train_dataloader,start=0):
            data,labels = batch_data
            optimizer.zero_grad()
            forward_result = cascade_network(data)
            loss = loss_CE(forward_result,labels.squeeze())
            loss.backward()
            optimizer.step()
            current_loss = loss.item()
            epoch_loss = current_loss
            # if batch_idx %100 == 99:
            #     current_loss /= 100
            #     print(f"epoch {epoch+1} batch No.{batch_idx+1} loss: {current_loss}")
            #     epoch_loss = current_loss
            #     current_loss = 0
        print(f"epoch {epoch+1} training loss: {current_loss}")
        # Grow the network when the last-batch loss got worse, with a 5-epoch
        # cool-down, never in the final 20% of training, and up to max_hidden units.
        if loss_epoch_log != [] and add_neuron_counter == 0 and previous_loss - epoch_loss < 0 \
                and epoch < n_epochs*0.8 and cascade_network.num_hiddens < max_hidden:
            cascade_network.add_neuron()
            hidden_neuron_num += 1
            add_neuron_counter = 5
            print(f"ADD NEURON in epoch {epoch}. There are {cascade_network.num_hiddens} in total")
            cascade_network.optimize_correlation(train_dataloader)
            optimizer = cascade_network.freeze_neuron(optimizer)
            # print(f"ADD {hidden_neuron_num}th NEURON ends")
        previous_loss = epoch_loss
        add_neuron_counter -= 1
        add_neuron_counter = max(add_neuron_counter,0)
        tp,total = test_accuracy(test_dataloader,cascade_network)
        print(f"epoch {epoch+1} test acc: {tp*100/total} %")
        loss_epoch_log.append((epoch_loss,cascade_network.num_hiddens,tp*100/total))
    final_true_positive, final_total = test_accuracy(test_dataloader,cascade_network)
    # additional loss log here
    print(f"Additional Loss Log \n Loss Log Format: epoch, loss, number of hidden neurons, accuracy")
    for i,item in enumerate(loss_epoch_log):
        print(i+1,item)
    # final performance
    print(f"Final test accuracy: {final_true_positive * 100 / final_total} %")
    print(f"ratio: {final_true_positive}/{final_total}")
    print(f"overall hidden neuron added: {hidden_neuron_num}")
    print("DONE.")
|
'''
command for zim custom tool: python path/to/zim_to_textile.py -T %T -f %f
* textile format: http://www.redmine.org/projects/redmine/wiki/RedmineTextFormattingTextile#External-links
'''
import argparse
import pyperclip
from zim.formats import get_parser
from zim.formats import UNCHECKED_BOX, XCHECKED_BOX, CHECKED_BOX, BULLET, BULLETLIST, NUMBEREDLIST
from zim.formats import EMPHASIS, STRONG, MARK, STRIKE, VERBATIM, TAG, SUBSCRIPT, SUPERSCRIPT
from zim.formats.plain import Dumper as TextDumper
from zim.parsing import url_re
class Dumper(TextDumper):
    '''Inherit from wiki format Dumper class, only overload things that are different'''
    # Bullet characters: ballot-box glyphs for checkboxes, '*' for plain bullets.
    BULLETS = {UNCHECKED_BOX: u'\u2610',
               XCHECKED_BOX: u'\u2612',
               CHECKED_BOX: u'\u2611',
               BULLET: u'*',
               }
    # Textile inline markup (open, close) pairs for each zim formatting tag.
    TAGS = {EMPHASIS: ('_', '_'),
            STRONG: ('*', '*'),
            MARK: ('+', '+'),
            STRIKE: ('-', '-'),
            VERBATIM: ("<pre>", "</pre>"),
            TAG: ('@', '@'),
            SUBSCRIPT: ('~', '~'),
            SUPERSCRIPT: ('^', '^'),
            }
    def dump_link(self, tag, attrib, strings=None):
        # Bare URLs pass through unchanged; everything else becomes "text":href.
        href = attrib['href']
        text = u''.join(strings) or href
        if href == text and url_re.match(href):
            return href
        else:
            return ['"%s":%s' % (text, href)]
    def dump_h(self, tag, attrib, strings):
        # Textile heading syntax: "hN. title".
        level = int(attrib['level'])
        heading = u''.join(strings)
        return ['h%d. ' % level, heading, '\n']
    def dump_ul(self, tag, attrib, strings):
        # List containers add no markup of their own; the items carry bullets.
        return strings
    def dump_ol(self, tag, attrib, strings):
        return strings
    def dump_li(self, tag, attrib, strings):
        # Nesting depth is expressed by repeating the bullet character.
        level = self._count_list_level()
        if self.context[-1].tag == NUMBEREDLIST:
            bullet = u'#' * level
        else:
            bullet = self.BULLETS[BULLET] * level
        # Checkbox items keep the list bullet and append the checkbox glyph.
        if 'bullet' in attrib and attrib['bullet'] != BULLET and attrib['bullet'] in self.BULLETS:
            bullet += (' ' + self.BULLETS[attrib['bullet']])
        return (bullet, ' ') + tuple(strings) + ('\n',)
    def _count_list_level(self):
        # Walk the context stack innermost-first and count consecutive list containers.
        level = 0
        for i in range(-1, -len(self.context) - 1, -1):
            if self.context[i].tag in (BULLETLIST, NUMBEREDLIST):
                level += 1
            else:
                break
        return level
    def dump_img(self, tag, attrib, strings=None):
        # Textile image syntax: !src(alt)! — zim's leading './' is stripped.
        src = attrib['src']
        if src.startswith('./'):
            src = src[2:]
        text = attrib.get('alt', '')
        if text:
            return ['!%s(%s)!\n' % (src, text)]
        else:
            return ['!%s!\n' % src]
    def dump_object(self, tag, attrib, strings=None):
        # Code blocks become <pre><code class="lang">; anything else defers to the base class.
        if 'type' in attrib:
            t = attrib['type']
            if t == 'code':
                c = attrib.get('lang', "")
                if c == "sh":
                    c = "python" # missing support of sh, see http://coderay.rubychan.de/
                return ['<pre><code class="%s">\n' % c] + strings + ['</code></pre>\n']
        return super(Dumper, self).dump_object(tag, attrib, strings)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('-T', dest='wiki_text', help='the selected text including wiki formatting')
    parser.add_argument('-f', dest='file', help='the page source as temporary file')
    args = parser.parse_args()
    zim_parser = get_parser('wiki')
    # Prefer the selection passed via -T; fall back to the whole page file.
    if args.wiki_text:
        wiki_text = args.wiki_text
    else:
        wiki_text = open(args.file).read()
    tree = zim_parser.parse(wiki_text)
    try:
        dumper = Dumper()
        lines = dumper.dump(tree)
        textile_text = ''.join(lines).encode('utf-8')
        pyperclip.copy(textile_text)
    except Exception as e:
        # Bug fix: Exception.message does not exist on Python 3 (and was
        # deprecated on 2.6+); str(e) works everywhere.
        pyperclip.copy(str(e))
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 18 17:55:33 2021
@author: Osvaldo
"""
class nodoSimple:
    """Singly-linked list node: a value (`dato`) plus a next pointer (`liga`)."""

    def __init__(self, d=None):
        self.dato = d
        self.liga = None

    def asignarDato(self, d):
        """Store value *d* in this node."""
        self.dato = d

    def asignarLiga(self, x):
        """Point this node at its successor *x*."""
        self.liga = x

    def retornarDato(self):
        """Return the stored value."""
        return self.dato

    def retornarLiga(self):
        """Return the successor node (None at the end of a list)."""
        return self.liga
|
# a="MISSISSIPPI"
# b={}
# for i in a:
# count=0
# if i not in b.keys():
# for j in a:
# if i==j:
# count+=1
# b[i]=count
# print(b)
# for i in a:
# if i in b:
# b[i]+=1
# else:
# b[i]=1
# print(b)
# Count letter frequencies of the word; every letter of the word is already a
# key of `count`, so a direct dict lookup replaces the original four-branch
# if/elif chain (same keys, same values, same printed output).
count = {"M":0,"I":0,"S":0,"P":0}
word = "MISSISSIPPI"
for i in word:
    count[i] = count[i] + 1
print (count)
|
import pygame
class Bullet1(pygame.sprite.Sprite):  # inherits pygame.sprite.Sprite
    def __init__(self, position):
        """Create a type-1 bullet sprite at *position* = (left, top)."""
        pygame.sprite.Sprite.__init__(self)
        self.image = pygame.image.load("images/bullet1.png").convert_alpha()  # load the bullet image
        self.rect = self.image.get_rect()  # rectangle sized to the image
        self.rect.left, self.rect.top = position  # bullet position
        self.speed = 12  # upward speed in pixels per frame
        self.active = True  # bullet is alive / in flight
        self.mask = pygame.mask.from_surface(self.image)  # non-transparent pixels, for pixel-perfect collisions
    def move(self):
        # Move straight up; deactivate once past the top edge of the screen.
        self.rect.top -= self.speed
        if self.rect.top < 0:
            self.active = False
    def reset(self, position):
        # Re-arm the bullet at a new position so the sprite can be reused.
        self.rect.left, self.rect.top = position  # bullet position
        self.active = True  # bullet is alive again
class Bullet2(pygame.sprite.Sprite):  # inherits pygame.sprite.Sprite
    def __init__(self, position):
        """Create a type-2 bullet sprite at *position* = (left, top).

        Identical to Bullet1 except for the sprite image."""
        pygame.sprite.Sprite.__init__(self)
        self.image = pygame.image.load("images/bullet2.png").convert_alpha()  # load the bullet image
        self.rect = self.image.get_rect()  # rectangle sized to the image
        self.rect.left, self.rect.top = position  # bullet position
        self.speed = 12  # upward speed in pixels per frame
        self.active = True  # bullet is alive / in flight
        self.mask = pygame.mask.from_surface(self.image)  # non-transparent pixels, for pixel-perfect collisions
    def move(self):
        # Move straight up; deactivate once past the top edge of the screen.
        self.rect.top -= self.speed
        if self.rect.top < 0:
            self.active = False
    def reset(self, position):
        # Re-arm the bullet at a new position so the sprite can be reused.
        self.rect.left, self.rect.top = position  # bullet position
        self.active = True  # bullet is alive again
|
import modutil # will need the function modinv() which returns the inverse of an integer
def blocksize(n):
    """returns the size of a block in an RSA encrypted string

    Grows the number 25, 2525, 252525, ... until it reaches *n*; the block
    size is two less than its digit count — the largest even length 2N such
    that any 2N-digit string of letter codes (00-25) stays below *n*.
    """
    probe = "25"
    while int(probe) < n:
        probe = probe + "25"
    return len(probe) - 2
def RSAletters2digits(letters):
    """converts a string of letters without spaces to a string of integers"""
    # Map A->"00", B->"01", ..., Z->"25" (built instead of written out literally;
    # non-letters still raise KeyError, as before).
    letter2digit = {chr(ord("A") + i): "%02d" % i for i in range(26)}
    cleaned = letters.replace(" ", "")  # getting rid of spaces
    return "".join(letter2digit[c.upper()] for c in cleaned)
def RSAdigits2letters(digits):
    """converts a string of double digits without spaces in the range 00-25 to a string of letters A-Z"""
    # Inverse of RSAletters2digits: "00"->A ... "25"->Z.
    digit2letter = {"%02d" % i: chr(ord("A") + i) for i in range(26)}
    return "".join(digit2letter[digits[i:i + 2]] for i in range(0, len(digits), 2))
## Decryption
def decryptRSA(c, p, q, e):
    """Decrypt the space-separated ciphertext *c* produced by encryptRSA with
    primes *p*, *q* and public exponent *e*; returns the letter string."""
    c_copy = c.replace(" ", "")
    m = (p - 1) * (q - 1)
    e_inv = modutil.modinv(e, m)  # private exponent d = e^-1 mod phi(n)
    n = p * q
    digits = ""
    k = blocksize(n)
    for start in range(0, len(c_copy), k):
        block = c_copy[start: start + k]  # accessing each block of digits
        # pow(base, exp, mod) does modular exponentiation; the original
        # computed the full power first (int(block) ** e_inv), which is
        # astronomically slow for realistic exponent sizes.
        digit = str(pow(int(block), e_inv, n))
        if len(digit) % 2 != 0:  # pad to an even digit count before appending
            digits += "0" + digit
        else:
            digits += digit
    return RSAdigits2letters(digits)  # converting digits to letters
## Encryption
def encryptRSA(s, a, b, e):
    """Encrypt letter string *s* with public key (n = a*b, e); returns the
    ciphertext as space-prefixed numeric blocks."""
    s_copy = s.replace(" ", "")
    n = a * b
    # STEP 1 & 2: CONVERT TO STRING OF INTEGERS
    digits_string = RSAletters2digits(s_copy)
    # STEP 3 & 4: DIVIDE INTO BLOCKS OF 2N DIGITS, ENCRYPT EACH, CONCATENATE
    # determining l = 2N
    l = blocksize(n)
    # pad with the code for 'X' so the length is a whole number of blocks
    if len(digits_string) % l != 0:
        diff = l - len(digits_string) % l
        digits_string = digits_string + "23" * (diff//2)  # Letter X = 23
    # encrypting and concatenating
    encryption = ""
    for i in range(0, len(digits_string), l):
        base = int(digits_string[i: i + l])
        # pow(base, e, n) does modular exponentiation; the original computed
        # the full power first (base ** e), which is needlessly slow.
        digit = str(pow(base, e, n))
        if len(digit) % 2 != 0:  # keep an even digit count for decryption
            encryption = encryption + " " + "0" + digit
        else:
            encryption = encryption + " " + digit
    return encryption
# Round-trip demos with the textbook key n = 43*59 = 2537, e = 13.
# "STOPS" exercises the 'X' padding path on encryption.
encrypted1 = encryptRSA("STOP", 43, 59, 13)
decrypted1 = decryptRSA(encrypted1, 43, 59, 13)
print("Encrypted Message:", encrypted1)
print("Decrypted Message:", decrypted1)
encrypted2 = encryptRSA("HELP", 43, 59, 13)
decrypted2 = decryptRSA(encrypted2, 43, 59, 13)
print("Encrypted Message:", encrypted2)
print("Decrypted Message:", decrypted2)
encrypted1 = encryptRSA("STOPS", 43, 59, 13)
decrypted1 = decryptRSA(encrypted1, 43, 59, 13)
print("Encrypted Message:", encrypted1)
print("Decrypted Message:", decrypted1)
|
import pytest
from hypothesis import given, assume
from unittest import TestCase
from stylo.testing.strategies import real
from stylo.utils import bounded_property
@pytest.mark.utils
class TestBoundedProperty(TestCase):
    """Tests to ensure the :code:`bounded_property` factory function produces
    properties as expected.

    Covers argument validation (types and mutually-exclusive options),
    basic getter/setter behaviour, and enforcement of lower/upper bounds
    given either as numbers or as names of sibling attributes.
    """

    def test_checks_bounded_below_type(self):
        """Ensure that :code:`bounded_property` informs the user when using a value
        for :code:`bounded_below` with the incorrect type."""
        with pytest.raises(TypeError) as err:
            bounded_property("length", bounded_below="width")
        self.assertIn("must be a number", str(err.value))
        self.assertIn("bounded_below", str(err.value))

    def test_checks_bounded_below_by_type(self):
        """Ensure that :code:`bounded_property` informs the user when using
        a value for :code:`bounded_below_by` with the incorrect type."""
        with pytest.raises(TypeError) as err:
            bounded_property("length", bounded_below_by=2.0)
        self.assertIn("must be a string", str(err.value))
        self.assertIn("bounded_below_by", str(err.value))

    def test_checks_bounded_above_type(self):
        """Ensure that :code:`bounded_property` informs the user when using
        a value for :code:`bounded_above` with the incorrect type."""
        with pytest.raises(TypeError) as err:
            bounded_property("length", bounded_above="width")
        self.assertIn("must be a number", str(err.value))
        self.assertIn("bounded_above", str(err.value))

    def test_checks_bounded_above_by_type(self):
        """Ensure that :code:`bounded_property` informs the user when
        using a value for :code:`bounded_above_by` with the incorrect type."""
        with pytest.raises(TypeError) as err:
            bounded_property("length", bounded_above_by=2.0)
        self.assertIn("must be a string", str(err.value))
        self.assertIn("bounded_above_by", str(err.value))

    def test_checks_above_arguments(self):
        """Ensure that :code:`bounded_property` informs the user that it only
        makes sense to use either :code:`bounded_above_by` or :code:`bounded_above`"""
        with pytest.raises(ValueError) as err:
            bounded_property("name", bounded_above=1, bounded_above_by="prop")
        self.assertIn("You can only use", str(err.value))

    def test_checks_below_arguments(self):
        """Ensure that :code:`bounded_property` informs the user that it only
        makes sense to use either :code:`bounded_below` or :code:`bounded_below_by`"""
        with pytest.raises(ValueError) as err:
            bounded_property("name", bounded_below=1, bounded_below_by="prop")
        self.assertIn("You can only use", str(err.value))

    @given(value=real)
    def test_uses_getter(self, value):
        """Ensure that the property constructed by :code:`bounded_property`
        returns the value it manages"""
        # The property reads the underscored backing attribute.
        class MyClass:
            _length = value
            length = bounded_property("length")
        my_class = MyClass()
        self.assertEqual(value, my_class.length)

    @given(value=real)
    def test_uses_setter(self, value):
        """Ensure that the property constructed by :code:`bounded_property`
        still allows the user to set the value even if no bounds are specified."""
        class MyClass:
            length = bounded_property("length")
        my_class = MyClass()
        my_class.length = value
        self.assertEqual(value, my_class.length)

    def test_setter_checks_type(self):
        """Ensure that the property constructed by :code:`bounded_property`
        checks the type of the value provided."""
        class MyClass:
            length = bounded_property("length")
        my_class = MyClass()
        with pytest.raises(TypeError) as err:
            my_class.length = "10m"
        self.assertIn("length", str(err.value))
        self.assertIn("must be a number", str(err.value))

    @given(value=real, bound=real)
    def test_setter_checks_bounded_below(self, value, bound):
        """Ensure that when given a lower bound via :code:`bounded_below` that
        the property constructed by :code:`bounded_property` checks the value
        against that bound."""
        assume(value <= bound)
        class MyClass:
            length = bounded_property("length", bounded_below=bound)
        my_class = MyClass()
        with pytest.raises(ValueError) as err:
            my_class.length = value
        self.assertIn("must be strictly larger than", str(err.value))
        self.assertIn(str(bound), str(err.value))
        # Flip both quantities so the assignment is now legal.
        value = 1 - value
        bound = -bound
        # Now we have value > bound
        class MyOtherClass:
            length = bounded_property("length", bounded_below=bound)
        my_other_class = MyOtherClass()
        my_other_class.length = value
        self.assertEqual(value, my_other_class.length)

    @given(value=real, bound=real)
    def test_setter_checks_bounded_below_by(self, value, bound):
        """Ensure that when given an attribute to act as a lower bound via
        :code:`bounded_below_by` that the property constructed by
        :code:`bounded_property` checks the value against that bound."""
        assume(value <= bound)
        class MyClass:
            width = bound
            length = bounded_property("length", bounded_below_by="width")
        my_class = MyClass()
        with pytest.raises(ValueError) as err:
            my_class.length = value
        self.assertIn("must be strictly larger than", str(err.value))
        self.assertIn("width", str(err.value))
        # Flip both quantities so the assignment is now legal.
        value = 1 - value
        bound = -bound
        # Now we have value > bound
        class MyOtherClass:
            width = bound
            length = bounded_property("length", bounded_below_by="width")
        my_other_class = MyOtherClass()
        my_other_class.length = value
        self.assertEqual(value, my_other_class.length)

    @given(value=real, bound=real)
    def test_setter_checks_bounded_above(self, value, bound):
        """Ensure that when given an upper bound via :code:`bounded_above`
        that the property constructed by :code:`bounded_property` checks the
        value against that bound."""
        assume(value >= bound)
        class MyClass:
            length = bounded_property("length", bounded_above=bound)
        my_class = MyClass()
        with pytest.raises(ValueError) as err:
            my_class.length = value
        self.assertIn("must be strictly less than", str(err.value))
        self.assertIn(str(bound), str(err.value))
        # Flip both quantities so the assignment is now legal.
        value = -1 - value
        bound = -bound
        # We now have value < bound
        class MyOtherClass:
            length = bounded_property("length", bounded_above=bound)
        my_other_class = MyOtherClass()
        my_other_class.length = value
        self.assertEqual(value, my_other_class.length)

    @given(value=real, bound=real)
    def test_setter_checks_bounded_above_by(self, value, bound):
        """Ensure that when given an upper bound via :code:`bounded_above_by`
        that the property constructed by :code:`bounded_property` checks the
        value against the bound.
        """
        assume(value >= bound)
        class MyClass:
            width = bound
            length = bounded_property("length", bounded_above_by="width")
        my_class = MyClass()
        with pytest.raises(ValueError) as err:
            my_class.length = value
        self.assertIn("must be strictly less than", str(err.value))
        self.assertIn("width", str(err.value))
        # Flip both quantities so the assignment is now legal.
        value = -1 - value
        bound = -bound
        # Now we have value < bound
        class MyOtherClass:
            width = bound
            length = bounded_property("length", bounded_above_by="width")
        my_other_class = MyOtherClass()
        my_other_class.length = value
        self.assertEqual(value, my_other_class.length)
|
# encoding=utf8
from flask import Flask, request, jsonify, session, render_template
from flask_pymongo import PyMongo
from gevent import pywsgi
from flask_cors import CORS
import random
import time

app = Flask(__name__)
app.debug = True
# Allow non-ASCII characters (e.g. Chinese) to pass through jsonify unescaped.
app.config['JSON_AS_ASCII'] = False
# Allow cross-origin requests with credentials so a separately served
# front end can call this API.
CORS(app, supports_credentials=True)
# Local MongoDB database named "visual" backs all history endpoints below.
app.config["MONGO_URI"] = "mongodb://localhost:27017/visual"
mongo = PyMongo(app)
@app.route('/')
def index():
    """
    Page entry point: serve the dashboard template.
    """
    return render_template("index.html")
@app.route('/getCpuStatus')
def get_cup_status():
    """Simulate one CPU-usage sample, persist it, and return it as JSON."""
    total, unit = 100, "%"
    use = round(random.uniform(0, total), 2)
    timestamp = int(round(time.time() * 1000))
    record = {"total": total, "use": use, "unit": unit, "time": timestamp}
    # Serialise the response before inserting, preserving the original
    # statement order (insert_one mutates the dict it is given).
    payload = success_with_data(record)
    mongo.db.cpu_record.insert_one(record)
    return payload
@app.route("/getCpuHistoryStatus")
def get_cpu_history_status():
    """Return (up to) the first 100 stored CPU samples as JSON."""
    history = [
        {field: record[field] for field in ("total", "use", "unit", "time")}
        for record in mongo.db.cpu_record.find().limit(100)
    ]
    return success_with_data(data=history)
@app.route('/getMemoryStatus')
def get_memory_status():
    """Simulate one memory-usage sample (50-100% of 16 GB), persist and return it."""
    total, unit = 16, "GB"
    use = round(random.uniform(0.5 * total, total), 2)
    timestamp = int(round(time.time() * 1000))
    record = {"total": total, "use": use, "unit": unit, "time": timestamp}
    # Serialise the response before inserting, preserving the original
    # statement order (insert_one mutates the dict it is given).
    payload = success_with_data(record)
    mongo.db.memory_record.insert_one(record)
    return payload
@app.route("/getMemoryHistoryStatus")
def get_memory_history_status():
    """Return (up to) the first 100 stored memory samples as JSON."""
    history = [
        {field: record[field] for field in ("total", "use", "unit", "time")}
        for record in mongo.db.memory_record.find().limit(100)
    ]
    return success_with_data(data=history)
###################### Wrapped JSON response helpers ######################
def success():
    """Return a bare success response (code 0, no payload)."""
    return response(0, "success")

def success_with_data(data):
    """Return a success response (code 0) carrying a data payload."""
    return response(0, "success", data)

def fail(msg):
    """Return a failure response (code -1) with an error message."""
    return response(-1, msg)

def response(code, msg, data=''):
    """Build the common JSON envelope used by every endpoint: {code, msg, data}."""
    return jsonify({'code': code, 'msg': msg, 'data': data})
# Wrapped JSON response helpers ###################### End
if __name__ == "__main__":
    # Serve with gevent's WSGI server on all interfaces, port 8090.
    server = pywsgi.WSGIServer(('0.0.0.0', 8090), app)
    server.serve_forever()
|
"""
Django settings for project {{ project_name }}.
Generated by 'django-admin startproject' using Django {{ django_version }}.
For more information on this file, see
https://docs.djangoproject.com/en/{{ docs_version }}/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/{{ docs_version }}/ref/settings/
"""
import os
from corsheaders.defaults import default_headers, default_methods
from smart_getenv import getenv
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/{{ docs_version }}/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.getenv('SECRET_KEY', '{{ secret_key }}')

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = getenv('DEBUG', type=bool, default=True)

# smart_getenv parses comma-separated env vars into real Python lists.
ALLOWED_HOSTS = getenv('ALLOWED_HOSTS', type=list, default=['*'])

# If the app is running behind a proxy, this variable must be set with the proxy path
# See https://docs.djangoproject.com/en/{{ docs_version }}/ref/settings/#force-script-name
FORCE_SCRIPT_NAME = os.getenv('PROXY_SCRIPT_NAME', None)
# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Third-party apps: CORS headers, dev utilities, filtering, API docs, DRF.
    'corsheaders',
    'django_extensions',
    'django_filters',
    'drf_yasg',
    'rest_framework',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    # CorsMiddleware must come before CommonMiddleware to set CORS headers.
    'corsheaders.middleware.CorsMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    # Project middleware — defined in core/middleware.py.
    'core.middleware.RevisionMiddleware',
]

ROOT_URLCONF = 'config.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
                # Project context processor exposing app name/version to templates.
                'core.context_processors.application_info',
            ]
        },
    }
]

WSGI_APPLICATION = 'config.wsgi.application'
# Map short DB_ENGINE environment values to full Django backend paths.
DATABASES_ENGINE_MAP = {
    'mysql': 'django.db.backends.mysql',
    'oracle': 'django.db.backends.oracle',
    'postgresql': 'django.db.backends.postgresql',
    # Fixed typo: the backend module is "postgresql_psycopg2", not "pycopg2".
    'postgresql_psycopg2': 'django.db.backends.postgresql_psycopg2',
    'sqlite3': 'django.db.backends.sqlite3',
}
DATABASES = {
    'default': {
        # Short engine name from the environment mapped to a full backend
        # path; defaults to a local SQLite file when DB_ENGINE is unset.
        'ENGINE': DATABASES_ENGINE_MAP.get(os.getenv('DB_ENGINE', 'sqlite3')),
        'NAME': os.getenv('DB_NAME', os.path.join(BASE_DIR, 'db.sqlite3')),
        'USER': os.getenv('DB_USER'),
        'PASSWORD': os.getenv('DB_PASSWORD'),
        'HOST': os.getenv('DB_HOST'),
        'PORT': os.getenv('DB_PORT'),
        # 0 closes database connections at the end of each request.
        'CONN_MAX_AGE': getenv('DB_CONN_MAX_AGE', type=int, default=0),
    }
}
# Oracle needs thread-safe connections and RETURNING INTO disabled.
if os.environ.get('DB_ENGINE') == 'oracle':
    DATABASES['default']['OPTIONS'] = {'threaded': True, 'use_returning_into': False}
# Password validation
# https://docs.djangoproject.com/en/{{ docs_version }}/ref/settings/#auth-password-validators
# NOTE(review): the empty list disables all password validators — confirm intended.
AUTH_PASSWORD_VALIDATORS = []

# Internationalization
# https://docs.djangoproject.com/en/{{ docs_version }}/topics/i18n/
LANGUAGE_CODE = 'pt-BR'
TIME_ZONE = os.getenv('TIME_ZONE', 'UTC')
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/{{ docs_version }}/howto/static-files/
STATIC_PATH = os.getenv('STATIC_PATH', '/static/')
# Prefix the static URL with the proxy script name when one is configured.
STATIC_URL = os.getenv(
    'STATIC_URL',
    (FORCE_SCRIPT_NAME + STATIC_PATH if FORCE_SCRIPT_NAME else STATIC_PATH),
)
STATIC_ROOT = os.getenv('STATIC_ROOT')
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static')]

# django-cors-headers
# https://pypi.org/project/django-cors-headers/
CORS_ORIGIN_ALLOW_ALL = getenv('CORS_ORIGIN_ALLOW_ALL', type=bool, default=True)
CORS_ORIGIN_WHITELIST = getenv('CORS_ORIGIN_WHITELIST', type=list, default=[])
# NOTE(review): '%r' % value wraps each entry in quotes (repr), which is
# unusual for a regex whitelist — confirm the entries match as intended.
CORS_ORIGIN_REGEX_WHITELIST = [
    '%r' % value
    for value in getenv('CORS_ORIGIN_REGEX_WHITELIST', type=list, default=[])
]
CORS_ALLOW_HEADERS = getenv(
    'CORS_ALLOW_HEADERS', type=list, default=list(default_headers)
)
CORS_ALLOW_METHODS = getenv(
    'CORS_ALLOW_METHODS', type=list, default=list(default_methods)
)

# Django REST framework
# http://www.django-rest-framework.org/api-guide/settings/
REST_FRAMEWORK = {
    'DEFAULT_FILTER_BACKENDS': ['django_filters.rest_framework.DjangoFilterBackend'],
    'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
    'PAGE_SIZE': 100,
}

# Application definitions
APP_VERSION = '1.0.0'
APP_NAME = '{{ project_name }}'
APP_DESCRIPTION = 'A RESTfull API for project {{ project_name }}'
|
from django.conf.urls import url
from views import *
from django.contrib.sitemaps.views import sitemap

# Static/informational page routes.  Each class-based view receives its
# template via the extra kwargs dict.
# NOTE(review): none of these regexes is anchored with a trailing '$', so
# r'^aboutus' also matches e.g. 'aboutus-x' — confirm prefix matching is
# intended.
urlpatterns = [
    url(r'^newsletter', newsletter,name='newsletter'),
    url(r'^aboutus',AboutUs.as_view(),{'template_name':'aboutus.html'},name="aboutus"),
    url(r'^contactus',ContactUs.as_view(),{'template_name':'contactus.html'},name="contactus"),
    url(r'^privacy_policy',PrivacyPolicy.as_view(),{'template_name':'privacy_policy.html'},name="privacy_policy"),
    url(r'^terms_conditions',TermsConditions.as_view(),{'template_name':'terms_conditions.html'},name="terms_conditions"),
    url(r'^shipping_returns_policy',ShippingReturnsPolicy.as_view(),{'template_name':'shipping_returns_policy.html'},name="shipping_returns_policy"),
]
|
# --------------
# Import Libraries
import os
import pandas as pd
import numpy as np
import warnings

warnings.filterwarnings('ignore')

# Code starts here
# NOTE(review): `path` is injected by the hosting platform; it is not
# defined in this file — confirm when running standalone.
df = pd.read_csv(path)
print(df.head())

# Normalise column names: lower-case, underscores instead of spaces.
df.columns = df.columns.str.lower()
df.columns = df.columns.str.replace(' ', '_')

# Bug fix: the original replaced 'NaN' with the *string* 'np.nan' instead
# of the actual missing-value marker np.nan, so isna()/fillna() never saw
# those cells as missing.
df = df.replace('NaN', np.nan)
df.isna().sum()
# Code ends here

# --------------
from sklearn.model_selection import train_test_split

df.set_index(keys='serial_number', inplace=True, drop=True)

# Code starts
df['established_date'] = pd.to_datetime(df['established_date'])
df['acquired_date'] = pd.to_datetime(df['acquired_date'])

# Target is the 2016 deposits column; everything else is a feature.
X = df.drop('2016_deposits', axis=1)
y = df['2016_deposits']
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.25, random_state=3)
# Code ends here

# --------------
time_col = ['established_date', 'acquired_date']

# Code starts here
# Convert each datetime column into "years since" that date.
for col_name in time_col:
    new_col_name = "since_" + col_name
    # pd.datetime was deprecated and removed in pandas 2.0;
    # pd.Timestamp.now() is the supported equivalent.
    X_train[new_col_name] = pd.Timestamp.now() - X_train[col_name]
    X_train[new_col_name] = X_train[new_col_name].apply(lambda x: float(x.days) / 365)
    X_train.drop(columns=col_name, inplace=True)
    X_val[new_col_name] = pd.Timestamp.now() - X_val[col_name]
    X_val[new_col_name] = X_val[new_col_name].apply(lambda x: float(x.days) / 365)
    X_val.drop(columns=col_name, inplace=True)
# Code ends here

# --------------
from sklearn.preprocessing import LabelEncoder

cat = X_train.select_dtypes(include='O').columns.tolist()

# Code starts here
X_train.fillna(0, inplace=True)
X_val.fillna(0, inplace=True)

le = LabelEncoder()
# One-hot encode the categorical (object-dtype) columns.
X_train_temp = pd.get_dummies(data=X_train, columns=cat)
X_val_temp = pd.get_dummies(data=X_val, columns=cat)
# NOTE(review): the models below are fit on X_train/X_val rather than the
# encoded *_temp frames — confirm which was intended.
# Code ends here

# --------------
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import mean_squared_error

# Code starts here
# Baseline model: decision tree regressor.
dt = DecisionTreeRegressor(random_state=5)
dt.fit(X_train, y_train)
accuracy = dt.score(X_val, y_val)
y_pred = dt.predict(X_val)
rmse = np.sqrt(mean_squared_error(y_val, y_pred))
print('Accuracy', accuracy)
print('rmse', rmse)

# --------------
from xgboost import XGBRegressor

# Code starts here
# Gradient-boosted model for comparison against the decision tree.
xgb = XGBRegressor(max_depth=50, learning_rate=0.83, n_estimators=100)
xgb.fit(X_train, y_train)
accuracy = xgb.score(X_val, y_val)
y_pred = xgb.predict(X_val)
rmse = np.sqrt(mean_squared_error(y_val, y_pred))
print('Accuracy', accuracy)
print('rmse', rmse)
# Code ends here
|
#!/usr/bin/env python
#
# this node subscribes to the /enu and /magnetic topics
#
# as soon as a sensor reading is received on both, an initial guess
# of the robot pose and orientation is computed and the parameter
# /roamfree/initialPose is set accordingly in the parameter server
#
# once done, the node terminates.
#
from sys import exit
import rospy
from threading import Lock
from math import sin, cos, atan2, pi
from geometry_msgs.msg import Vector3Stamped
from geometry_msgs.msg import PoseWithCovarianceStamped
# Latest sensor readings, shared between the rospy callback threads and
# the main loop; `mutex` guards both.
ZMag = None  # last /magnetic message (Vector3Stamped)
ZGPS = None  # last /enu message (PoseWithCovarianceStamped)
mutex = Lock()


def gps_cb(msg):
    """Store the latest ENU pose reading (runs on a rospy callback thread)."""
    global ZGPS
    # `with` replaces the manual acquire/release so the lock is released
    # even if assignment ever raised.
    with mutex:
        ZGPS = msg


def mag_cb(msg):
    """Store the latest magnetic field reading."""
    global ZMag
    with mutex:
        ZMag = msg


rospy.init_node('firstPose', anonymous=True)

# load needed parameters
try:
    gpsSO = rospy.get_param('/roamfree/gpsDisplacement')
except KeyError as e:
    # Fixed missing space in the original log message.
    rospy.logfatal('\'%s\' not found in Parameter Server', e.args[0])
    exit(1)

# subscribe to topics
rospy.Subscriber("/enu", PoseWithCovarianceStamped, gps_cb)
rospy.Subscriber("/magnetic", Vector3Stamped, mag_cb)

# loop at 10 Hz till both GPS and Mag measurements have been received
r = rospy.Rate(10)
done = False
while not rospy.is_shutdown() and not done:
    with mutex:
        # `is not None` replaces the non-idiomatic `!= None` comparisons.
        if ZMag is not None and ZGPS is not None:
            # Heading from the horizontal magnetic field components.
            theta = atan2(ZMag.vector.x, ZMag.vector.y)
            # Quaternion for a pure yaw rotation of theta.
            Q = {'w': cos(theta / 2.0), 'x': 0.0, 'y': 0.0, 'z': sin(theta / 2.0)}
            # Remove the (yaw-rotated) GPS antenna displacement from the fix.
            POSE = {
                'x': ZGPS.pose.pose.position.x - gpsSO['x'] * cos(theta) + gpsSO['y'] * sin(theta),
                'y': ZGPS.pose.pose.position.y - gpsSO['y'] * cos(theta) - gpsSO['x'] * sin(theta),
                'z': -gpsSO['z'] + ZGPS.pose.pose.position.z,
            }
            rospy.set_param('/roamfree/initialPose/position', POSE)
            rospy.set_param('/roamfree/initialPose/orientation', Q)
            done = True
    r.sleep()
|
import os
from google.cloud import datastore
PROJECT_ID = os.environ.get('PROJECT_ID', 'ccblender')
def list_instances_of_word(word):
    """Return all 'Captioned Word' Datastore entities whose 'word' equals *word*.

    Args:
        word: exact word value to filter on.

    Returns:
        A non-empty list of matching Datastore entities.

    Raises:
        ValueError: if no matching entities exist.
    """
    client = datastore.Client(PROJECT_ID)
    query = client.query(kind='Captioned Word')
    query.add_filter('word', '=', word)
    word_instances = list(query.fetch())
    # Idiomatic emptiness check (was `len(word_instances) == 0`).
    if not word_instances:
        raise ValueError('No instances of that word were found')
    return word_instances
|
# -*- coding: utf-8 -*-
import logging
import os
import sys
import json
import requests
import random
import datetime
from flask import Flask
from flask import request
from flask import Response
from mattermost_giphy.settings import *
# Timestamped, levelled log lines for everything this service emits.
logging.basicConfig(
    level=logging.INFO, format='[%(asctime)s] [%(levelname)s] %(message)s')

app = Flask(__name__)

@app.route('/new_post')
def root():
    """
    Home handler — simple liveness check.
    """
    print("la")
    return "OK"
@app.route('/', methods=['POST'])
def new_post():
    """
    Mattermost new post event handler.

    Implements a weekly "bingo" game restricted to the `bingochan` channel
    on Thursdays: players can join ("in"), place bets ("bet ..."), or claim
    a win ("bingo!") within time windows.  Responses are returned to
    Mattermost as JSON.
    """
    try:
        # bo gates whether a response is returned at all (see `finally`).
        bo = True
        # NOTE: common stuff
        slash_command = False
        resp_data = {}
        resp_data['username'] = USERNAME
        resp_data['icon_url'] = ICON_URL
        data = request.form
        print(data)
        if not 'token' in data:
            raise Exception('Missing necessary token in the post data')
        #if MATTERMOST_GIPHY_TOKEN.find(data['token']) == -1:
        #    raise Exception('Tokens did not match, it is possible that this request came from somewhere other than Mattermost')
        # NOTE: support the slash command
        if 'command' in data:
            slash_command = True
            resp_data['response_type'] = 'in_channel'
        # The game only runs in the dedicated channel...
        if data.get('channel_name')==u'bingochan':
            # ...and only on Thursdays (weekday() == 3).
            if datetime.datetime.today().weekday() == 3:
                if data.get('text').lower()==u'in':
                    # NOTE(review): the +2h offsets suggest the server clock
                    # is two hours behind the players' local time — confirm.
                    if (int((datetime.datetime.today()+datetime.timedelta(hours=+2)).strftime('%H'))>=10) and (int((datetime.datetime.today()+datetime.timedelta(hours=+2)).strftime('%H'))<=13):
                        # Logged markup is scraped elsewhere to track players.
                        print('<div title="player-name-{}">{}</div> !'.format((datetime.datetime.today()+datetime.timedelta(hours=+1)).strftime('%Y-%m-%d')
                            ,data.get('user_name').title()) )
                        print("la")
                        resp_data['text'] = '''`{}` joined the game! Be ready at 1:45p.m.\n'''.format(data.get('user_name').title())
                    elif (int((datetime.datetime.today()+datetime.timedelta(hours=+2)).strftime('%H'))<10):
                        print((datetime.datetime.today()+datetime.timedelta(hours=+2)).strftime('%H'))
                        resp_data['text'] = '''`{}` is a little too soon! See ya later!\n'''.format(data.get('user_name').title())
                    else:
                        print((datetime.datetime.today()+datetime.timedelta(hours=+2)).strftime('%H'))
                        resp_data['text'] = '''`{}` is a little too late! See ya next week!\n'''.format(data.get('user_name').title())
                elif data.get('text').startswith(u'bet '):
                    # "bet <amount> <target>" — split off the two arguments.
                    betext = data.get('text').split()[1:]
                    print("ici")
                    print(betext)
                    if (int((datetime.datetime.today()+datetime.timedelta(hours=+2)).strftime('%H'))>=10) and (int((datetime.datetime.today()+datetime.timedelta(hours=+2)).strftime('%H'))<=13):
                        print('<div title="betting-name-{}">{}_{}_{}</div> !'.format((datetime.datetime.today()+datetime.timedelta(hours=+1)).strftime('%Y-%m-%d')
                            ,data.get('user_name').title(), betext[0], betext[1]))
                        resp_data['text'] = '''`{}` placed a {} bet on `{}` !\n'''.format(data.get('user_name').title(), betext[0], betext[1])
                    else:
                        print("ici2")
                        print((datetime.datetime.today()+datetime.timedelta(hours=+2)).strftime('%H'))
                        resp_data['text'] = '''Bookie is closed for now'''
                elif data.get('text').lower()==u'bingo!':
                    if (int((datetime.datetime.today()+datetime.timedelta(hours=+2)).strftime('%H'))>=13) and (int((datetime.datetime.today()).strftime('%M'))>=00):
                        print('<div title="winner-name-{}">{}_{}</div> !'.format((datetime.datetime.today()+datetime.timedelta(hours=+2)).strftime('%Y-%m-%d')
                            ,data.get('user_name').title(),(datetime.datetime.today()+datetime.timedelta(hours=+2)).strftime('%Y-%m-%d-%H-%M-%s')))
                        resp_data['text'] = '''`{}` just claimed a bingo! \n'''.format(data.get('user_name').title())
                    elif (int((datetime.datetime.today()+datetime.timedelta(hours=+2)).strftime('%H'))>13):
                        resp_data['text'] = '''It's too late to bingo !\n'''.format(data.get('user_name').title())
                    else:
                        resp_data['text'] = '''It's too soon to bingo !\n'''.format(data.get('user_name').title())
                else:
                    # Unrecognised text: stay silent.
                    bo = False
            else:
                resp_data['text'] = '''Sorry `{}`, there is no bingo today, see you on Thursday!\n'''.format(data.get('user_name').title())
        else:
            # Wrong channel: stay silent.
            bo = False
    except Exception as err:
        # NOTE(review): Exception objects have no .message attribute in
        # Python 3 (this file otherwise uses py3 print()); str(err) is
        # needed here — confirm target runtime.
        msg = err.message
        logging.error('unable to handle new post :: {}'.format(msg))
        resp_data['text'] = msg
    finally:
        resp = Response(content_type='application/json')
        resp.set_data(json.dumps(resp_data))
        # NOTE(review): when bo is False the function falls off the end and
        # returns None, which Flask rejects with a 500 — confirm intended.
        if bo:
            return resp
|
import unittest
from operator import attrgetter
import obonet
from pyobo import SynonymTypeDef, get
from pyobo.struct import Reference
from pyobo.struct.struct import (
iterate_graph_synonym_typedefs, iterate_graph_typedefs, iterate_node_parents, iterate_node_properties,
iterate_node_relationships, iterate_node_synonyms, iterate_node_xrefs,
)
from tests.constants import TEST_CHEBI_OBO_PATH
class TestParseObonet(unittest.TestCase):
    """Tests for parsing pieces of an :mod:`obonet` graph built from the
    bundled test ChEBI OBO file."""

    @classmethod
    def setUpClass(cls) -> None:
        # Parse the OBO fixture once for all tests in this class.
        cls.graph = obonet.read_obo(TEST_CHEBI_OBO_PATH)

    def test_get_graph_typedefs(self):
        """Test getting type definitions from an :mod:`obonet` graph."""
        pairs = {
            (typedef.prefix, typedef.identifier)
            for typedef in iterate_graph_typedefs(self.graph, 'chebi')
        }
        self.assertIn(('chebi', 'has_part'), pairs)

    def test_get_graph_synonym_typedefs(self):
        """Test getting synonym type definitions from an :mod:`obonet` graph."""
        synonym_typedefs = sorted(iterate_graph_synonym_typedefs(self.graph), key=attrgetter('id'))
        self.assertEqual(
            sorted([
                SynonymTypeDef(id='IUPAC_NAME', name='IUPAC NAME'),
                SynonymTypeDef(id='BRAND_NAME', name='BRAND NAME'),
                SynonymTypeDef(id='INN', name='INN'),
            ], key=attrgetter('id')),
            synonym_typedefs,
        )

    def test_get_node_synonyms(self):
        """Test getting synonyms from a node in a :mod:`obonet` graph."""
        data = self.graph.nodes['CHEBI:51990']
        synonyms = list(iterate_node_synonyms(data))
        self.assertEqual(1, len(synonyms))
        synonym = synonyms[0]
        self.assertEqual('N,N,N-tributylbutan-1-aminium fluoride', synonym.name, msg='name parsing failed')
        self.assertEqual('EXACT', synonym.specificity, msg='specificity parsing failed')
        # TODO implement
        # self.assertEqual(SynonymTypeDef(id='IUPAC_NAME', name='IUPAC NAME'), synonym.type)

    def test_get_node_properties(self):
        """Test getting properties from a node in a :mod:`obonet` graph."""
        data = self.graph.nodes['CHEBI:51990']
        properties = list(iterate_node_properties(data))
        t_prop = 'http://purl.obolibrary.org/obo/chebi/monoisotopicmass'
        # The property appears exactly once, with the expected value.
        self.assertIn(t_prop, {prop for prop, value in properties})
        self.assertEqual(1, sum(prop == t_prop for prop, value in properties))
        value = [value for prop, value in properties if prop == t_prop][0]
        self.assertEqual('261.28318', value)

    def test_get_node_parents(self):
        """Test getting parents from a node in a :mod:`obonet` graph."""
        data = self.graph.nodes['CHEBI:51990']
        parents = list(iterate_node_parents(data))
        self.assertEqual(2, len(parents))
        self.assertEqual({'24060', '51992'}, {
            parent.identifier
            for parent in parents
        })
        self.assertEqual({'chebi'}, {
            parent.prefix
            for parent in parents
        })

    def test_get_node_xrefs(self):
        """Test getting xrefs from a node in a :mod:`obonet` graph."""
        data = self.graph.nodes['CHEBI:51990']
        xrefs = list(iterate_node_xrefs(data))
        self.assertEqual(7, len(xrefs))
        # NOTE the prefixes are remapped by PyOBO
        self.assertEqual({'pubmed', 'cas', 'beilstein', 'reaxys'}, {
            xref.prefix
            for xref in xrefs
        })
        self.assertEqual(
            {
                ('reaxys', '3570522'), ('beilstein', '3570522'), ('cas', '429-41-4'),
                ('pubmed', '21142041'), ('pubmed', '21517057'), ('pubmed', '22229781'), ('pubmed', '15074950'),
            },
            {(xref.prefix, xref.identifier) for xref in xrefs}
        )

    def test_get_node_relations(self):
        """Test getting relations from a node in a :mod:`obonet` graph."""
        data = self.graph.nodes['CHEBI:17051']
        relations = list(iterate_node_relationships(data, default_prefix='chebi'))
        self.assertEqual(1, len(relations))
        typedef, target = relations[0]
        # Both ends of the relation resolve to ChEBI references.
        self.assertIsNotNone(target)
        self.assertIsInstance(target, Reference)
        self.assertEqual('chebi', target.prefix)
        self.assertEqual('29228', target.identifier)
        self.assertIsNotNone(typedef)
        self.assertIsInstance(typedef, Reference)
        self.assertEqual('chebi', typedef.prefix)
        self.assertEqual('is_conjugate_base_of', typedef.identifier)
class TestGet(unittest.TestCase):
    """Test generation of OBO objects."""

    def test_get_obo(self):
        """Test getting an OBO document."""
        # local=True reads the bundled fixture instead of downloading.
        obo = get('chebi', url=TEST_CHEBI_OBO_PATH, local=True)
        terms = list(obo)
        # Expected term count of the trimmed test ChEBI export.
        self.assertEqual(18, len(terms))
|
#!/usr/bin/python3
"""
Base module
"""
import json
class Base:
    """
    Base class that manages the ``id`` attribute for all subclasses.

    Attributes:
        __nb_objects: private class attribute counting auto-id'd instances
    """

    __nb_objects = 0

    def __init__(self, id=None):
        """
        Initialization method

        Args:
            id: identifier to assign; when None, an auto-incremented
                value based on the instance counter is used instead.
        """
        if id is not None:
            self.id = id
        else:
            Base.__nb_objects += 1
            self.id = Base.__nb_objects

    @staticmethod
    def to_json_string(list_dictionaries):
        """
        Returns the JSON string representation of list_dictionaries
        ("[]" when it is None or empty).
        """
        if not list_dictionaries:
            return "[]"
        return json.dumps(list_dictionaries)

    @classmethod
    def save_to_file(cls, list_objs):
        """
        Writes the JSON string representation of list_objs to a file
        named after the objects' class (e.g. Rectangle.json).

        Args:
            list_objs: list of instances implementing to_dictionary();
                None is treated as an empty list.
        """
        # Bug fix: the original indexed list_objs[0] to build the file
        # name, crashing with IndexError/TypeError on an empty or None
        # list.  Keep the first object's class name when available (same
        # behaviour as before), and fall back to cls otherwise.
        if list_objs:
            filename = list_objs[0].__class__.__name__ + '.json'
        else:
            list_objs = []
            filename = cls.__name__ + '.json'
        list_f = [obj.to_dictionary() for obj in list_objs]
        with open(filename, 'w', encoding="utf-8") as f:
            return f.write(cls.to_json_string(list_f))

    @staticmethod
    def from_json_string(json_string):
        """
        Returns the list represented by the JSON string json_string
        (an empty list when it is None or empty).
        """
        if not json_string:
            return []
        return json.loads(json_string)
|
#print("Yo what's yo name")
#name=input()
#if name == "Leo" or name== "Mikael":
#    print("Group 2")
#elif name == "Justis" or name == "David":
#    print("Group 3")
#elif name == "Kayla" or name == "Gen":
#    print("Group 4")
#else:
#    print("IDK")


def school_stage(age):
    """Return the school stage name for the given age in years.

    Bug fix: the original chained comparisons (e.g. ``4 < age < 5``)
    excluded the exact boundary ages 4, 5, 10, 13 and 18, so those ages
    printed nothing at all.  Half-open ranges cover every age.
    """
    if age < 4:
        return "Not in School"
    elif age < 5:
        return "Pre School"
    elif age < 10:
        return "Lower School"
    elif age < 13:
        return "middle school"
    elif age < 18:
        return "upper school"
    else:
        return "wasdads?"


if __name__ == "__main__":
    print("whats yo age")
    age = float(input())
    print(school_stage(age))
|
############################################################################
# #
# Copyright (c) 2017 eBay Inc. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# #
############################################################################
from __future__ import print_function
from __future__ import division
from compat import PY3, unquote_plus
if PY3:
from urllib.request import install_opener, build_opener, AbstractHTTPHandler
from http.client import HTTPConnection
else:
from urllib2 import install_opener, build_opener, AbstractHTTPHandler
from httplib import HTTPConnection
import socket
class UnixHTTPConnection(HTTPConnection):
    """HTTP connection over a unix domain socket.

    The socket path arrives URL-encoded in the "host" part of the URL
    (optionally followed by ``:port``); it is decoded here while the
    nominal HTTP host is pinned to localhost.
    """

    def __init__(self, host, *args, **kwargs):
        HTTPConnection.__init__(self, 'localhost', *args, **kwargs)
        encoded_path, _, _ = host.partition(':')
        self.unix_path = unquote_plus(encoded_path)

    def connect(self):
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        sock.connect(self.unix_path)
        self.sock = sock
class UnixHTTPHandler(AbstractHTTPHandler):
    """urllib handler that routes unixhttp:// URLs over a unix socket."""

    def unixhttp_open(self, req):
        # Delegate to the generic opener, but with our socket-backed
        # connection class instead of a TCP one.
        return self.do_open(UnixHTTPConnection, req)
    unixhttp_request = AbstractHTTPHandler.do_request_

# Register the handler globally so urlopen() understands unixhttp:// URLs.
install_opener(build_opener(UnixHTTPHandler))
|
from . import views
from django.urls import path

# Book browsing routes: list, detail, reviews and per-author listings.
# NOTE(review): Django route strings conventionally do not start with '/'
# (the framework warns about it); confirm these patterns match as deployed.
# NOTE(review): '/<int:pk>' and '/<str:author>' overlap — ordering decides
# which one wins for numeric segments.
urlpatterns =[
    path('/',views.BookListView.as_view(),name='book.all'),
    path('/index',views.index,name='book.all.index'),
    path('/<int:pk>',views.BookDetailView.as_view(),name='book.show'),
    path('/book/<int:id>',views.show,name='book.show.index'),
    path('/<int:id>/review',views.review,name='book.review'),
    path('/<str:author>',views.author,name='author.books'),
]
from unittest.mock import MagicMock
import pytest
from riotwatcher._apis.legends_of_runeterra import MatchApi
@pytest.fixture(params=["match_id_001122"])
def match_id(request):
    """Parameterised fixture supplying a sample LoR match id."""
    return request.param
@pytest.mark.lor
@pytest.mark.unit
class TestMatchApi:
    """Unit tests for the Legends of Runeterra MatchApi endpoint wrappers."""

    def test_by_puuid(self, region, puuid):
        base_api = MagicMock()
        sentinel = object()
        base_api.raw_request.return_value = sentinel

        api = MatchApi(base_api)
        result = api.by_puuid(region, puuid)

        expected_url = (
            f"https://{region}.api.riotgames.com"
            f"/lor/match/v1/matches/by-puuid/{puuid}/ids"
        )
        base_api.raw_request.assert_called_once_with(
            MatchApi.__name__,
            api.by_puuid.__name__,
            region,
            expected_url,
            {},
        )
        assert result is sentinel

    def test_by_id(self, region, match_id):
        base_api = MagicMock()
        sentinel = object()
        base_api.raw_request.return_value = sentinel

        api = MatchApi(base_api)
        result = api.by_id(region, match_id)

        expected_url = (
            f"https://{region}.api.riotgames.com"
            f"/lor/match/v1/matches/{match_id}"
        )
        base_api.raw_request.assert_called_once_with(
            MatchApi.__name__,
            api.by_id.__name__,
            region,
            expected_url,
            {},
        )
        assert result is sentinel
|
from functools import reduce

# Anonymous function: a lambda is an inline single-expression function.
stm = lambda x, y: x + y
print(stm(1, 2))


# Higher-order function: takes another function as an argument.
def printC(n):
    """Return n tripled."""
    return n * 3


def mul(n, f):
    """Apply the function f to n and scale the result by 100.

    Bug fix: the original ignored its ``f`` parameter and always called
    printC directly, defeating the point of the higher-order example.
    """
    return f(n) * 100


# Pass the function itself; the printed output (900) is unchanged.
print(mul(3, printC))

# map: apply a rule to every element of a collection, producing a new list.
l1 = [i for i in range(0, 10)]


def mulTen(n):
    """Return n multiplied by 10."""
    return n * 10


l3 = []
l2 = map(mulTen, l1)
for i in l2:
    print(i)
    l3.append(i)
print(l3)

# reduce: fold the list down to a single accumulated value.
l = [1, 2, 3, 4, 5]


def add(m, n):
    """Return the sum of m and n."""
    return m + n


print(reduce(add, l))
|
# Copyright 2018 Adrien Guinet <adrien@guinet.me>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import pydffi
import struct
import sys
from common import DFFITest
class CXXTest(DFFITest):
    """Checks that pydffi can JIT-compile C++ (not just C) sources."""

    def __init__(self, *args, **kwargs):
        super(CXXTest, self).__init__(*args, **kwargs)
        # Compile embedded sources in C++11 mode rather than C.
        self.options = {'CXX': pydffi.CXXMode.Std11}

    def test_cxx(self):
        # Instantiate a C++ template and call it through an extern "C"
        # wrapper, which is the only linkage pydffi can invoke directly.
        FFI = self.FFI
        CU = FFI.compile('''
template <class T>
static T foo(T a, T b) { return a+b; }
extern "C" int foo_int(int a, int b) { return foo(a,b); }
''')
        self.assertEqual(CU.funcs.foo_int(4,5).value, 9)


if __name__ == '__main__':
    unittest.main()
|
import cv2
import numpy as np
import os
import time
from tqdm import tqdm
import shutil
import argparse
from glob import glob
import torchvision.transforms as transforms
import torchvision.models as models
import torch.nn as nn
import torch
import torch.backends.cudnn as cudnn
from test_spatial_dataloader import *
from test_motion_dataloader import *
from utils import *
from network import *
import json
import matplotlib.pyplot as plt
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
parser = argparse.ArgumentParser(description='video test for two stream')
parser.add_argument('--batch-size', default=19, type=int, metavar='N', help='mini-batch size (default: 25)')
parser.add_argument('--lr', default=5e-4, type=float, metavar='LR', help='initial learning rate')
parser.add_argument('--video_name', type=str, default='csce636test/6000_4.mp4')
arg = parser.parse_args()
rgb_whole_pred = {}
opf_whole_pred = {}
def main(start_frame):
    """Run one 40-frame evaluation window starting at `start_frame`.

    Builds and runs both streams of the two-stream network over the staged
    window directories. Each MODEL.run() accumulates its summed class scores
    into the module-level rgb_whole_pred / opf_whole_pred dicts (keyed by
    str(start_frame)) as a side effect; nothing is returned.
    """
    # Spatial (RGB) stream: reads frames from the staged chunk directory.
    spatial_data_loader = test_spatial_dataloader(
        BATCH_SIZE=arg.batch_size,
        num_workers=8,
        path='record/test/temp_chunk/',
    )
    spatial_test_loader = spatial_data_loader.run()
    spatial_model = MODEL(
        lr=arg.lr,
        batch_size=arg.batch_size,
        resume='555_rgb_model_best.pth.tar',  # pretrained RGB checkpoint
        evaluate='evaluate',
        test_loader=spatial_test_loader,
        channel=3,  # 3-channel RGB input
        start_frame=start_frame,
    )
    spatial_model.run()
    # Motion (optical-flow) stream: 10 stacked flow frames, u and v planes.
    motion_data_loader = test_motion_dataloader(
        BATCH_SIZE=arg.batch_size,
        num_workers=8,
        in_channel=10,
        path='record/test/temp_opf/'
    )
    motion_test_loader = motion_data_loader.run()
    motion_model = MODEL(
        test_loader=motion_test_loader,
        resume='475_opt_model_best.pth.tar',  # pretrained optical-flow checkpoint
        evaluate='evaluate',
        lr=arg.lr,
        batch_size=arg.batch_size,
        channel=10 * 2,  # 10 flow frames x (u, v)
        start_frame=start_frame,
    )
    motion_model.run()
class MODEL():
    """Evaluation harness around one (spatial or motion) ResNet-101 stream.

    Loads a checkpoint, runs the test loader once, and stores the summed
    per-class scores for the current window into the module-level
    prediction dicts.
    """

    def __init__(self, lr, batch_size, resume, evaluate, test_loader, channel,
                 start_frame):
        self.lr = lr                    # kept for parity with training code; unused here
        self.batch_size = batch_size
        self.resume = resume            # path to the checkpoint file
        self.evaluate = evaluate        # truthy => run validation after loading
        self.test_loader = test_loader
        self.best_prec1 = 0
        self.channel = channel          # 3 => RGB stream, 20 => optical-flow stream
        self.start_frame = start_frame  # window key used in the prediction dicts

    def build_model(self):
        # resnet101 comes from the project's `network` module (star import).
        self.model = resnet101(pretrained=True, channel=self.channel).cuda()

    def resume_and_evaluate(self):
        # Load weights if the checkpoint exists, then optionally evaluate.
        if self.resume:
            if os.path.isfile(self.resume):
                checkpoint = torch.load(self.resume)
                self.model.load_state_dict(checkpoint['state_dict'])
            else:
                print("==> no checkpoint found at '{}'".format(self.resume))
        if self.evaluate:
            self.epoch = 0
            self.validate_1epoch()
        return

    def run(self):
        self.build_model()
        self.resume_and_evaluate()

    def validate_1epoch(self):
        """One pass over the test loader; records summed class scores.

        NOTE(review): the store into *_whole_pred happens on every batch and
        overwrites the previous value, so with a multi-batch loader only the
        last batch's sum is kept — confirm the loader yields one batch per
        window.
        """
        batch_time = AverageMeter()  # from the project's `utils` star import
        self.model.eval()
        end = time.time()
        progress = tqdm(self.test_loader)
        with torch.no_grad():
            for i, (keys, data, label) in enumerate(progress):
                data = data.cuda()
                output = self.model(data)
                batch_time.update(time.time() - end)
                end = time.time()
                preds = output.data.cpu().numpy()
                # channel == 20 identifies the optical-flow stream.
                if self.channel == 20:
                    opf_whole_pred[str(self.start_frame)] = np.sum(preds, axis=0)
                else:
                    rgb_whole_pred[str(self.start_frame)] = np.sum(preds, axis=0)
        return
def cal_for_frames(video_path, video_name, flow_path):
    """Compute TV-L1 optical flow for consecutive frame pairs of a video.

    Reads every ``*.jpg`` under ``video_path`` in sorted order, computes the
    flow between each consecutive pair, and writes the u (horizontal) and v
    (vertical) planes as ``{:06d}.jpg`` under ``<flow_path>/<video_name>_u``
    and ``<flow_path>/<video_name>_v``.

    Fixes over the original: returns cleanly on an empty frame directory
    (the original crashed on ``frames[0]``), creates the two output
    directories once up front instead of re-checking existence on every
    iteration, and uses ``os.makedirs`` so a missing parent is not fatal.

    Args:
        video_path: directory containing the extracted RGB frames.
        video_name: prefix for the two output flow directories.
        flow_path: parent directory receiving the flow directories.
    """
    frames = glob(os.path.join(video_path, '*.jpg'))
    frames.sort()
    if not frames:
        return
    u_dir = os.path.join(flow_path, video_name + '_u')
    v_dir = os.path.join(flow_path, video_name + '_v')
    os.makedirs(u_dir, exist_ok=True)
    os.makedirs(v_dir, exist_ok=True)
    prev = cv2.UMat(cv2.imread(frames[0]))
    prev = cv2.cvtColor(prev, cv2.COLOR_BGR2GRAY)
    for i, frame_curr in enumerate(frames[1:]):
        curr = cv2.UMat(cv2.imread(frame_curr))
        curr = cv2.cvtColor(curr, cv2.COLOR_BGR2GRAY)
        tmp_flow = compute_TVL1(prev, curr)
        prev = curr
        cv2.imwrite(os.path.join(u_dir, "{:06d}.jpg".format(i + 1)), tmp_flow[:, :, 0])
        cv2.imwrite(os.path.join(v_dir, "{:06d}.jpg".format(i + 1)), tmp_flow[:, :, 1])
    return
def compute_TVL1(prev, curr, bound=15):
    """Compute the TV-L1 optical flow between two grayscale frames.

    The raw flow is clipped to [-bound, bound] and linearly rescaled to the
    0..255 range so it can be stored as a JPEG image.

    Args:
        prev, curr: grayscale frames (cv2.UMat here, per the caller).
        bound: absolute flow magnitude mapped to the ends of the 0..255 range.

    Returns:
        HxWx2 integer array (u, v planes) with values in [0, 255].
    """
    TVL1 = cv2.optflow.DualTVL1OpticalFlow_create()
    flow = TVL1.calc(prev, curr, None)
    flow = cv2.UMat.get(flow)  # download from the UMat back to a numpy array
    assert flow.dtype == np.float32
    # Map [-bound, bound] -> [0, 255]; saturate anything outside the range.
    flow = (flow + bound) * (255.0 / (2 * bound))
    flow = np.round(flow).astype(int)
    flow[flow >= 255] = 255
    flow[flow <= 0] = 0
    return flow
def extract_flow(video_path, video_name, flow_path):
    """Run flow extraction for a single video and log completion."""
    cal_for_frames(video_path, video_name, flow_path)
    print(''.join(['complete:', flow_path, video_name]))
def softmax(data):
    """Numerically stable softmax over the whole array.

    Subtracting the maximum before exponentiating avoids float overflow for
    large scores (the accumulated per-window sums here can be large) while
    leaving the mathematical result unchanged.

    Args:
        data: array-like of raw scores.

    Returns:
        ndarray of the same shape with non-negative entries summing to 1.
    """
    shifted = np.asarray(data) - np.max(data)
    data_exp = np.exp(shifted)
    return data_exp / np.sum(data_exp)
def save_fig(x, y, title, save_path):
    """Plot slipping probability over time and write the figure to disk.

    NOTE(review): relies on the module-level global `duration` (set in the
    __main__ block after the video is opened) for the x-axis limit, so this
    is only safe to call after that global exists — confirm call order.
    """
    plt.figure()
    plt.plot(x, y, linewidth=2, color='lightskyblue')
    plt.xlabel('time/s')
    plt.ylabel('Slipping probability')
    plt.ylim(0, 1.1)  # probabilities, with a little headroom
    plt.xlim(0, duration + 0.2)
    plt.title(title)
    plt.savefig(save_path)
    # plt.show()
def revise_order(path, isRGB):
    """Renumber every file in `path` into a dense 1-based sequence.

    RGB frames become ``frame_000001.jpg``, ``frame_000002.jpg``, ...; flow
    images become ``000001.jpg``, .... Files are renamed in lexicographic
    order of their current names. `path` is expected to carry a trailing
    separator, matching the callers.
    """
    entries = sorted(os.listdir(path))
    prefix = 'frame_' if isRGB else ''
    for idx, entry in enumerate(entries, start=1):
        target = '{}{}{:06d}.jpg'.format(path, prefix, idx)
        os.rename(path + entry, target)
def make_sure_path(path):
    """Create `path` as a directory if it does not already exist."""
    if os.path.exists(path):
        return
    os.mkdir(path)
if __name__ == '__main__':
    # ---- 1. Decode the input video into resized RGB frames ----
    make_sure_path('record/')
    file_path = arg.video_name
    video_title = file_path.split('/')[-1][:-4]  # file name without extension
    print(video_title)
    cap = cv2.VideoCapture(file_path)
    if cap.isOpened():
        rate = cap.get(5)          # frames per second
        FrameNumber = cap.get(7)   # total frame count
        duration = FrameNumber / rate
        width = cap.get(3)
        height = cap.get(4)
    print(duration)
    rgb_outPutDirName = 'record/temp_chunk/'
    opf_outPutDirName = 'record/temp_opf/'
    make_sure_path(rgb_outPutDirName)
    make_sure_path(opf_outPutDirName)
    index = 0
    while True:
        res, image = cap.read()
        if not res:
            print('not res , not image')
            break
        else:
            # Pad portrait videos horizontally before resizing to 342x256.
            if width < height:
                pad = int((height - width) // 2 + 1)
                image = cv2.copyMakeBorder(image, 0, 0, pad, pad, cv2.BORDER_CONSTANT, value=0)
            image = cv2.resize(image, (342, 256))
            cv2.imwrite(rgb_outPutDirName + 'frame_' + str(index + 1).zfill(6) + '.jpg', image)
            index += 1
    print('extract rgb finished')
    # ---- 2. Optical flow for the whole clip ----
    extract_flow(rgb_outPutDirName, 'v_temp_opf', opf_outPutDirName)
    cap.release()
    time_lable = {}
    # ---- 3. Stage the first 40-frame window for both streams ----
    make_sure_path('record/test/')
    make_sure_path('record/test/temp_chunk/')
    make_sure_path('record/test/temp_opf/')
    make_sure_path('record/test/temp_opf/v_temp_opf_u/')
    make_sure_path('record/test/temp_opf/v_temp_opf_v/')
    frame = 1
    for i in range(1, 41):
        shutil.copyfile(rgb_outPutDirName + 'frame_' + "{:06d}.jpg".format(i),
                        'record/test/temp_chunk/' + 'frame_' + "{:06d}.jpg".format(i))
        shutil.copyfile(opf_outPutDirName + 'v_temp_opf_u/' + "{:06d}.jpg".format(i),
                        'record/test/temp_opf/v_temp_opf_u/' + "{:06d}.jpg".format(i))
        shutil.copyfile(opf_outPutDirName + 'v_temp_opf_v/' + "{:06d}.jpg".format(i),
                        'record/test/temp_opf/v_temp_opf_v/' + "{:06d}.jpg".format(i))
    frame = 41
    StartFrame = frame - 41
    main(StartFrame)
    # ---- 4. Slide the window forward 10 frames at a time ----
    # NOTE(review): indentation reconstructed — each pass drops the 10 oldest
    # staged frames, copies in 10 new ones, renumbers the window back to
    # 1..40, and re-runs both streams.
    while frame + 10 < index:
        for i in range(1, 11):
            os.remove('record/test/temp_chunk/' + 'frame_' + "{:06d}.jpg".format(i))
            shutil.copyfile(rgb_outPutDirName + 'frame_' + "{:06d}.jpg".format(frame),
                            'record/test/temp_chunk/' + 'frame_' + "{:06d}.jpg".format(frame))
            os.remove('record/test/temp_opf/v_temp_opf_u/' + "{:06d}.jpg".format(i))
            shutil.copyfile(opf_outPutDirName + 'v_temp_opf_u/' + "{:06d}.jpg".format(frame),
                            'record/test/temp_opf/v_temp_opf_u/' + "{:06d}.jpg".format(frame))
            os.remove('record/test/temp_opf/v_temp_opf_v/' + "{:06d}.jpg".format(i))
            shutil.copyfile(opf_outPutDirName + 'v_temp_opf_v/' + "{:06d}.jpg".format(frame),
                            'record/test/temp_opf/v_temp_opf_v/' + "{:06d}.jpg".format(frame))
            frame += 1
        revise_order('record/test/temp_chunk/', True)
        revise_order('record/test/temp_opf/v_temp_opf_u/', False)
        revise_order('record/test/temp_opf/v_temp_opf_v/', False)
        StartFrame += 10
        main(StartFrame)
    # ---- 5. Fuse per-window scores into time-stamped probabilities ----
    fig_x, fig_y, fig_y_rgb, fig_y_opf = [], [], [], []
    for key in list(rgb_whole_pred.keys()):
        cur_time = float(key) / rate
        new_key = str(float('%.3f' % cur_time))
        # Late fusion: sum RGB and flow scores, then softmax.
        new_value = softmax(rgb_whole_pred[key] + 1 * opf_whole_pred[key]).tolist()
        rgb_value = softmax(rgb_whole_pred[key]).tolist()
        opf_value = softmax(opf_whole_pred[key]).tolist()
        time_lable[new_key] = new_value[0]
        fig_x.append(cur_time)
        fig_y.append(new_value[0])
        fig_y_rgb.append(rgb_value[0])
        fig_y_opf.append(opf_value[0])
    # If the RGB stream has saturated (>90% of points pinned at ~1 or ~0),
    # fall back to the other stream (or RGB alone) for the reported labels.
    point_num = len(fig_y_rgb)
    one_count = 0
    zero_count = 0
    for i in fig_y_rgb:
        if abs(i-1) < 1e-4:
            one_count += 1
        if abs(i-0) < 1e-4:
            zero_count += 1
    if one_count/point_num > 0.9:
        time_lable = {str(float('%.3f'%(float(key)/rate))): softmax(opf_whole_pred[key]).tolist()[0] for key in
                      list(rgb_whole_pred.keys())}
        fig_y = fig_y_opf.copy()
    elif zero_count / point_num > 0.9:
        time_lable = {str(float('%.3f' % (float(key) / rate))): softmax(rgb_whole_pred[key]).tolist()[0] for key in
                      list(rgb_whole_pred.keys())}
        fig_y = fig_y_rgb.copy()
    # ---- 6. Persist the per-time labels and the probability curve ----
    json_str = json.dumps(time_lable)
    with open(video_title + '_' + 'timelable.json', 'w') as json_file:
        json_file.write(json_str)
        json_file.close()  # redundant inside `with`, kept as-is
    # Build a step-style curve by duplicating each point just before the next x.
    fig_x_1 = fig_x[:1]
    fig_y_1 = fig_y[:1]
    fig_y_rgb_1 = fig_y_rgb[:1]
    fig_y_opf_1 = fig_y_opf[:1]
    for i in range(1, len(fig_x)):
        fig_x_1.append(fig_x[i] - 0.001)
        fig_x_1.append(fig_x[i])
        fig_y_1.append(fig_y_1[-1])
        fig_y_1.append(fig_y[i])
        fig_y_rgb_1.append(fig_y_rgb_1[-1])
        fig_y_rgb_1.append(fig_y_rgb[i])
        fig_y_opf_1.append(fig_y_opf_1[-1])
        fig_y_opf_1.append(fig_y_opf[i])
    fig_x_1.append(duration)
    fig_y_1.append(fig_y[-1])
    fig_y_rgb_1.append(fig_y_rgb[-1])
    fig_y_opf_1.append(fig_y_opf[-1])
    save_fig(fig_x_1, fig_y_1, 'two stream network', 'bucket3/' + video_title + '_Part6.jpg')
    # save_fig(fig_x_1, fig_y_rgb_1, 'spatial stream network', 'bucket3/' + video_title + '_rgb' + '_Part6.jpg')
    # save_fig(fig_x_1, fig_y_opf_1, 'motion stream network', 'bucket3/' + video_title + '_opf' + '_Part6.jpg')
    rgb_whole_pred = {}
    opf_whole_pred = {}
    # Clean up all staging directories.
    shutil.rmtree('record/')
|
"""
Copy this file to local_settings.py and edit the values.
Make sure you set the DJANGO_SETTINGS_MODULE environment variable to local_settings as well.
"""
# Local environment?
# from totemag.settings.local import *
# Production environment?
# from totemag.settings.production import *
# Override database settings
#DATABASES['default']['USER'] = 'root'
#DATABASES['default']['PASSWORD'] = 'password'
# super secret salt hash
#SECRET_KEY = ''
# api key to get into mailchimp
#MAILCHIMP_API_KEY = ''
# email credentials
#EMAIL_HOST_PASSWORD = ''
|
from mininet.node import OVSSwitch
class OVSSwitchSTP(OVSSwitch):
    """Open vSwitch with spanning-tree protocol enabled.

    Each started switch bumps a class-wide counter and uses it as its STP
    priority, so every bridge gets a distinct priority value.
    """
    # Class-wide counter shared by all instances; incremented per start().
    prio = 1000

    def start(self, *args, **kwargs):
        OVSSwitch.start(self, *args, **kwargs)
        OVSSwitchSTP.prio += 1
        # Standalone fail mode + no controller: the switch forwards on its own.
        self.cmd('ovs-vsctl set-fail-mode', self, 'standalone')
        self.cmd('ovs-vsctl set-controller', self)
        self.cmd('ovs-vsctl set Bridge', self,
                 'stp_enable=true',
                 'other_config:stp-priority=%d' % OVSSwitchSTP.prio)

# Exposed to Mininet's --switch option under the name 'ovs-stp'.
switches = {'ovs-stp': OVSSwitchSTP}
|
import math
from random import random
from scipy.stats.distributions import chi2
values = [86,133,75,22,11,144,78,122,8,146,33,41,99]
values.sort()
def func_distro_expo(x, media):
    """CDF of the exponential distribution with mean `media`, evaluated at x."""
    rate = 1.0 / float(media)
    return 1 - math.e ** (-x * rate)
def func_distro_exp_values(n, media):
    """Evaluate the exponential CDF at every observed sample.

    NOTE(review): iterates the module-level sorted `values` list and ignores
    the `n` parameter entirely — callers pass n == len(values); confirm
    before relying on n.
    """
    values_f = []
    for i in values:
        values_f.append(func_distro_expo(i, media))
    return values_f
def generator_unif_values(n):
    """Draw `n` Uniform(0,1) samples and return them sorted ascending."""
    draws = [random() for _ in range(n)]
    draws.sort()
    return draws
def generator_steps(n):
    """Return the empirical-CDF step levels 0/n, 1/n, ..., n/n."""
    return [i / float(n) for i in range(n + 1)]
def d_generator(n, media):
    """Kolmogorov-Smirnov statistic D for the observed data vs Exp(media).

    Compares the fitted exponential CDF at each sorted observation against
    the empirical-CDF steps on either side of it (max over i of
    F(x_i) - i/n and (i+1)/n - F(x_i)). Uses the module-level `values`
    via func_distro_exp_values.
    """
    values = func_distro_exp_values(n, media)  # CDF at the observed samples
    steps = generator_steps(n)
    max_global = 0
    for i in range(n):
        max_local = max(steps[i+1] - values[i], values[i] - steps[i])
        max_global = max(max_local, max_global)
    return max_global
def u_generator(n):
    """KS statistic for n fresh Uniform(0,1) draws against the uniform CDF.

    Used to simulate the null distribution of D: the KS statistic is
    distribution-free, so sampling uniforms suffices.
    """
    values = generator_unif_values(n)
    steps = generator_steps(n)
    max_global = 0
    for i in range(n):
        max_local = max(steps[i+1] - values[i], values[i] - steps[i])
        max_global = max(max_local, max_global)
    return max_global
def valor_p(n, media, iterations):
    """Monte-Carlo p-value of the KS test: P(D_null >= d_observed).

    Python 2 source (print statements). Draws `iterations` null statistics
    and returns the fraction at least as extreme as the observed d.
    """
    d = d_generator(n, media)
    print "d: ", d
    p_value = 0
    for _ in range(iterations):
        D = u_generator(n)
        if (D >= d):
            p_value += 1
    return p_value / float(iterations)
print valor_p(13, 50, 500) |
numb = 3
floatt = 3.14
print(type(numb))    # type() reports a variable's runtime type
print(type(floatt))  # this one is a float rather than an integer

# Arithmetic operators:
#   Addition: +    Subtraction: -
#   Multiplication: *    Division: /    Floor division: //
#   Exponent: **    Modulus: % (remainder of a division)
numb *= 10
floatt += 10  # multiply and add by ten, respectively
print(round(4.12))     # rounds to the nearest whole number
print(round(4.12, 2))  # round to a specific number of decimal places
print(numb == floatt)  # equality check: evaluates to True or False
# ...or != to test inequality (True when the operands are NOT equal)
# The comparisons >=, <=, > and < are all usable too.

garlics = '101'
cloves = '823'
# These are strings right now, but they can be cast to integers:
numGarlics = int(garlics)  # integer version of garlics
cloves = int(cloves)       # rebind the name to its integer value
print(numGarlics + cloves)
|
class Loc:
    """A screen coordinate (x, y) used by the key-map constants below."""

    def __init__(self, x, y):
        # Plain attributes; consumers read .x / .y directly.
        self.x = x
        self.y = y

    def __repr__(self):
        coords = (self.x, self.y)
        return '{} {}'.format(*coords)
# ========== Key Map ==========
# Fixed screen coordinates (presumably for tap automation — confirm against
# the consumer of these constants).
# BTN — main buttons
QUEST = Loc(359, 1035)
SORT = Loc(359, 1120)
MAKE_EQUIP = Loc(359, 1220)
# SKILL — the four skill slots, top to bottom
SKILL1 = Loc(664, 796)
SKILL2 = Loc(664, 898)
SKILL3 = Loc(664, 997)
SKILL4 = Loc(664, 1096)
# OPT — left / middle / right dialog options
L_OPT = Loc(230, 922)
M_OPT = Loc(360, 922)
R_OPT = Loc(490, 922)
# SHOP — 2x3 grid of shop slots, row-major
SHOP_1 = Loc(191, 630)
SHOP_2 = Loc(359, 630)
SHOP_3 = Loc(530, 630)
SHOP_4 = Loc(191, 794)
SHOP_5 = Loc(359, 794)
SHOP_6 = Loc(530, 794)
SHOP_ALL = [SHOP_1, SHOP_2, SHOP_3, SHOP_4, SHOP_5, SHOP_6]
# ========== Key Map ==========
|
import pandas as pd
import numpy as np
import glob
import os
class functionals:
    """Helpers for assembling and cleaning Freddie Mac loan-level data.

    The original source did not compile: several methods declared parameters
    as ``self.x`` (a SyntaxError) and referenced undeclared names
    (``fig_id``, ``files``, ``dflist``, ``plt``). The signatures below keep
    the intended call shapes while making them valid Python; no caller-facing
    names were changed.
    """

    def __init__(self, path, indir, outdir):
        """Store the origination glob `path`, the monthly-performance input
        glob (`indir`) and the concatenated-CSV output path (`outdir`)."""
        self.path = path
        self.fig_id = None  # set by save_fig(); the original read an undefined name here
        # Column names of the pipe-delimited origination files (no header row).
        self.orig_headers = [
            'CREDIT SCORE', 'FIRST PAYMENT DATE', 'FIRST TIME HOMEBUYER FLAG', 'MATURITY DATE',
            'MSA', 'MI %', 'NUMBER OF UNITS', 'OCCUPANCY STATUS', 'ORIGINAL CLTV',
            'ORIGINAL DTI', 'ORIGINAL UPB', 'ORIGINAL LTV', 'ORIGINAL INTEREST RATE',
            'CHANNEL', 'PPM FLAG', 'PRODUCT TYPE', 'PROPERTY STATE', 'PROPERTY TYPE',
            'POSTAL CODE', 'LOAN SEQUENCE NUMBER', 'LOAN PURPOSE', 'ORIGINAL LOAN TERM',
            'NUMBER OF BORROWERS', 'SELLER NAME', 'SERVICER NAME', 'UNKNOWN'
        ]
        # Column names of the monthly performance files.
        self.mp_headers = [
            'LOAN SEQUENCE NUMBER', 'MONTHLY REPORTING PERIOD', 'CURRENT ACTUAL UPB',
            'CURRENT LOAN DELINQUENCY STATUS', 'LOAN AGE', 'REMAINING MONTHS TO LEGAL MATURITY',
            'REPURCHASE FLAG', 'MODIFICATION FLAG', 'ZERO BALANCE CODE',
            'ZERO BALANCE EFFECTIVE DATE', 'CURRENT INTEREST RATE', 'CURRENT DEFERRED UPB',
            'DDLPI', 'MI RECOVERIES', 'NET SALES PROCEEDS', 'NON MI RECOVERIES', 'EXPENSES',
            'LEGAL COSTS', 'MAINTENANCE AND PRESERVATION COSTS', 'TAXES AND INSURANCE',
            'MISCELLANEOUS EXPENSES', 'ACTUAL LOSS CALCULATION', 'MODIFICATION COST',
            'STEP MODIFICATION FLAG', 'DEFERRED PAYMENT MODIFICATION', 'ELTV', 'ZERO BALANCE REMOVAL UPB',
            'DELINQUENT ACCRUED INTEREST'
        ]
        self.indir = indir
        self.outdir = outdir

    def save_fig(self, fig_id, tight_layout=True):
        """Save the current matplotlib figure as `<fig_id>.png` at 300 dpi."""
        # Local import: the module never imported pyplot, which would have
        # made this method raise NameError.
        import matplotlib.pyplot as plt
        self.fig_id = fig_id
        self.path = os.path.join(self.fig_id + ".png")
        print("Saving figure", self.fig_id)
        if tight_layout:
            plt.tight_layout()
        plt.savefig(self.path, format='png', dpi=300)

    def orig_concatter(self):
        """Concatenate every origination file matching self.path into orig.txt."""
        self.files = glob.glob(self.path)
        with open('orig.txt', 'w') as result:
            for file_ in self.files:  # original referenced bare `files`
                for line in open(file_, 'r'):
                    result.write(line)

    def mp_concatter(self, indir=None, outdir=None):
        """Concatenate all monthly-performance files into one CSV.

        NOTE(review): the original filtered with ``self.data[self.cols_p]``
        but ``cols_p`` is never defined anywhere in the class — the column
        subset must be assigned to ``self.cols_p`` before calling.

        Args:
            indir: optional override for the input glob.
            outdir: optional override for the output CSV path.

        Returns:
            The concatenated DataFrame (also saved to `outdir`).
        """
        self.indir = indir if indir is not None else self.indir
        self.outdir = outdir if outdir is not None else self.outdir
        self.filelist = glob.glob(self.indir)
        self.dflist = []
        for file in self.filelist:
            print(file)
            self.data = pd.read_csv(file, delimiter='|', names=self.mp_headers, low_memory=False)
            self.data = self.data[self.cols_p]
            self.data.dropna(inplace=True)
            self.dflist.append(self.data)
        self.concatdf = pd.concat(self.dflist, axis=0)
        self.concatdf.to_csv(self.outdir, index=False)
        return self.concatdf

    def data_cleaner(self, mp, orig):
        """Join performance and origination data and apply validity filters.

        Rows outside documented valid ranges (credit score 301-850, CLTV
        0-200, DTI 0-65) are dropped; the MI% sentinel 999 becomes 0 and
        zero-balance codes collapse to a binary indicator (1 => removal).

        Args:
            mp: monthly-performance DataFrame with a LOAN SEQUENCE NUMBER column.
            orig: origination DataFrame with a LOAN SEQUENCE NUMBER column.

        Returns:
            The cleaned, joined DataFrame (also stored on self.data).
        """
        self.data = mp.set_index('LOAN SEQUENCE NUMBER').join(orig.set_index('LOAN SEQUENCE NUMBER'))
        self.data.dropna(inplace=True)
        self.data = self.data[(self.data['CREDIT SCORE'] >= 301) & (self.data['CREDIT SCORE'] <= 850)]
        self.data = self.data[(self.data['ORIGINAL CLTV'] >= 0) & (self.data['ORIGINAL CLTV'] <= 200)]
        self.data = self.data[(self.data['ORIGINAL DTI'] >= 0) & (self.data['ORIGINAL DTI'] <= 65)]
        # Column re-assignment instead of chained inplace replace: the
        # original pattern silently stops mutating under pandas copy-on-write.
        self.data['MI %'] = self.data['MI %'].replace(999, 0)
        self.data['ZERO BALANCE CODE'] = self.data['ZERO BALANCE CODE'].replace(
            {1: 0, 9: 1, 6: 1, 3: 1, 2: 1, 15: 1})
        return self.data
|
# Python 2 scratch script exploring function (re)definition and duck typing.

def add(x, y):
    """Coerce both arguments to float, print and return their sum."""
    z = float ( x ) + float ( y )
    print "The required Sum is: {}".format ( z )
    return z

# NOTE(review): this second definition silently rebinds `add`, shadowing the
# identical one above — Python keeps only the latest binding.
def add(x, y):
    z = float ( x ) + float ( y )
    print "The required Sum is: {}".format ( z )
    return z

print add ( 5, 8 )

# If a & b are strings
# NOTE(review): this third rebinding changes add's contract entirely: it now
# returns True when both arguments are float-coercible and None otherwise.
def add(x, y):
    try:
        a = float ( x )
        b = float ( y )
    except ValueError:
        return None
    else:
        return True

print add( 'Hi', 'Hello' )

def addSum(x, y):
    """Return x + y; '+' dispatches by type (numeric add or string concat)."""
    return x + y

print addSum ( 2.2, 5.6 )
print addSum ( float ( 2 ), float ( 5 ) )
print addSum ( 2, 5 )
print addSum ( 'Hi', 'Hello' )
|
from django.contrib import admin
from .models import attachment
class AttachmentAdmin(admin.ModelAdmin):
    """Django admin configuration for the Attachment model."""
    # menu_title / menu_group are not stock ModelAdmin options — presumably
    # consumed by a custom admin skin for grouped navigation; confirm.
    menu_title = "Attachment"
    menu_group = "Attachment"
    # Columns shown on the admin changelist page.
    list_display = ['begin_date', 'end_date', 'name', 'cycle', 'format',
                    'description', 'file', 'content_type', 'objid']

admin.site.register(attachment.Attachment, AttachmentAdmin)
# Python3 module: compactSchemes
"""Python module that reads the alpha-coefs from db files"""
import sqlite3 as db
def get_alphas(db_filename, nbs_id):
    """Return the alpha coefficients stored in row `nbs_id` of table nbs.

    The first column (the id) is skipped; every remaining column is returned
    as a float keyed by its column name.

    Fix over the original: the connection was never closed (leaked on every
    call); it is now released in a ``finally`` block.

    Args:
        db_filename: path to the sqlite database file.
        nbs_id: primary-key value of the row to read.

    Returns:
        dict mapping column name to float coefficient.

    Raises:
        TypeError: via the row access when no row with that id exists.
    """
    conn = db.connect(db_filename)
    try:
        conn.row_factory = db.Row
        cur = conn.cursor()
        cur.execute("SELECT * FROM nbs WHERE id=?", (nbs_id,))
        row = cur.fetchone()
        params = {}
        # Skip column 0 (the id); pair remaining names with their values.
        for k, v in zip(row.keys()[1:], row[1:]):
            params[k] = float(v)
        return params
    finally:
        conn.close()
#!/user/bin/env python
# coding=utf-8
"""
@file: tf基础.py
@author: zwt
@time: 2020/10/20 16:44
@desc:
"""
import tensorflow as tf
# Define a random scalar drawn from Uniform[0, 1).
random_float = tf.random.uniform(shape=())
print(random_float)
# Define a zero vector with two elements.
zero_vector = tf.zeros(shape=(2))
print(zero_vector)
# Define two 2x2 constant matrices.
A = tf.constant([[1., 2.], [3., 4.]])
B = tf.constant([[1., 2.], [3., 4.]])
C = tf.add(A, B)  # element-wise addition
print(C)
print(C.shape)
print(C.dtype)
print(C.numpy())
D = tf.matmul(A, B)  # matrix multiplication
print(D)
# Automatic differentiation.
x = tf.Variable(initial_value=3.)
# Inside the tf.GradientTape() context, every operation is recorded for differentiation.
with tf.GradientTape() as tape:
    y = tf.square(x)
# Compute dy/dx.
y_grad = tape.gradient(y, x)
print(y, y_grad)
# Linear-regression-style example: L(w, b) = sum((X w + b - y)^2).
X = tf.constant([[1., 2.], [3., 4.]])
y = tf.constant([[1.], [2.]])
w = tf.Variable(initial_value=[[1.], [2.]])
b = tf.Variable(initial_value=1.)
with tf.GradientTape() as tape:
    L = tf.reduce_sum(tf.square(tf.matmul(X, w) + b - y))
w_grad, b_grad = tape.gradient(L, [w, b])  # partial derivatives of L w.r.t. w and b
print(L, w_grad, b_grad)
#!/bin/python3
import sys
#sys.path.append("./secret")
import grid_mdp
import random
grid = grid_mdp.Grid_Mdp();
states = grid.getStates();
actions = grid.getActions();
gamma = grid.getGamma();
def mc(gamma, state_sample, action_sample, reward_sample):
    """Monte-Carlo evaluation of the sampled policy over the module `states`.

    For each episode the return G is first accumulated backwards from the
    final step, then redistributed forwards: at each step the current G is
    credited to the visited state before being rolled one step ahead
    (subtract that step's reward, divide by gamma). Each state's value is
    the average of the returns credited to it. `action_sample` is unused.
    """
    vfunc = dict();  # sum of returns per state (averaged at the end)
    nfunc = dict();  # visit counts per state
    for s in states:
        vfunc[s] = 0.0
        nfunc[s] = 0.0
    for iter1 in range(len(state_sample)):
        # Backward pass: G becomes the return from the episode start.
        G = 0.0
        for step in range(len(state_sample[iter1])-1, -1, -1):
            G *= gamma;
            G += reward_sample[iter1][step];
        # Forward pass: credit G, then shift it to the next step's return.
        for step in range(len(state_sample[iter1])):
            s = state_sample[iter1][step]
            vfunc[s] += G;
            nfunc[s] += 1.0;
            G -= reward_sample[iter1][step]
            G /= gamma;
    for s in states:
        if nfunc[s] > 0.000001:  # avoid division by zero for unvisited states
            vfunc[s] /= nfunc[s]
    print("mc")
    print(vfunc)
    return vfunc
def td(alpha, gamma, state_sample, action_sample, reward_sample):
    """TD(0) evaluation: v(s) <- v(s) + alpha * (r + gamma*v(s') - v(s)).

    Values start at random noise; terminal steps bootstrap from 0. Uses the
    module-level `states` list; `action_sample` is unused.
    """
    vfunc = dict()
    for s in states:
        vfunc[s] = random.random()  # arbitrary initial values
    for iter1 in range(len(state_sample)):
        for step in range(len(state_sample[iter1])):
            s = state_sample[iter1][step]
            r = reward_sample[iter1][step]
            if len(state_sample[iter1]) - 1 > step:
                s1 = state_sample[iter1][step + 1]
                next_v = vfunc[s1]
            else:
                next_v = 0.0;  # terminal step: no bootstrap target
            vfunc[s] = vfunc[s] + alpha * (r + gamma * next_v - vfunc[s]);
    print("")
    print("td")
    print(vfunc)
    return vfunc
def tdn(alpha, gamma, state_sample, action_sample, reward_sample):
    """Experimental n-step-flavoured TD variant.

    Runs a TD(0) update into vfunc1, then mixes weighted copies of it into
    vfunc2. NOTE(review): the weighting gamma**(i - 1) starts at i = 0
    (i.e. gamma**-1 > 1) and vfunc2 is divided by the global update counter
    j — both look unintentional; treat the output as exploratory, not as a
    textbook n-step TD estimate.
    """
    vfunc1 = dict()
    vfunc2 = dict()
    j = 0  # total number of TD updates performed so far, across episodes
    for s in states:
        vfunc1[s] = random.random()
        vfunc2[s] = 0
    for iter1 in range(len(state_sample)):
        for step in range(len(state_sample[iter1])):
            s = state_sample[iter1][step]
            r = reward_sample[iter1][step]
            if len(state_sample[iter1]) - 1 > step:
                s1 = state_sample[iter1][step + 1]
                next_v = vfunc1[s1]
            else:
                next_v = 0.0;  # terminal step
            vfunc1[s] = vfunc1[s] + alpha * (r + gamma * next_v - vfunc1[s]);
            j = j + 1
            # Mix discounted copies of the TD estimate into vfunc2.
            for i in range(step):
                vfunc2[s] = (vfunc2[s] + (1 - gamma) * (gamma**(i - 1)) * vfunc1[s]);
            vfunc2[s] = vfunc2[s]/j
    print("")
    print("tdn")
    print(vfunc2)
    return vfunc2
def nstep(alpha, gamma, state_sample, action_sample, reward_sample):
    """TD variant where vfunc2 is the running average of itself and the
    freshly TD-updated vfunc1 at every visited step.

    NOTE(review): despite the name this is not a standard n-step method —
    the averaging halves the previous value on each visit.
    """
    vfunc1 = dict()
    vfunc2 = dict()
    for s in states:
        vfunc1[s] = random.random()
        vfunc2[s] = 0
    for iter1 in range(len(state_sample)):
        for step in range(len(state_sample[iter1])):
            s = state_sample[iter1][step]
            r = reward_sample[iter1][step]
            if len(state_sample[iter1]) - 1 > step:
                s1 = state_sample[iter1][step + 1]
                next_v = vfunc1[s1]
            else:
                next_v = 0.0;  # terminal step
            vfunc1[s] = vfunc1[s] + alpha * (r + gamma * next_v - vfunc1[s]);
            vfunc2[s] = (vfunc2[s] + vfunc1[s]) / 2
    print("")
    print("nstep")
    print(vfunc2)
    return vfunc2
if __name__ == "__main__":
# s, a, r = grid.gen_randompi_sample(5)
# print s
# print a
# print r
s, a, r = grid.gen_randompi_sample(1000)#迭代次数
mc(0.5, s, a, r)
td(0.15, 0.5, s, a, r)
tdn(0.15, 0.5, s, a, r)
nstep(0.15, 0.5, s, a, r) |
import os

# Python 2 watchdog script. Counts lines in `ps` matching the app name and
# restarts the nodejs service when the count is exactly '2' — presumably the
# grep process itself plus one survivor, i.e. the real worker has died.
# Confirm the expected count on the target box.
cmd = 'ps | grep -c exchange-xiaozhi-express'
cnt = os.popen(cmd).read().replace('\n','');
print cnt
if cnt == '2':
    cmd = '/etc/init.d/nodejs restart'
    print cmd
    os.system(cmd)
|
#!/usr/bin/env python3
from scipy.fftpack import dct
from random import randint, sample
from numpy import eye, zeros
from numpy.random import randn, permutation
def cs_data(m, n, s):
    """Build a random compressed-sensing instance.

    A is m rows sampled (without replacement) from an n-point DCT matrix,
    x_ex is an n-vector with s Gaussian nonzeros at random positions, and
    b = A @ x_ex.

    BE SUPER CAREFUL THAT YOU ARE PASSING THE RIGHT VALUES HERE.
    it seems that matlab is inclusive ranges and python is not so i can just
    use the default values and be okay. pay attention!
    """
    full_dct = dct(eye(n))
    chosen_rows = permutation(n)[:m]
    A = full_dct[chosen_rows]
    x_ex = zeros((n, 1))
    support = permutation(n)[:s]
    x_ex[support] = randn(s, 1)
    b = A.dot(x_ex)
    return A, b, x_ex
|
from matplotlib import pyplot as plt
import numpy as np
# Phase portrait of the linear system y1' = -y1 + y2, y2' = -y1 - y2
# (a stable spiral point at the origin).
y_1, y_2 = np.meshgrid(np.arange(-7, 7, .1), np.arange(-7, 7, .1))
y_1_dot = -y_1 + y_2
y_2_dot = -y_1 - y_2
# Seed points for the streamlines: outer-square corners/edge midpoints plus
# a ring of radius-2 points near the origin.
start = [
    [4, 4],
    [4, -4],
    [0, 4],
    [4, 0],
    [-4, -4],
    [-4, 4],
    [0, -4],
    [-4, 0],
    [np.sqrt(3), 1],
    [1, np.sqrt(3)],
    [-np.sqrt(3), -1],
    [-1, -np.sqrt(3)],
    [-np.sqrt(3), 1],
    [-1, np.sqrt(3)],
    [np.sqrt(3), -1],
    [1, -np.sqrt(3)],
]
fig = plt.figure(1, figsize=(5, 5))
ax = fig.add_subplot(111)
ax.axis('equal')
ax.streamplot(
    y_1, y_2, y_1_dot, y_2_dot,
    density=.5,
    linewidth=1,
    start_points=start,
    maxlength=5,
)
ax.set_xlabel(r"$y_1$")
ax.set_ylabel(r"$y_2$")
# Strip axis decorations so only the flow field remains.
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.tick_params(top="off", right="off", bottom="off", left="off")
plt.gca().xaxis.set_major_locator(plt.NullLocator())
plt.gca().yaxis.set_major_locator(plt.NullLocator())
# A white disc hides the crowded centre so the spiral reads cleanly.
circle = plt.Circle((0, 0), 0.8, color='white', zorder=10)
ax.add_artist(circle)
# plt.show()
plt.savefig("spiral_point.pdf", bbox_inches="tight", transparent=True)
plt.clf()
|
import os
from fabric.decorators import task
from fabric.tasks import execute
from fabric.api import env, cd, settings, sudo, run, put, hide, show
from fabric.contrib.files import append
from cStringIO import StringIO
from mercantile.config import config, required, string_list, contents_of_path, default_file, default
# Schema for the 'servers' config group: the value type, requirement flag and
# default for every per-server setting (validated by mercantile.config).
conf = config.add_group('servers', {
    'name': unicode | required,                         # Name of the server. (Required)
    'description': unicode,                             # Description of the server.
    'host': unicode,                                    # IP or host name for the server.
    'identity': unicode,                                # The path to a private key / identity file for root.
    'users': string_list,                               # A list of users to install
    'packages': string_list,                            # A list of additional packages to install
    'pip': string_list,                                 # A list of additional python packages to install
    'aws': unicode,                                     # The key of an aws config to use.
    'service_root': unicode | default('/srv'),          # Directory root to the services.
    'mysql_root_password': unicode,                     # Sets the root mysql password.
    'root_login': unicode | default('root'),            # Root login
    'root_password': unicode,                           # Password for root, if available.
    'language': unicode | default("LANG=en_US.UTF-8"),  # English
    'motd.txt': unicode | default_file("motd.txt"),     # MOTD Template
})
### Helpers ###
env.server = None  # currently-active server config (set by activate())

def activate(name):
    """Select server `name` from the config and point Fabric's env at it.

    Side effects: rebinds the module-global `conf` to the chosen server's
    config, and sets key file, host list, root credentials, and the
    server's AWS profile when each is configured.
    """
    global conf
    env.server = conf = conf[name]
    if conf.identity:
        env.key_filename = env.server.identity
    if conf.host:
        env.hosts = [env.server.host]
    if conf.aws:
        import aws
        aws.activate(env.server.aws)
    if conf.root_password:
        env.user = conf.root_login
        env.password = env.server.root_password
def put_template(local, remote, context, **kwargs):
    """Render the local Jinja2 template with `context` and upload it to `remote`.

    The rendered text is ascii-encoded into an in-memory buffer, so nothing
    is written to the local filesystem before the upload.
    """
    from jinja2 import Template
    template = Template(contents_of_path(local))
    io = StringIO(template.render(**context).encode("ascii"))
    put( io, remote, **kwargs )
### Tasks ###
@task
def build(name=None):
    """
    Builds the server: activates its config, provisions the base system
    (packages, gems, pip, hostname, language, MOTD) and then builds each
    user listed in the server config.
    """
    import user
    if name is not None:
        activate(name)
    if conf.aws:
        import aws
        aws.build_if_needed()  # create the cloud instance first if necessary
    env.user = env.server.root_login
    ensure_sudo()
    fix_dpkg()
    update()
    install_essential_packages()
    install_packages()
    install_gems()
    pip_install()
    set_hostname()
    set_language()
    set_motd()
    for k in conf.users:
        user.build(k)
@task
def fix_dpkg():
    "If the dpkg was interrupted, this will fix it."
    print "Fixing package manager..."
    env.user = env.server.root_login
    # warn_only: each cleanup step may legitimately fail (nothing to kill,
    # no stale lock) without aborting the task.
    with settings(warn_only=True):
        sudo("killall dpkg")
        sudo("rm /var/lib/dpkg/lock")
        sudo("dpkg --configure -a")
@task
def update():
    "Updates the system / upgrades the distribution if available."
    print "Updating system..."
    env.user = env.server.root_login
    sudo("apt-get -qy update")
    # --force-yes: proceed without confirmation even for unauthenticated
    # packages (dated apt-get flag, kept as-is).
    sudo("apt-get -qy --force-yes dist-upgrade")
    sudo("apt-get -qy --force-yes upgrade")
@task
def resize_fs(dev='/dev/xvda1'):
    "Resize the filesystem on the given device."
    print "Resizing filesystem..."
    env.user = env.server.root_login
    # Grows the ext filesystem to fill the (already-resized) block device.
    sudo('resize2fs %s' % dev)
@task
def ensure_sudo():
    "Installs sudo if it's not already installed."
    try:
        with hide('running', 'stdout', 'stderr', 'status', 'aborts'):
            sudo("ls") # Check for sudo / access.
    # NOTE(review): bare except also swallows aborts/KeyboardInterrupt;
    # kept as-is since Fabric raises SystemExit on abort here.
    except:
        # Fall back to the root account to bootstrap sudo, then restore the
        # previous credentials.
        prev_user, env.user = env.user, env.server.root_login
        prev_password, env.password = env.password, env.server.root_password
        run("apt-get -qy update")
        run("apt-get -qy install sudo")
        env.user = prev_user
        env.password = prev_password
@task
def install_essential_packages():
    "Installs essential packages."
    print "Installing essential packages..."
    env.user = env.server.root_login
    # Baseline toolchain for every server: VCS clients, build deps,
    # python/ruby runtimes, supervisor and redis.
    packages = [
        "sudo",
        "git",
        "libjpeg62-dev",
        "python-dev python-setuptools",
        "supervisor",
        "mercurial",
        "libcurl3-openssl-dev",
        "screen",
        "redis-server",
        "libevent-dev",
        "libpcre3 libpcre3-dev libssl-dev",
        "build-essential psmisc libxml2 libxml2-dev libxslt1.1 libxslt1-dev",
        "libmysqlclient-dev",
        "ruby",
    ]
    sudo("apt-get -qy --force-yes install %s" % " ".join(packages))
    sudo("easy_install virtualenv pip")
@task
def install_packages(packages=None):
    "Installs the given packages (defaults to the server config's list)."
    env.user = env.server.root_login
    packages = packages or env.server.packages
    if packages:  # skip cleanly when nothing is configured
        sudo("apt-get -qy install %s" % packages)
@task
def set_hostname(host=None):
    "Sets the hostname to the given ``host`` or the config host."
    host = host or env.server.host
    env.user = env.server.root_login
    print "Setting hostname to %r..." % host
    # NOTE(review): `hostname` sets the transient hostname only; it does not
    # persist across reboots (no /etc/hostname update).
    sudo("hostname %s" % host)
@task
def set_language(lang=None):
    "Sets the language for the server to the given ``lang`` or the language in the config."
    lang = lang or env.server.language
    env.user = env.server.root_login
    print "Setting language to %r..." % lang
    # append() only adds the line if it is not already present.
    append("/etc/environment", lang, use_sudo=True)
@task
def set_motd(motd=None):
    "Sets the Message of the Day for the server to the given ``motd`` or the 'motd.txt' in the config."
    print "Setting MOTD..."
    env.user = env.server.root_login
    motd = motd or env.server['motd.txt']
    # Render the MOTD template with the full server config as its context.
    put_template( motd, "/etc/motd", env.server, use_sudo=True)
@task
def install_gems(gems=None):
    "Install the given ruby gems."
    env.user = env.server.root_login
    # NOTE(review): this reads env.project.gems while every sibling task
    # reads env.server — build() calls it with no project bound, which would
    # raise AttributeError unless something else sets env.project. Confirm.
    if env.project.gems or gems:
        sudo("apt-get -qy install ruby")
    if env.project.gems:
        sudo("gem install %s" % " ".join(env.project.gems))
    if gems:
        sudo("gem install %s" % " ".join(gems))
@task
def pip_install(packages=None):
    "Install the given python packages."
    env.user = env.server.root_login
    packages = packages or env.server.pip
    # NOTE(review): unlike install_packages there is no emptiness guard —
    # " ".join(None) raises TypeError when no packages are configured.
    sudo("pip install %s" % " ".join(packages))
|
print("Let's make a band name!")
name_1 = input("What is your first name? ")
name_2 = input("What is your home state's name? ")
print(f'Your band name is {name_1} {name_2}')
|
import random

# Work-in-progress battleship game; most of the original logic further down
# is commented out and only this skeleton is live.
miss = 0
hits = 0
battleshipps = []   # bot ship coordinates (never populated by live code)
let = ['a','b','c','d','e','f','g','h','i','j']  # board column letters
ch = []             # coordinates the bot has already chosen
ps = input('input ship position')
missed = []

def guess():
    """Pick a random coordinate such as 'c7'.

    NOTE(review): the result `ks` is discarded — the function returns None.
    """
    pdd = random.choice(let)
    pd = random.randint(1,10)
    ks = (pdd+str(pd))

class botship():
    def carrier():
        """Try to place a 5-cell vertical carrier around a random centre row.

        NOTE(review): several live bugs — `rand` is undefined (NameError;
        presumably `random` was meant), the `while go == True` loop never
        sets go to False, the tail recursion into carrier() has no base
        case, and only updown == 1 is handled. Indentation below is a
        best-effort reconstruction.
        """
        go = True
        while go == True:
            pdd = random.choice(let)
            pd = random.randint(1,10)
            ks = (pdd+str(pd))
            updown = rand.randint(1,2)
            carrierindex = []
            if updown == 1:#up2)
                carrierindex.append(pd+2)
                carrierindex.append(pd+1)
                carrierindex.append(pd)
                carrierindex.append(pd-1)
                carrierindex.append(pd-2)
                if all(i > 0 for i in carrierindex):
                    print('yay')
                else:
                    print()
                    carrier()
## if ks in battleshipps:
## choos()
## else:
## battleshipps.append(ks)
##go = True
##while go == True:
## ks = choos()
## print(ks)
## if ks in ch:
## # resets to while
## print()
## elif go == True:
## ch.append(ks)
## x = input('input coorinate')
## if x in missed:
## print('already chosen -- try again')
## elif x == bs:
## print('yay')
## go = False
## hits = hits +1
## else:
## print('miss')
## miss = miss+1
## missed.append(x)
###---------------bot turn----------------------------------#
## if ks == ps :
## print('bot wins')
## go = False
## elif:
## print()
##
## print(miss,' misses ',hits,' hits')
##
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#返回函数
#高阶函数除了可以接受函数参数外,还可以将函数作为结果值返回
def lazy_sum(*args):
    """Return a closure that sums `args` only when it is eventually called."""
    def sum():
        total = 0
        for value in args:
            total += value
        return total
    return sum
f = lazy_sum(1,2,3,4,5,6,7,8,9)
print(f)
print(f())
print(lazy_sum(1,3,5,7,9) == lazy_sum(1,3,5,7,9))
#每次调用都返回一个新的函数,即使传入相同的参数
#闭包
def count():
    """Closure-pitfall demo: all three returned functions share the loop
    variable i, so each returns 9 (the final value squared) when called."""
    fs = []
    for i in range(1, 4):
        fs.append(lambda: i * i)
    return fs
f1, f2, f3 = count()
print(f1(), f2(), f3())
#打印结果全部都是9!原因就在于返回的函数引用了变量i,但它并非立刻执行。等到3个函数都返回时,他们所引用的变量已经变成了3
#########返回闭包时牢记一点:返回函数不要引用任何循环变量,或者后续会发生变化的变量############
def new_count():
    """Correct closure demo: each function captures its own loop value by
    binding it through an immediately-invoked factory."""
    return [(lambda j: (lambda: j * j))(i) for i in range(1, 4)]
f4, f5, f6 = new_count()
print(f4(), f5(), f6())
#Pratice利用闭包返回一个计数器函数
def createCounter():
    """Return a counter closure: successive calls yield 1, 2, 3, ..."""
    tally = [0]  # single-element list holds mutable state without nonlocal
    def counter():
        tally[0] += 1
        return tally[0]
    return counter
# 测试:
counterA = createCounter()
print(counterA(), counterA(), counterA(), counterA(), counterA()) # 1 2 3 4 5
counterB = createCounter()
if [counterB(), counterB(), counterB(), counterB()] == [1, 2, 3, 4]:
print('测试通过!')
else:
print('测试失败!')
#匿名函数
#因为函数没有名字,不必担心函数名冲突
#lambda x: x * x实际就是:
#def f(x):
# return x * x
f = lambda x: x * x
print(f)
print(f(5))
#把匿名函数作为返回值返回
def build(x, y):
    """Return a zero-argument closure that evaluates x*x + y*y on demand."""
    squares = lambda: x * x + y * y
    return squares
print(build(2, 4)())
#Practice
L = list(filter(lambda n: n % 2 == 1, range(1, 20)))
print(L)
#装饰器decorator
#函数对象有一个__name__属性,可以拿到函数的名字
def now():
    """Print a fixed date stamp (target function for the decorator demos)."""
    print('2019-5-13')
f = now
print(f())
print(now.__name__)
print(f.__name__)
def log(text):
    """Decorator factory: before each call, print `text` plus the wrapped
    function's name, then forward all arguments unchanged."""
    def decorator(func):
        def wrapper(*args, **kw):
            banner = '%s %s():' % (text, func.__name__)
            print(banner)
            return func(*args, **kw)
        return wrapper
    return decorator
@log('execute')
def printDate():
print('2019-5-13')
print(printDate())
#@log放到printDate()函数的定义处,相当于执行了:printDate = log(printDate)
#log()是一个decorator
import time, functools
def metric(fn):
    """Decorator that reports each call's wall-clock duration in milliseconds.

    Bug fixed: the original returned ``fn`` itself instead of ``wrapper``,
    so the decorator never timed anything. The message's misspelling
    ('enecuted') is corrected as part of the same fix.

    Args:
        fn: the function to instrument.

    Returns:
        A wrapped function with the same signature and (via functools.wraps)
        the same __name__/__doc__ that prints the elapsed time per call.
    """
    @functools.wraps(fn)
    def wrapper(*args, **kw):
        start = time.time()
        result = fn(*args, **kw)
        stop = time.time() - start
        print('%s executed in %s ms' % (fn.__name__, stop * 1000))
        return result
    return wrapper
# Two timed demo functions; the sleeps simulate slow work.
@metric
def fast(x, y):
    time.sleep(0.0012)
    return x + y
@metric
def slow(x, y, z):
    time.sleep(0.1234)
    # NOTE(review): trailing semicolon is legal but un-Pythonic.
    return x * y * z;
f = fast(11, 22)
s = slow(11, 22, 33)
# Only failures are reported; silence means the decorated calls
# returned the right values.
if f != 33:
    print('测试失败!')
elif s != 7986:
    print('测试失败!')
# Partial functions
# int() accepts a `base` keyword argument whose default is 10.
print(int('12345', base=8))
print(int('12345', 16))
# Define an int2() function for base-2 conversion.
def int2(x, base=2):
    """Convert string *x* to an int, defaulting to base 2."""
    return int(x, base)
print(int2('1000000'))
print(int2('1010101'))
print(int2('1000000', base=10))
# functools.partial accepts a function object, *args and **kw.
int8 = functools.partial(int, base=8) # equivalent to: kw = {'base': 8}; int('10010', **kw)
print(int8('10010'))
max2 = functools.partial(max, 10) # equivalent to: args = (10, 5, 6, 7); max(*args)
print(max2(5, 6, 7))
|
"""
Tests for the coupling estimators implemented in cassiopeia/tools/coupling.py
"""
import unittest
import networkx as nx
import numpy as np
import pandas as pd
import cassiopeia as cas
from cassiopeia.data import CassiopeiaTree
from cassiopeia.data import utilities as data_utilities
from cassiopeia.mixins import CassiopeiaError
class TestDataUtilities(unittest.TestCase):
    """Tests for cas.tl.compute_evolutionary_coupling on a small fixture tree.

    Expected means/standard deviations below were precomputed with the
    fixed RandomState seed and 10 shuffles used in each test.
    """
    def setUp(self) -> None:
        """Build a 10-node tree whose 6 leaves carry CellType/nUMI metadata."""
        tree = nx.DiGraph()
        tree.add_edges_from(
            [
                ("A", "B"),
                ("A", "C"),
                ("B", "D"),
                ("B", "E"),
                ("B", "F"),
                ("E", "G"),
                ("E", "H"),
                ("C", "I"),
                ("C", "J"),
            ]
        )
        meta_data = pd.DataFrame.from_dict(
            {
                "D": ["TypeB", 10],
                "F": ["TypeA", 5],
                "G": ["TypeA", 3],
                "H": ["TypeB", 22],
                "I": ["TypeC", 2],
                "J": ["TypeC", 11],
            },
            orient="index",
            columns=["CellType", "nUMI"],
        )
        self.tree = CassiopeiaTree(tree=tree, cell_meta=meta_data)
    def test_evolutionary_coupling_basic(self):
        """Couplings over CellType match hand-computed Z-scores."""
        random_state = np.random.RandomState(1231234)
        evolutionary_coupling = cas.tl.compute_evolutionary_coupling(
            self.tree,
            meta_variable="CellType",
            random_state=random_state,
            minimum_proportion=0.0,
            number_of_shuffles=10,
        )
        inter_cluster_distances = data_utilities.compute_inter_cluster_distances(
            self.tree, meta_item="CellType"
        )
        # background computed with random seed set above and 10 shuffles
        # (state1, state2): (mean, sd)
        expected_summary_stats = {
            ("TypeA", "TypeA"): (1.7, 0.6000000000000001),
            ("TypeA", "TypeB"): (3.55, 0.4716990566028302),
            ("TypeA", "TypeC"): (3.55, 0.4716990566028302),
            ("TypeB", "TypeA"): (3.55, 0.4716990566028302),
            ("TypeB", "TypeB"): (2.0, 0.5),
            ("TypeB", "TypeC"): (3.65, 0.45),
            ("TypeC", "TypeA"): (3.55, 0.4716990566028302),
            ("TypeC", "TypeB"): (3.65, 0.45),
            ("TypeC", "TypeC"): (1.8, 0.5567764362830022),
        }
        # Expected coupling = Z-score of the observed distance against the
        # shuffled background: (observed - mean) / sd.
        expected_coupling = inter_cluster_distances.copy()
        for s1 in expected_coupling.index:
            for s2 in expected_coupling.columns:
                mean = expected_summary_stats[(s1, s2)][0]
                sd = expected_summary_stats[(s1, s2)][1]
                expected_coupling.loc[s1, s2] = (
                    inter_cluster_distances.loc[s1, s2] - mean
                ) / sd
        pd.testing.assert_frame_equal(
            expected_coupling, evolutionary_coupling, atol=0.001
        )
        # make sure errors are raised for numerical data
        self.assertRaises(
            CassiopeiaError,
            cas.tl.compute_evolutionary_coupling,
            self.tree,
            "nUMI",
        )
    def test_evolutionary_coupling_custom_dissimilarity_map(self):
        """Couplings honor a user-supplied leaf-to-leaf dissimilarity map."""
        weight_matrix = pd.DataFrame.from_dict(
            {
                "D": [0.0, 0.5, 1.2, 0.4, 0.5, 0.6],
                "F": [0.5, 0.0, 3.0, 1.1, 3.0, 0.1],
                "G": [1.2, 3.0, 0.0, 0.8, 0.2, 0.8],
                "H": [0.4, 1.1, 0.8, 0.0, 2.0, 2.1],
                "I": [0.5, 3.0, 0.2, 2.0, 0.0, 0.1],
                "J": [0.6, 0.1, 1.8, 2.1, 0.1, 0.0],
            },
            orient="index",
            columns=["D", "F", "G", "H", "I", "J"],
        )
        random_state = np.random.RandomState(1231234)
        evolutionary_coupling = cas.tl.compute_evolutionary_coupling(
            self.tree,
            meta_variable="CellType",
            random_state=random_state,
            minimum_proportion=0.0,
            number_of_shuffles=10,
            dissimilarity_map=weight_matrix,
        )
        inter_cluster_distances = data_utilities.compute_inter_cluster_distances(
            self.tree, meta_item="CellType", dissimilarity_map=weight_matrix
        )
        # background computed with random seed set above and 10 shuffles
        # (state1, state2): (mean, sd)
        expected_summary_stats = {
            ("TypeB", "TypeB"): (0.695, 0.5456418239101545),
            ("TypeB", "TypeA"): (1.0000000000000002, 0.281291663580704),
            ("TypeB", "TypeC"): (1.0925, 0.44763964301656745),
            ("TypeA", "TypeB"): (1.0000000000000002, 0.3148412298286232),
            ("TypeA", "TypeA"): (0.63, 0.4550824101193101),
            ("TypeA", "TypeC"): (1.2349999999999999, 0.391503512117069),
            ("TypeC", "TypeB"): (1.0675000000000001, 0.4493119740225047),
            ("TypeC", "TypeA"): (1.26, 0.41791147387933725),
            ("TypeC", "TypeC"): (0.4699999999999999, 0.41424630354415953),
        }
        expected_coupling = inter_cluster_distances.copy()
        for s1 in expected_coupling.index:
            for s2 in expected_coupling.columns:
                mean = expected_summary_stats[(s1, s2)][0]
                sd = expected_summary_stats[(s1, s2)][1]
                expected_coupling.loc[s1, s2] = (
                    inter_cluster_distances.loc[s1, s2] - mean
                ) / sd
        pd.testing.assert_frame_equal(
            expected_coupling, evolutionary_coupling, atol=0.001
        )
    def test_evolutionary_coupling_minimum_proportion(self):
        """States below the minimum proportion are dropped from the matrix."""
        # Relabel leaf J so TypeC and TypeD each hold only 1/6 of the cells.
        self.tree.cell_meta.loc["J", "CellType"] = "TypeD"
        random_state = np.random.RandomState(1231234)
        evolutionary_coupling = cas.tl.compute_evolutionary_coupling(
            self.tree,
            meta_variable="CellType",
            random_state=random_state,
            minimum_proportion=1 / 6, # This will drop types C and D
            number_of_shuffles=10,
        )
        # make sure TypeC and TypeD are not in the evolutionary coupling matrix
        expected_types = ["TypeA", "TypeB"]
        self.assertCountEqual(expected_types, evolutionary_coupling.index)
        self.assertCountEqual(expected_types, evolutionary_coupling.columns)
        # make sure couplings are correct
        inter_cluster_distances = data_utilities.compute_inter_cluster_distances(
            self.tree, meta_item="CellType"
        )
        inter_cluster_distances = inter_cluster_distances.loc[
            expected_types, expected_types
        ]
        expected_summary_stats = {
            ("TypeB", "TypeB"): (1.4, 0.19999999999999998),
            ("TypeB", "TypeA"): (2.6, 0.19999999999999998),
            ("TypeA", "TypeB"): (2.6, 0.19999999999999998),
            ("TypeA", "TypeA"): (1.4, 0.19999999999999998),
        }
        expected_coupling = inter_cluster_distances.copy()
        for s1 in expected_coupling.index:
            for s2 in expected_coupling.columns:
                mean = expected_summary_stats[(s1, s2)][0]
                sd = expected_summary_stats[(s1, s2)][1]
                expected_coupling.loc[s1, s2] = (
                    inter_cluster_distances.loc[s1, s2] - mean
                ) / sd
        evolutionary_coupling = evolutionary_coupling.loc[
            expected_types, expected_types
        ]
        pd.testing.assert_frame_equal(
            expected_coupling, evolutionary_coupling, atol=0.001
        )
if __name__ == "__main__":
unittest.main()
|
from rest_framework import serializers
from . models import Users
class UsersSerializer(serializers.ModelSerializer):
    """DRF ModelSerializer exposing every field of the Users model."""
    class Meta:
        model = Users
        fields='__all__'
        # NOTE(review): 'avartar' looks like a misspelling of 'avatar' —
        # this key must match the actual Users model field name; confirm
        # against the model before renaming.
        extra_kwargs = {'avartar': {'required': False}}
#!/usr/bin/python3
def is_same_class(obj, a_class):
    """Return True if obj is exactly an instance of a_class, else False.

    Instances of subclasses do NOT count: the exact runtime type must be
    a_class (unlike isinstance, which accepts subclasses).

    Args:
        obj: any object.
        a_class: the class to compare against.

    Returns:
        bool: True when type(obj) is exactly a_class.
    """
    # `is` compares class identity; `==` could be fooled by metaclass
    # __eq__ overrides and is un-idiomatic for type checks.
    return type(obj) is a_class
|
import sys
from os.path import abspath, dirname, join
# Make the project's src/ directory importable: take this file's
# grandparent directory and append its 'src' subfolder to sys.path.
path = join(dirname(dirname(abspath(__file__))), 'src')
sys.path.append(path)
|
# _*_ coding:utf_8 -*_
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
import time
import os
driver = webdriver.Chrome()
driver.maximize_window()
# NOTE(review): no driver.get(<url>) appears before the element lookups
# below — presumably lost when this snippet was extracted; confirm.
# Accept the registration agreement pop-up.
driver.find_element_by_css_selector('#agr_pop > div.pop_footer > a.reg_btn.reg_agree').click()
time.sleep(2)
# The draggable knob of the slider captcha.
source = driver.find_element_by_css_selector('#slideCode > div.cpt-drop-box > div.cpt-drop-btn')
print(source.size)
'''size:{'height': 40, 'width': 40}'''
# The track bar the knob slides along.
ele = driver.find_element_by_css_selector('#slideCode > div.cpt-drop-box > div.cpt-bg-bar')
print(ele.size)
'''size:{'height': 40, 'width': 300}'''
# Drag the knob by the full track width; the negative y offset keeps the
# pointer on the knob's row during the drag.
ActionChains(driver).drag_and_drop_by_offset(source,ele.size['width'],-source.size['height']).perform()
|
from service_manager import ServiceManager
from series_service import SeriesService
from cv_service import CVService
from edit_service import EditService
from export_service import ExportService
# need to explicitly import these for pyinstaller
import pymysql
import pyodbc
#import psycopg2
__all__ = [
'EditService',
'CVService',
'SeriesService',
'ExportService',
'ServiceManager',
] |
# encoding=utf-8
# Author: Yu-Lun Chiang
# Description: Test NewsCrawler
import logging
import pytest
from collections import namedtuple
from Sanga.media import udn
from Sanga.struct import NewsStruct
logger = logging.getLogger(__name__)
# Row type for the crawler test table: a human-readable case name, the
# article URL to crawl, and the NewsStruct the crawler should produce.
TEST_DATA = namedtuple(
    typename="TEST_DATA",
    field_names=[
        "name",
        "link",
        "expected_output",
    ],
)
TEST_DATA_1 = TEST_DATA(
name="聯合新聞網_1",
link="https://udn.com/news/story/7253/5550651",
expected_output=NewsStruct(
title="凌華 推出單板電腦",
content="工業電腦廠凌華科技(6166)昨(22)日推出cPCI-A3525系列CompactPCI® Serial單板電腦,搭載英特爾最新第九代Xeon®或Core™ i7處理器,專為軌道交通、航太與國防、工業自動化等高性能關鍵任務型產業的新一代應用而設計。\n\n\n\r\n凌華網路通訊暨公共建設事業處總經理高福遙表示,鐵路運輸、航太、國防及工業領域的系統整合商及解決方案提供商,需要可提供高性能與持續性可靠度,且同時便於嵌入與升級的新科技。PICMG 2.0 CPCI-S.0 CompactPCI® Serial Rev 2.0序列資料傳輸標準,即是目前開放式工業電腦規範CompactPCI (CPCI)標準中最快速、功能最豐富、最具成本效益,還可支援SATA、USB、Ethernet 等高速介面。\n\n\n\r\n凌華指出,新產品提升的效能規範,能以直接插背板介面形式的P6連接器支援外加固態硬碟子板;PCIe介面也提供多個資料頻寬選擇。另外,還可支援七個6 Gb/s SATA介面,以及高達10個USB 2.0/3.0埠於背板的連接模組。\n\n\n\n\n\n\n\n",
keywords=["凌華", "航太", "工業電腦"],
category="股市",
media="聯合新聞網",
datetime="2021-06-23T00:17:31+08:00",
link="https://udn.com/news/story/7253/5550651",
),
)
TEST_DATA_2 = TEST_DATA(
name="聯合新聞網_2",
link="https://udn.com/news/story/7254/5542521",
expected_output=NewsStruct(
title="NB散熱概念夯 雙鴻營收帶勁",
content="法人看好,受惠新冠疫情推升NB需求仍持續,加上因散熱結構改變帶動ASP的提升,預估雙鴻(3324)今年NB散熱的營收將年增逾兩成。\n\n\n\r\n散熱模組廠昨(18)日股價動起來,不僅雙鴻量增,盤中股價攻漲停195元,創波段新高外,泰碩午盤股價也衝上漲停板54.7元,拉動奇鋐股價也跟進大漲達9.19%,終場收77.2元,散熱模組族群股價回神。\n\n\n\r\n此外,顯卡散熱風扇龍頭廠動力-KY終場收4.82%、尼得科超眾漲幅也有2.16%,力致漲幅也有1.8%。\n\n\n\r\n雙鴻昨日成交量放大至1.06萬張,股價為近兩個月高點。雙鴻今年5月營收9.51億元,月減7.4%,仍較去年成長2.9%,雖然筆電需求維持高檔、新機持續出貨,主要原因則為受到客戶缺料影響,累計今年前五月營收54.08億元,年增28.75%,為同期新高。\n\n\n\r\n動力今年5月合併營收1.7億元,月減8.8%,為同期新高,且較去年同期大幅增加57.51%,主要受惠台北國際電腦展線上展(COMPUTEX 2021)於6月初舉行,下游各系統廠商紛紛推出支援NVIDIA最新款GeForce RTX 3080 Ti 和 RTX 3070 Ti晶片的顯示卡產品。",
keywords=None,
category="股市",
media="聯合新聞網",
datetime="2021-06-19T01:12:50+08:00",
link="https://udn.com/news/story/7254/5542521",
),
)
TEST_DATA_LIST = [TEST_DATA_1, TEST_DATA_2]
@pytest.fixture(scope="module")
def newsCrawler():
    """Build one shared UDN crawler instance for the whole test module."""
    logger.warning("Init News Crawler ...")
    return udn.UDN()
@pytest.mark.parametrize(
    argnames="name, link, expected_output",
    argvalues=[tuple(t) for t in TEST_DATA_LIST],
    # Readable test IDs; long URLs are truncated to 50 chars.
    ids=[
        f"{t.name}, {t.link[:50]+'...' if len(t.link) > 50 else t.link}"
        for t in TEST_DATA_LIST
    ],
)
def test_get_info(
    newsCrawler,
    name,
    link,
    expected_output,
):
    """Crawl each link live and compare the parsed result field-by-field.

    NOTE(review): hits the real udn.com site, so it needs network access
    and will break if the articles change.
    """
    output = newsCrawler.getInfo(link=link)
    assert NewsStruct.__2dict__(output) == NewsStruct.__2dict__(expected_output)
|
#!/usr/bin/python
"""
Copyright (C) 2011 Konstantin Andrusenko
See the documentation for further information on copyrights,
or contact the author. All Rights Reserved.
@package blik.nodesManager.nodesMonitor
@author Konstantin Andrusenko
@date July 9, 2011
This module contains the implementation of NodesMonitor class.
NodesMonitor is thread that monitor nodes states and update
database (nm_node table) if state changing
"""
from datetime import datetime
from datetime import timedelta
from Queue import Queue
import threading
import time
from blik.utils.config import Config
from blik.utils.databaseConnection import DatabaseConnection
from blik.utils.logger import logger
from blik.utils.friBase import FriCaller
#cluster states
CS_NOT_ACTIVE = 0
CS_ACTIVE = 1
#admin node stated
ANS_NOT_ACTIVE = 0
ANS_ACTIVE = 1
ANS_FAILED = 2
#current node states
CNS_OFF = 0
CNS_ON = 1
FINISH_FLAG = None
class NodesMonitor(threading.Thread):
    """Daemon thread that periodically checks node liveness.

    Selects active nodes of active clusters from the database, feeds them
    to a pool of MonitorWorkerThread workers via a queue, and repeats on a
    configurable interval. (Python 2 codebase: Queue, xrange,
    `except Exception, err` syntax.)
    """
    def __init__(self):
        self.__monitor_timeout = Config.nodes_monitor_timeout
        self.__threads = []
        self.__nodes_queue = Queue()
        self.__dbconn = DatabaseConnection()
        self.__stoped = False
        # Spin up the fixed worker pool before this monitor thread starts.
        for i in xrange(Config.monitor_workers_count):
            thread = MonitorWorkerThread(self.__nodes_queue, Config.monitor_wait_response_timeout)
            thread.setName('MonitorWorkerThread#%i'%i)
            thread.start()
            self.__threads.append(thread)
        threading.Thread.__init__(self, name='NodesMonitor')
    def stop(self):
        """Signal all workers to finish and wait for queued work to drain."""
        if self.__stoped:
            return
        self.__stoped = True
        # One FINISH_FLAG (None) per worker unblocks each queue.get().
        for i in self.__threads:
            self.__nodes_queue.put(FINISH_FLAG)
        self.__nodes_queue.join()
    def run(self):
        """Main poll loop: enqueue every active node, wait, sleep, repeat."""
        logger.info('NodesMonitor started!')
        while not self.__stoped:
            try:
                t0_point = datetime.now()
                rows = self.__dbconn.select("SELECT N.hostname, N.current_state FROM nm_node N, nm_cluster C \
                            WHERE N.cluster_id=C.id AND N.admin_status=%s AND C.status=%s",
                            (ANS_ACTIVE, CS_ACTIVE))
                logger.debug('NodesMonitor: Selected %i nodes for checking state'%len(rows))
                for row in rows:
                    self.__nodes_queue.put((row[0], row[1]))
                self.__nodes_queue.join()
            except Exception, err:
                logger.error('NodesMonitor failed: %s' % err)
            finally:
                #calculate timeout
                dt = datetime.now() - t0_point
                wait_time = timedelta(0, self.__monitor_timeout) - dt
                if wait_time.days == 0:
                    #sleep cycle
                    for i in range(wait_time.seconds):
                        #check daemon state every second
                        if self.__stoped:
                            break
                        time.sleep(1.0)
                    time.sleep(wait_time.microseconds * 0.000001)
        logger.info('NodesMonitor stoped!')
class MonitorWorkerThread(threading.Thread):
    """Worker that pings queued nodes and records on/off state transitions.

    Pulls (hostname, last_state) items off the shared queue, issues a
    'LIVE' FRI call, and updates nm_node.current_state only when the
    observed liveness differs from the last persisted state.
    """
    def __init__(self, queue, wait_timeout):
        self.queue = queue
        self.wait_timeout = wait_timeout
        self.dbconn = DatabaseConnection()
        threading.Thread.__init__(self)
    def run(self):
        """Consume queue items until the FINISH_FLAG sentinel arrives."""
        logger.info('%s started!'%self.getName())
        fri_caller = FriCaller()
        packet = {'id':0, 'node': '',
                        'operation': 'LIVE'}
        while True:
            try:
                item = self.queue.get()
                if item == FINISH_FLAG:
                    logger.info('%s stoped!'%self.getName())
                    break
                hostname, last_state = item
                # Nonzero ret_code means the node did not answer the ping.
                ret_code, ret_message = fri_caller.call(hostname, packet, timeout=self.wait_timeout)
                if ret_code:
                    logger.debug('Node with hostname %s is not live!!!'%hostname)
                else:
                    logger.debug('Node with hostname %s is live!'%hostname)
                # Persist only state transitions, not steady states.
                if ret_code and (last_state == CNS_ON):
                    self.__change_node_state(hostname, CNS_OFF)
                elif (not ret_code) and (last_state == CNS_OFF):
                    self.__change_node_state(hostname, CNS_ON)
            except Exception, err:
                logger.error('%s failed: %s'%(self.getName(), err))
            finally:
                # task_done() runs for every get(), including the sentinel,
                # so queue.join() in NodesMonitor.stop() can complete.
                self.queue.task_done()
    def __change_node_state(self, hostname, state):
        """Write the node's new current_state to the nm_node table."""
        self.dbconn.modify("UPDATE nm_node SET current_state=%s WHERE hostname=%s", (state,hostname))
|
class Order:
    """Number wrapper that swaps the meaning of '+' and '*'.

    The script below swaps '+' and '*' textually before eval(), and these
    overloads swap them back — giving '+' the precedence of '*' and vice
    versa (apparently an Advent of Code 2020 day 18 trick; see the
    'days\\18' input path).
    """
    def __init__(self, value):
        self.value = value
    def __repr__(self):
        return str(self.value)
    def __add__(self, other):
        # '+' actually multiplies (it was '*' in the original expression).
        return Order(self.value * other.value)
    def __radd__(self, other):
        # Lets a plain int accumulate an Order (used for the running total).
        return self.value + other
    def __mul__(self, other):
        # '*' actually adds (it was '+' in the original expression).
        return Order(self.value + other.value)
# Read the puzzle input (one expression per line; blanks dropped).
with open(r'days\18\input.txt', 'r') as f:
    t = f.read().split('\n')
    t = list(filter(None, t))
total = 0
for s in t:
    # Swap '+' and '*' so Order's overloads can undo the swap with the
    # precedence the puzzle requires.
    s = ''.join('*' if i == '+' else '+' if i == '*' else i for i in list(s))
    # Wrap each digit in Order(...) so the overloaded operators apply.
    s = ''.join(f'Order({i})' if i.isdigit() else str(i) for i in list(s))
    # eval on trusted puzzle input only — deliberate here.
    total += eval(s)
    # print(eval(s))
print(total)
|
from django.urls import path
from rest_framework_jwt.views import (
obtain_jwt_token,
verify_jwt_token,
refresh_jwt_token,
)
from . import views
from config.views import validate_jwt_token, CustomObtainJSONWebToken
app_name = "users"
# Member / login-related endpoints (URLs).
urlpatterns = [
    path("", views.UserList.as_view()),
    # JWT handling: validate / obtain / verify / refresh tokens.
    path("validate/", validate_jwt_token),
    path("login/", obtain_jwt_token),
    path("verify/", verify_jwt_token),
    path("refresh/", refresh_jwt_token),
    # Endpoints for the authenticated user ("me").
    path("me/", views.MeView.as_view()),
    path("me/favs/", views.FavsView.as_view()),
    path("<int:pk>/", views.user_detail),
    # Account recovery and password management.
    path("findusername/", views.findUsername),
    path("findpassword/", views.findPassword),
    path("me/changepassword/", views.ChangePasswordView.as_view()),
]
|
from g3 import Parser
# Load a sample program that is expected to parse successfully.
s = ''
with open('programs/success.txt') as f:
    s = f.read()
print(s)
# Parse and run it, printing the result.
p = Parser(s)
print(p.run())
|
from __future__ import division
from Graph import PolledGraph
import gtk, os
class HScrollGraph(PolledGraph):
    """A graph that shows time on the horizontal axis, multiple channels
    of data on the Y axis, and scrolls horizontally so current data
    is always on the right edge of the graph.
    gridSize: grid size, in pixels
    scrollRate: Graph scrolling rate, in pixels per second
    """
    def __init__(self,
                 size = (384,128),
                 channels = [],
                 gridSize = 32,
                 scrollRate = 50,
                 pollInterval = 10,
                 bgColor = None,
                 gridColor = None,
                 ):
        # NOTE(review): mutable default `channels=[]` is shared across
        # calls; safe only if PolledGraph copies it — confirm.
        PolledGraph.__init__(self, size, channels, pollInterval, bgColor, gridColor)
        self.gridSize = gridSize
        self.scrollRate = scrollRate
        self.gridPhase = 0.0 # Number of pixels we've scrolled, modulo gridSize
        # Normally we copy from backbuffer to backbuffer when scrolling.
        # This breaks under the win32 implementation of GDK, so use a temporary
        # buffer then.
        self.useTemporaryPixmap = os.name == "nt"
    def resized(self):
        """Recreate size-dependent buffers after a window resize."""
        PolledGraph.resized(self)
        if self.useTemporaryPixmap:
            self.tempPixmap = gtk.gdk.Pixmap(self.window, self.width, self.height)
    def graphChannel(self, channel):
        """Hook for graphing the current values of each channel. Called for
        every channel each time the graph is scrolled, and called on one
        channel each time that channel changes.
        """
        pass
    def drawBackground(self):
        """Draw our grid pixmap and backing store"""
        # Create a grid pixmap as wide as our grid and as high as our window,
        # used to quickly initialize new areas of the graph with our grid pattern.
        self.gridPixmap = gtk.gdk.Pixmap(self.window, self.gridSize, self.height)
        self.initGrid(self.gridPixmap, self.gridSize, self.height)
        # Initialize the backing store
        self.drawGrid(0, self.width)
    def initGrid(self, drawable, width, height):
        """Draw our grid on the given drawable"""
        drawable.draw_rectangle(self.bgGc, True, 0, 0, width, height)
        # Horizontal grid lines
        for y in range(0, height, self.gridSize):
            drawable.draw_rectangle(self.gridGc, True, 0, y, width, 1)
        # Vertical grid lines
        for x in range(0, width, self.gridSize):
            drawable.draw_rectangle(self.gridGc, True, x, 0, 1, height)
    def drawGrid(self, x, width):
        """Draw grid lines on our backing store, using the current gridPhase,
        to the rectangle (x, 0, width, self.height)
        """
        srcOffset = (x + int(self.gridPhase)) % self.gridSize
        gc = self.get_style().fg_gc[gtk.STATE_NORMAL]
        if srcOffset > 0:
            # Draw the first partial grid column
            columnWidth = self.gridSize - srcOffset
            if columnWidth > width:
                columnWidth = width
            self.backingPixmap.draw_drawable(gc, self.gridPixmap, srcOffset, 0, x, 0,
                                             columnWidth, self.height)
            x += columnWidth
            width -= columnWidth
        while width > 0:
            # Draw each remaining full or partial grid column
            columnWidth = self.gridSize
            if columnWidth > width:
                columnWidth = width
            self.backingPixmap.draw_drawable(gc, self.gridPixmap, 0, 0, x, 0,
                                             columnWidth, self.height)
            x += self.gridSize
            width -= self.gridSize
    def integrate(self, dt):
        """Update the graph, given a time delta from the last call to this function"""
        # Can't update if we aren't mapped
        if not (self.width and self.height):
            return
        # Calculate the new gridPhase and the number of freshly exposed pixels,
        # correctly accounting for subpixel gridPhase changes.
        oldGridPhase = self.gridPhase
        self.gridPhase += dt * self.scrollRate
        newPixels = int(self.gridPhase) - int(oldGridPhase)
        self.gridPhase %= self.gridSize
        if newPixels > 0:
            # Scroll the backing store left by newPixels pixels
            if self.useTemporaryPixmap:
                # We can't safely copy from and to the same pixmap, copy this
                # via a temporary off-screen buffer.
                self.tempPixmap.draw_drawable(self.get_style().fg_gc[gtk.STATE_NORMAL],
                                              self.backingPixmap, newPixels, 0, 0, 0,
                                              self.width - newPixels, self.height)
                self.backingPixmap.draw_drawable(self.get_style().fg_gc[gtk.STATE_NORMAL],
                                                 self.tempPixmap, 0, 0, 0, 0,
                                                 self.width - newPixels, self.height)
            else:
                # Copy directly from and to the backbuffer
                self.backingPixmap.draw_drawable(self.get_style().fg_gc[gtk.STATE_NORMAL],
                                                 self.backingPixmap, newPixels, 0, 0, 0,
                                                 self.width - newPixels, self.height)
            # Draw a blank grid in the new area
            self.drawGrid(self.width - newPixels, newPixels)
            # Let subclasses update their positions to account for our scrolling
            self.exposedPixels(newPixels)
            # Graph all channels
            for channel in self.channels:
                # Effectively clear the channel's "dirty flag", so in deciding
                # whether a draw is necessary if we're not scrolling we don't account
                # for changes that will be drawn now.
                channel.hasChanged(self)
                self.graphChannel(channel)
            # Schedule an expose event to blit the whole backbuffer to the screen
            self.queue_draw_area(0, 0, self.width, self.height)
        else:
            # Even if we're not scrolling, we should update the graph if the channel
            # values have changed. This is especially necessary when the channels
            # are being updated much more often than the graph is scrolled.
            for channel in self.channels:
                if channel.hasChanged(self):
                    self.graphChannel(channel)
                    self.queue_draw_area(self.width-1, 0, 1, self.height)
    def exposedPixels(self, nPixels):
        """Called when the graph scrolls, with the number of pixels it has scrolled by.
        Used as a hook for updating drawing coordinates in subclasses.
        """
        pass
    def getTweakControls(self):
        """Expose scrollRate as a tweakable quantity alongside the parent's controls."""
        import Tweak
        return PolledGraph.getTweakControls(self) + [
            Tweak.Quantity(self, 'scrollRate', range=(0,200), name="Scroll Rate")
            ]
class HScrollLineGraph(HScrollGraph):
    """A horizontally scrolling real-time line plot.
    Expects scalar values within the given range.

    Fix: dict.has_key() (removed in Python 3) replaced with the `in`
    operator, which works identically in Python 2 and 3.
    """
    def __init__(self,
                 size = (384,128),
                 channels = [],
                 gridSize = 32,
                 scrollRate = 50,
                 range = (0,1),
                 pollInterval = 10,
                 bgColor = None,
                 gridColor = None,
                 ):
        HScrollGraph.__init__(self, size, channels, gridSize,
                              scrollRate, pollInterval, bgColor, gridColor)
        self.range = range
        self.penVectors = {}
    def graphChannel(self, channel):
        """Plot one channel's current value at the right edge of the graph."""
        value = channel.getValue()
        if value is None:
            return
        # Scale the channel value to match a range of (0,1)
        scaled = (value - self.range[0]) / (self.range[1] - self.range[0])
        scaled = min(1.5, max(-0.5, scaled))
        # Calculate a current pen position, always at the right side of the graph
        penVector = (self.width-1, int((self.height-1) * (1-scaled)))
        # If we have both a new pen vector and an old pen vector, we can draw a line
        if channel in self.penVectors:
            oldPenVector = self.penVectors[channel]
            self.graphChannelLine(channel, oldPenVector, penVector)
        # Store the pen vector for later
        self.penVectors[channel] = penVector
    def graphChannelLine(self, channel, oldPenVector, penVector):
        """Draw a line from the previous pen position to the current one."""
        self.backingPixmap.draw_line(channel.getGC(self),
                                     oldPenVector[0], oldPenVector[1],
                                     penVector[0], penVector[1])
    def resized(self):
        HScrollGraph.resized(self)
        # Invalidate saved pen vectors
        self.penVectors = {}
    def exposedPixels(self, nPixels):
        """Scrolls our old pen vectors along with the graph,
        culling out old vectors while we're at it.
        """
        # Note that it's important to use items() here,
        # since the penVectors dict might change while we're
        # iterating.
        for channel, penVector in self.penVectors.items():
            if channel in self.channels:
                self.penVectors[channel] = (
                    penVector[0] - nPixels,
                    penVector[1])
            else:
                del self.penVectors[channel]
class HScrollAreaGraph(HScrollLineGraph):
    """A horizontally scrolling real-time filled area plot."""
    def graphChannelLine(self, channel, oldPenVector, penVector):
        """Fill the quad between the two pen positions and the bottom edge."""
        bottom = self.height - 1
        corners = (
            (oldPenVector[0], oldPenVector[1]),
            (penVector[0], penVector[1]),
            (penVector[0], bottom),
            (oldPenVector[0], bottom),
        )
        self.backingPixmap.draw_polygon(channel.getGC(self), True, corners)
|
"""
From page 263
Write a program that, given the URL of a web page will attempt to down-
load every linked page on the page. The program should flag any pages
that have a 404 "Not Found" status code and print them out as broken links.
"""
import bs4
import logging
import os
import requests
URL = r'http://www.warnerbros.com/archive/spacejam/movie/'
logger = logging.getLogger('automate_boring.image_site_downloader')
if __name__ == '__main__':
    # Download every page linked from URL, flagging 404s as broken links.
    os.makedirs('links', exist_ok=True)
    res = requests.get(URL)
    res.raise_for_status()
    soup = bs4.BeautifulSoup(res.text, 'html.parser')
    link_elems = soup.select('a')
    for link_elem in link_elems:
        link_url = link_elem.get('href')
        if not link_url:
            # Fix: <a> tags without an href returned None and crashed on
            # the slice below — skip anchors/JS-only links.
            continue
        if not link_url.startswith('http'):
            link_url = URL + link_url  # relative link
        link_res = requests.get(link_url)
        try:
            link_res.raise_for_status()
        except requests.exceptions.HTTPError:
            if link_res.status_code == 404:
                print('broken link {}'.format(link_url))
                # Fix: the original fell through and saved the 404 error
                # page as if it were real content.
                continue
            raise
        basename = os.path.basename(link_url)
        if basename:
            basename = basename.replace('?', '')
            with open(os.path.join('links', basename), 'wb') as f:
                # Stream in 100 kB chunks to avoid loading big pages at once.
                for chunk in link_res.iter_content(100000):
                    f.write(chunk)
|
# Generated by Django 2.2.4 on 2021-05-04 03:46
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration altering the default of User.birthday."""
    dependencies = [
        ('login_registration_app', '0019_auto_20210503_2344'),
    ]
    operations = [
        migrations.AlterField(
            model_name='user',
            name='birthday',
            # NOTE(review): the default is a fixed datetime captured when
            # makemigrations ran — a callable (e.g. timezone.now) was
            # probably intended on the model. Since migrations are
            # generated, fix the model field and re-generate rather than
            # hand-editing this file.
            field=models.DateField(default=datetime.datetime(2021, 5, 3, 23, 46, 49, 266649)),
        ),
    ]
|
import matplotlib.pyplot as plt
# generates bar graph for output by title analysis file
# This bar graph is based on dictionary of POS, TAG, DEP counts
# Frequency of each spaCy dependency label in the analyzed titles.
POS = {'compound': 22698, 'ROOT': 15556, 'punct': 14358, 'nsubj': 8148,
'aux': 3406, 'neg': 588, 'advmod': 2908, 'dobj': 6091, 'mark': 691,
'advcl': 803, 'det': 5584, 'ccomp': 1140, 'nummod': 1779, 'nsubjpass': 310,
'acl': 725, 'prep': 10420, 'amod': 4123, 'pobj': 10502, 'auxpass': 417,
'appos': 2184, 'nmod': 1197, 'cc': 1721, 'conj': 1998, 'intj': 271,
'expl': 36, 'quantmod': 148, 'pcomp': 558, 'relcl': 457, 'xcomp': 866,
'acomp': 561, 'npadvmod': 900, 'attr': 743, 'prt': 486, 'dep': 767,
'poss': 1253, 'case': 452, 'agent': 191, 'csubj': 70, '': 57, 'dative': 87,
'predet': 23, 'oprd': 71, 'preconj': 4, 'meta': 8, 'parataxis': 21,
'csubjpass': 3}
# One bar per label; tiny tick labels so all of them fit on the axis.
ax = plt.figure().add_subplot(1,1,1)
plt.bar(list(POS.keys()), POS.values(), color='g')
plt.setp(ax.get_xticklabels(), fontsize=4)
plt.savefig('dep.jpg')
|
import numpy as np
epsilon = 1e-5  # tolerance for the area-based point-in-triangle test

class Triangle:
    """A 2-D triangle defined by three corner points a, b, c."""
    def __init__(self, a, b, c):
        self.a, self.b, self.c = np.array(a), np.array(b), np.array(c)

    def closest_point_to(self, x):
        """Return the boundary point nearest to x, or None when x is inside."""
        if self.is_inside(x):
            return None
        best_point = None
        best_dist = np.inf
        for edge in self.segments():
            candidate = edge.closest_point_to(x)
            dist = np.linalg.norm(x - candidate)
            if dist < best_dist:
                best_dist = dist
                best_point = candidate
        return best_point

    def is_inside(self, x):
        """True when x lies in the triangle (sub-area decomposition test)."""
        whole = self.area()
        parts = (
            Triangle(self.a, self.b, x).area()
            + Triangle(self.b, self.c, x).area()
            + Triangle(self.c, self.a, x).area()
        )
        # The three sub-triangle areas sum to the whole area iff x is inside.
        return np.abs(whole - parts) < epsilon

    def area(self):
        """Unsigned area via the shoelace formula."""
        a, b, c = self.a, self.b, self.c
        return np.abs((c[0]*(a[1] - b[1]) + a[0]*(b[1] - c[1]) + b[0]*(c[1] - a[1])) / 2.0)

    def segments(self):
        """The three edges as Segment objects, in a->b->c->a order."""
        return (Segment(self.a, self.b), Segment(self.b, self.c), Segment(self.c, self.a))
class Segment:
    """A 2-D line segment between points p0 and p1."""
    def __init__(self, p0, p1):
        self.p0, self.p1 = np.array(p0), np.array(p1)

    def closest_point_to(self, x):
        """Return the point on the segment closest to x.

        Solves min_{0<=t<=1} ||t*(p1-p0) + p0 - x||_{2}^{2}.

        Fix: a degenerate segment (p0 == p1) previously divided by zero;
        now the single point itself is returned.
        """
        v = self.p1 - self.p0
        b = self.p0 - x
        vv = np.dot(v, v)
        if vv == 0:
            # Degenerate segment: both endpoints coincide.
            return self.p0
        t = -np.dot(v, b) / vv
        if 0 <= t <= 1:
            # The perpendicular foot falls within the segment.
            return t * (self.p1 - self.p0) + self.p0
        # Otherwise the nearer endpoint is the minimizer.
        if np.linalg.norm(x - self.p0) < np.linalg.norm(x - self.p1):
            return self.p0
        return self.p1
|
## This program is a bulk entry tool. It uses the Student class, which holds: name, student ID #,
## GPA, expected grade for course, and full/part-time (as name, ID, GPA, grade, and time). It
## asks for input to create size instances of the student object, then pickles them in a file chosen
## by the user. The list_tool module does most of the work.
import pickle
import student
import list_tool
# define main
def main():
    """Prompt for a student count, build and pickle the list, then verify it.

    All heavy lifting (building Student objects, pickling, re-reading)
    is delegated to the list_tool module.
    """
    # done flag and while loop ensures valid input
    done = False
    while done == False:
        # try loop handles exceptions and gets valid input
        # NOTE(review): bare `except` also swallows KeyboardInterrupt;
        # `except ValueError` would be safer here.
        try:
            size = int(input("I'm ready to make a list for you. How many students are we adding? "))
            if size > 0:
                done = True
            else:
                print('\nI need an integer greater than 0.\n')
        except:
            print('\nI need an integer greater than 0.\n')
    # call make_list
    object_list = list_tool.make_list(size)
    # get name of file to pickle the list to
    use_file = str(input("\nI'm ready to pickle your list. What would you like to name the file? "))
    # ensure file ending conventions are followed
    if use_file.endswith('.dat') == False:
        use_file += '.dat'
        print("\nI've appended .dat to your file name.\n")
    # call list_brine
    list_tool.list_brine(use_file, object_list)
    # confirmation message
    print("I've pickled your list in", use_file, "\n")
    # help message
    print("I'll pull that data back up and let you see it just to double-check.\n")
    # call get_file
    list_tool.get_file(use_file)
# call main
main()
|
y
import time
import multiprocessing as mp
from multiprocessing import Pool
print(mp.cpu_count())
def f(a_list):
    """Sum the squares of a_list, sleeping 0.1 s per element (slow-work demo)."""
    total = 0
    for value in a_list:
        total += value * value
        time.sleep(0.1)  # artificial delay to make parallelism measurable
    return total
def f_mp(a_list):
    """Sum-of-squares of a_list computed across 5 worker processes.

    Splits the list into 5 interleaved chunks, maps f over them in a
    process pool, and sums the partial results.

    Fix: the original never closed/joined the Pool, leaking worker
    processes; the context manager terminates them deterministically.
    """
    chunks = [a_list[i::5] for i in range(5)]
    with Pool(processes=5) as pool:
        results = pool.map(f, chunks)
    return sum(results)
"""import time
import queue
import random
import multiprocessing as mp
from multiprocessing import Process
def calc_square(numbers):
for i in numbers:
time.sleep(3) # artificial time-delay
print('square: ', str(i * i))
def calc_cube(numbers):
for i in numbers:
time.sleep(3)
print('cube: ', str(i * i * i))
if __name__ == "__main__":
arr = [2, 3, 8, 9]
p1 = multiprocessing.Process(target=calc_square, args=(arr,))
p2 = multiprocessing.Process(target=calc_cube, args=(arr,))
# creating two Process here p1 & p2
p1.start()
p2.start()
# starting Processes here parallel by using start function.
p1.join()
# this join() will wait until the calc_square() function is finished.
p2.join()
# this join() will wait unit the calc_cube() function is finished.
print("Successes!")
"""
"""
if __name__ == "__main__":
arr = [2, 3, 8, 9]
t1 = threading.Thread(target=calc_square, args=(arr,))
t2 = threading.Thread(target=calc_cube, args=(arr,))
# creating two threads here t1 & t2
t1.start()
t2.start()
# starting threads here parallel by using start function.
t1.join()
# this join() will wait until the cal_square() function is finished.
t2.join()
# this join() will wait unit the cal_cube() function is finished.
print("Successes!")
""" |
from selenium import webdriver
import time
browser = webdriver.Chrome() # implicit wait: each element lookup retries for up to 5 seconds
browser.implicitly_wait(5)
browser.get("http://suninjuly.github.io/wait1.html")
#time.sleep(1)
# The button appears with a delay; the implicit wait covers it.
button = browser.find_element_by_id("check")
button.click()
message = browser.find_element_by_id("check_message")
# The page reports success in Russian once the check passes.
assert "успешно" in message.text
import random
import sys
import copy
from .board_manager import BoardManager
from .evaluation_manager import EvaluationManager
class AIManager:
    """Connect-four style AI player.

    Chooses moves with depth-limited minimax plus alpha-beta pruning,
    scoring non-terminal boards via EvaluationManager.

    Idiom fixes: `is None` instead of `== None`, truthiness instead of
    `== []`.
    """
    def __init__(self, game_manager):
        self.game_manager = game_manager
        self.evaluation_manager = EvaluationManager()
        self.lookup_table = {}
        self.opponent = True
        self.all_player = False
        # Minimax search depth.
        self.depth = 5
    def make_move(self):
        """Play the AI's move for this turn and advance to the next round."""
        self.best_move()
        # self.random_move()
        self.game_manager.next_round()
    def random_move(self):
        """Drop a piece into a uniformly random valid column."""
        empty_cols = self.game_manager.board_manager.logic_manager.get_valid_moves()
        if not empty_cols:  # board full: nothing left to play
            self.game_manager.game_over = True
            return
        col = random.choice(empty_cols)
        team = self.game_manager.turn_manager.turn
        self.game_manager.board_manager.drop_piece(col=col, type=team)
    def best_move(self):
        """Drop a piece into the column that minimax rates highest."""
        board_manager = self.game_manager.board_manager
        team = self.game_manager.turn_manager.turn
        col = self.minimax(board_manager=board_manager, depth=self.depth, alpha=-sys.maxsize, beta=sys.maxsize, is_maximizing=True, team=team)[0]
        if col is None:
            print("This shouldn't happen, EVER!")
            return
        self.game_manager.board_manager.drop_piece(col=col, type=team)
    def minimax(self, board_manager: BoardManager, depth: int, alpha, beta, is_maximizing: bool, team: str):
        """Alpha-beta minimax; return (column, score) from `team`'s view.

        Terminal boards score +/- sys.maxsize (win/loss) or 0 (draw);
        the depth limit falls back to EvaluationManager's heuristic.
        """
        if team == 'p1':
            opponent = 'p2'
        else:
            opponent = 'p1'
        valid_moves = board_manager.logic_manager.get_valid_moves()
        is_terminal = not valid_moves or board_manager.logic_manager.is_game_over(turn=team) or board_manager.logic_manager.is_game_over(turn=opponent)
        if depth == 0 or is_terminal:
            if is_terminal:
                if board_manager.logic_manager.is_game_over(turn=team):
                    return (None, sys.maxsize)
                elif board_manager.logic_manager.is_game_over(turn=opponent):
                    return (None, -sys.maxsize)
                else:
                    # No valid moves and nobody won: a draw.
                    return (None, 0)
            else:
                return (None, self.evaluation_manager.evaluate_board(board_manager=board_manager, team=team))
        if is_maximizing:
            value = -sys.maxsize
            column = None
            for col in valid_moves:
                # Simulate the move on a copy so the real board is untouched.
                board_manager_copy = copy.deepcopy(board_manager)
                board_manager_copy.drop_piece(col=col, type=team)
                new_score = self.minimax(board_manager=board_manager_copy, depth=depth-1, alpha=alpha, beta=beta, is_maximizing=False, team=team)[1]
                if new_score > value:
                    value = new_score
                    column = col
                alpha = max(alpha, value)
                if alpha >= beta:  # beta cutoff
                    break
            return (column, value)
        else:
            value = sys.maxsize
            column = None
            for col in valid_moves:
                board_manager_copy = copy.deepcopy(board_manager)
                board_manager_copy.drop_piece(col=col, type=opponent)
                new_score = self.minimax(board_manager=board_manager_copy, depth=depth-1, alpha=alpha, beta=beta, is_maximizing=True, team=team)[1]
                if new_score < value:
                    value = new_score
                    column = col
                beta = min(beta, value)
                if alpha >= beta:  # alpha cutoff
                    break
            return (column, value)
|
# tuodaan tarvittavat osat
from flask import render_template, request, redirect, url_for, flash
from flask_login import login_user, logout_user, current_user, login_required
from application import app, db, login_required
from application.auth.models import User
from application.teams.models import Team
from application.user.forms import UserForm, RegularUserForm
# Create a new user with role 'admin'.
@app.route("/user/new/<team_id>", methods=["GET", "POST"])
def user_signup(team_id):
    """Sign up a new ADMIN user for team *team_id*.

    GET renders the signup form; a valid POST creates the user and
    redirects to the login page.
    """
    error = None
    form = UserForm(team_id=team_id)
    if form.validate_on_submit():
        try:
            u = User(username=form.username.data, password=form.password.data, role="ADMIN", team_id=team_id)
            db.session.add(u)
            db.session.commit()
            flash("User created")
        except Exception as e:
            error = e
        # NOTE(review): indentation reconstructed — this redirect appears to
        # run on both success and failure, so a caught error is never shown
        # to the user; confirm the intended behaviour.
        return redirect(url_for("auth_login"))
    return render_template("/user/new.html", form = form, error = error)
# Create a new regular user for the logged-in admin's team.
@app.route("/user/new_regular", methods=["GET", "POST"])
@login_required(role="ADMIN")
def regular_user_signup():
    """Sign up a new REGULAR user bound to the current admin's team.

    Only ADMIN accounts may reach this view (role-checked login_required).
    """
    error = None
    form = RegularUserForm()
    team = current_user.team_id
    if form.validate_on_submit():
        try:
            u = User(username=form.username.data, password=form.password.data, role="REGULAR", team_id = team)
            db.session.add(u)
            db.session.commit()
            flash("User created")
        except Exception as e:
            error = e
        # NOTE(review): indentation reconstructed — as in user_signup, the
        # redirect seems to run even when creation failed; confirm.
        return redirect(url_for("index"))
    return render_template("/user/new_regular.html", form = form, error = error)
# from .component import Component
from .shellscript import ShellScript
from ._version import __version__
def reactopya_templates_directory():
    """Return the absolute path of the package's 'templates' directory."""
    import os
    package_dir = os.path.dirname(os.path.realpath(__file__))
    return os.path.join(package_dir, 'templates')
def reactopya_server_directory():
    """Return the absolute path of the package's 'reactopya_server' directory."""
    import os
    package_dir = os.path.dirname(os.path.realpath(__file__))
    return os.path.join(package_dir, 'reactopya_server')
# Module-level placeholders. NOTE(review): main() and compute() bind their
# own local n1/n2, so these globals are never actually read — confirm they
# can be removed.
first = None
second = None
n1 = None
n2 = None
def get_two_numbers():
    """Prompt for two non-negative integers and return them as a tuple.

    Re-prompts until a valid non-negative integer is entered for each of
    the two values. (Python 2 script: uses raw_input.)
    """
    numbers = [
        ['first', None],
        ['second', None],
    ]
    for number in numbers:
        n = None
        while n is None:
            s = raw_input("What is the " + number[0] + " number? ")
            try:
                n = int(s)
                if n < 0:
                    print("Please enter a non-negative number.")
                    n = None
                    continue
            # Fixed: was a bare `except:` (also swallowed KeyboardInterrupt);
            # only a failed int() parse is expected here.
            except ValueError:
                continue
        number[1] = n
    return numbers[0][1], numbers[1][1]
def compute(n1, n2):
    """Return (sum, difference, product, quotient) of n1 and n2.

    Division by zero yields the string 'N/A' instead of raising.
    """
    add = n1 + n2
    subtract = n1 - n2
    multiply = n1 * n2
    try:
        divide = n1 / n2
    # Fixed: was a bare `except:`; only division by zero is expected.
    except ZeroDivisionError:
        divide = 'N/A'
    return add, subtract, multiply, divide
def main():
    """Read two numbers, compute the four basic operations, and print them."""
    n1, n2 = get_two_numbers()
    nsum, nsub, nmul, ndiv = compute(n1, n2)
    template = '{} + {} = {}\n{} - {} = {}\n{} * {} = {}\n{} / {} = {}'
    # Python 2 print *statement*: this script is not Python 3 compatible.
    print template.format(n1, n2, nsum,
                          n1, n2, nsub,
                          n1, n2, nmul,
                          n1, n2, ndiv)


if __name__ == '__main__':
    main()
|
import requests
from lxml import etree
# url = "http://www.dytt8.net/html/gndy/dyzz/list_23_1.html"
# Request headers, shared by every request in this module.
HEADERS = {
    'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36'
}
# Site root, used to turn relative detail links into absolute URLs.
BASE_DOMAIN = 'http://www.dytt8.net'
# Fetch one list page and return the absolute URL of every movie detail page.
def get_detail_urls(url):
    """Return an iterator of absolute detail-page URLs found on *url*.

    The site declares gbk, but some list pages contain bytes that fail to
    decode as gbk, so requests' own guessed decoding (response.text) is used.
    """
    response = requests.get(url, headers=HEADERS)
    page = etree.HTML(response.text)
    relative_links = page.xpath("//table[@class='tbspan']//a/@href")
    return map(lambda link: BASE_DOMAIN + link, relative_links)
def parse_detail_page(url):
    """Scrape one movie detail page and return its fields as a dict.

    Keys: title, cover, screenshot, yiming (translated title), year,
    country, pingfen (Douban score), time (runtime), daoyan (director),
    actors (list of str), profile (synopsis), download_url.
    """
    movie = {}
    respones = requests.get(url, headers=HEADERS)
    text = respones.content.decode('gbk')  # detail pages are gbk encoded
    html = etree.HTML(text)
    title = html.xpath("//div[@class='title_all']//font[@color='#07519a']/text()")[0]
    movie['title'] = title
    zoomE = html.xpath("//div[@id='Zoom']")[0]
    imgs = zoomE.xpath(".//img/@src")  # first image: poster; second: screenshot
    movie['cover'] = imgs[0]
    movie['screenshot'] = imgs[1]
    infos = zoomE.xpath(".//text()")  # all text nodes of the info block

    def parse_info(info, rule):
        # Strip the field label (e.g. "◎年 代") and surrounding whitespace.
        return info.replace(rule, "").strip()

    for index, info in enumerate(infos):
        if info.startswith("◎译 名"):
            movie['yiming'] = parse_info(info, "◎译 名")
        elif info.startswith("◎年 代"):
            movie['year'] = parse_info(info, "◎年 代")
        elif info.startswith("◎产 地"):
            movie['country'] = parse_info(info, "◎产 地")
        elif info.startswith("◎豆瓣评分"):
            movie['pingfen'] = parse_info(info, "◎豆瓣评分")
        elif info.startswith("◎片 长"):
            movie['time'] = parse_info(info, "◎片 长")
        elif info.startswith("◎导 演"):
            movie["daoyan"] = parse_info(info, "◎导 演")
        # Actors span several following text nodes, one per line.
        elif info.startswith("◎主 演"):
            actors = [parse_info(info, "◎主 演")]
            for x in range(index + 1, len(infos)):
                actor = infos[x].strip()
                if actor.startswith("◎"):
                    break
                actors.append(actor)
            movie['actors'] = actors
        elif info.startswith("◎简 介 "):
            # BUG FIX: the original overwrote movie['profile'] on every
            # iteration, keeping only the last synopsis line; accumulate
            # every line up to the download section instead.
            profile_lines = []
            for x in range(index + 1, len(infos)):
                line = infos[x].strip()
                if line.startswith("【下载地址】"):
                    break
                profile_lines.append(line)
            movie['profile'] = "\n".join(profile_lines)
    download_url = html.xpath("//td[@bgcolor='#fdfddf']/a/@href")[0]
    movie['download_url'] = download_url
    return movie
# Crawl list pages 1-7 and print the parsed details of every movie found.
def spider():
    """Walk the first seven list pages and print each movie's detail dict."""
    list_url_template = "http://www.dytt8.net/html/gndy/dyzz/list_23_{}.html"
    for page in range(1, 8):
        movies = []  # per-page accumulator; each movie is also printed below
        page_url = list_url_template.format(page)
        for detail_url in get_detail_urls(page_url):
            movie = parse_detail_page(detail_url)
            movies.append(movie)
            print(movie)


if __name__ == '__main__':
    spider()
from shared.settings import *
# DEBUG, SERVER and PORT come from shared.settings; SERVER_IP is the
# production address (left empty here). Outside debug mode the production
# IP is used instead of the development server name.
SERVER_IP = ''
SERVER = SERVER if DEBUG else SERVER_IP
ADDR = (SERVER, PORT)
|
# Copyright 2021 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Sample from trained autoregressive MDN."""
import os
import time
from absl import app
from absl import flags
from absl import logging
from functools import partial
import jax
import jax.numpy as jnp
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
from flax.metrics import tensorboard
from flax.training import checkpoints
import utils.data_utils as data_utils
import utils.train_utils as train_utils
import utils.losses as losses
import utils.metrics as metrics
import train_transformer
import input_pipeline
from tensorflow_probability.substrates import jax as tfp
tfd = tfp.distributions
FLAGS = flags.FLAGS
AUTOTUNE = tf.data.experimental.AUTOTUNE

# Sampling-specific flags; model/data flags are defined by the imported
# train_transformer / input_pipeline modules.
flags.DEFINE_integer('sample_seed', 1,
                     'Random number generator seed for sampling.')
flags.DEFINE_string('sampling_dir', 'sample', 'Sampling directory.')
flags.DEFINE_integer('sample_size', 1000, 'Number of samples.')
flags.DEFINE_boolean('flush', True, 'Flush generated samples to disk.')
def sample(num_samples=2400, steps=32, embedding_dims=42, rng_seed=1,
           real=None):
  """Generate samples using autoregressive decoding.

  Args:
    num_samples: The number of samples to generate.
    steps: Number of sampling steps.
    embedding_dims: Number of dimensions per embedding.
    rng_seed: Initialization seed.
    real: Real examples; currently unused (accepted for caller parity).

  Returns:
    generated: An array of generated samples.
  """
  rng = jax.random.PRNGKey(rng_seed)
  rng, model_rng = jax.random.split(rng)
  # Create a model with dummy parameters and a dummy optimizer
  lm_kwargs = {
      'num_layers': FLAGS.num_layers,
      'num_heads': FLAGS.num_heads,
      'mdn_mixtures': FLAGS.mdn_components,
      'num_mlp_layers': FLAGS.num_mlp_layers,
      'mlp_dims': FLAGS.mlp_dims
  }
  model = train_transformer.create_model(model_rng, (steps, embedding_dims),
                                         lm_kwargs,
                                         batch_size=1,
                                         verbose=True)
  optimizer = train_transformer.create_optimizer(model, 0)
  early_stop = train_utils.EarlyStopping()
  # Load learned parameters
  optimizer, early_stop = checkpoints.restore_checkpoint(
      FLAGS.model_dir, (optimizer, early_stop))
  # Autoregressive decoding
  t0 = time.time()
  tokens = jnp.zeros((num_samples, steps, embedding_dims))
  for i in range(steps):
    # Predict MDN parameters for every position given the tokens so far.
    pi, mu, log_sigma = optimizer.target(tokens, shift=False)
    channels = tokens.shape[-1]
    mdn_k = pi.shape[-1]
    out_pi = pi.reshape(-1, mdn_k)
    out_mu = mu.reshape(-1, channels * mdn_k)
    out_log_sigma = log_sigma.reshape(-1, channels * mdn_k)
    # Assemble the Gaussian mixture and draw the next embeddings.
    mix_dist = tfd.Categorical(logits=out_pi)
    mus = out_mu.reshape(-1, mdn_k, channels)
    log_sigmas = out_log_sigma.reshape(-1, mdn_k, channels)
    sigmas = jnp.exp(log_sigmas)
    component_dist = tfd.MultivariateNormalDiag(loc=mus, scale_diag=sigmas)
    mixture = tfd.MixtureSameFamily(mixture_distribution=mix_dist,
                                    components_distribution=component_dist)
    rng, embed_rng = jax.random.split(rng)
    next_tokens = mixture.sample(seed=embed_rng).reshape(*tokens.shape)
    next_z = next_tokens[:, i]
    if i < steps - 1:
      # NOTE(review): jax.ops.index_update was removed in newer JAX releases;
      # this code needs an older JAX (modern form: tokens.at[:, i+1].set(next_z)).
      tokens = jax.ops.index_update(tokens, jax.ops.index[:, i + 1], next_z)
    else:
      tokens = next_tokens  # remove start token
  logging.info('Generated samples in %f seconds', time.time() - t0)
  return tokens
def main(argv):
  """Restore the trained MDN transformer, sample, and optionally dump to disk."""
  del argv  # unused
  logging.info(FLAGS.flags_into_string())
  logging.info('Platform: %s', jax.lib.xla_bridge.get_backend().platform)
  # Make sure TensorFlow does not allocate GPU memory.
  tf.config.experimental.set_visible_devices([], 'GPU')
  log_dir = FLAGS.sampling_dir
  # Optional preprocessing transforms saved during training.
  pca = data_utils.load(os.path.expanduser(
      FLAGS.pca_ckpt)) if FLAGS.pca_ckpt else None
  slice_idx = data_utils.load(os.path.expanduser(
      FLAGS.slice_ckpt)) if FLAGS.slice_ckpt else None
  dim_weights = data_utils.load(os.path.expanduser(
      FLAGS.dim_weights_ckpt)) if FLAGS.dim_weights_ckpt else None
  train_ds, eval_ds = input_pipeline.get_dataset(
      dataset=FLAGS.dataset,
      data_shape=FLAGS.data_shape,
      problem='vae',
      batch_size=FLAGS.batch_size,
      normalize=FLAGS.normalize,
      pca_ckpt=FLAGS.pca_ckpt,
      slice_ckpt=FLAGS.slice_ckpt,
      dim_weights_ckpt=FLAGS.dim_weights_ckpt,
      include_cardinality=False)
  eval_min, eval_max = eval_ds.min, eval_ds.max
  eval_ds = eval_ds.unbatch()
  if FLAGS.sample_size is not None:
    eval_ds = eval_ds.take(FLAGS.sample_size)
  # Real eval examples; their shape drives the sampler's steps/dims.
  real = np.stack([ex for ex in tfds.as_numpy(eval_ds)])
  shape = real[0].shape
  # Generate samples
  generated = sample(FLAGS.sample_size, shape[-2], shape[-1], FLAGS.sample_seed,
                     real)
  # Dump generated to CPU.
  generated = np.array(generated)
  # Write samples to disk (used for listening).
  if FLAGS.flush:
    # Inverse transform data back to listenable/unnormalized latent space.
    generated_t = input_pipeline.inverse_data_transform(generated,
                                                        FLAGS.normalize, pca,
                                                        train_ds.min,
                                                        train_ds.max, slice_idx,
                                                        dim_weights)
    real_t = input_pipeline.inverse_data_transform(real, FLAGS.normalize, pca,
                                                   eval_min, eval_max,
                                                   slice_idx, dim_weights)
    data_utils.save(real_t, os.path.join(log_dir, 'mdn/real.pkl'))
    data_utils.save(generated_t, os.path.join(log_dir, 'mdn/generated.pkl'))


if __name__ == '__main__':
  app.run(main)
|
from werkzeug.security import generate_password_hash
from application import db
class User(db.Model):
    """Login account; the password is stored as a werkzeug hash."""
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(80), unique=True)
    # NOTE(review): unique=True on the password *hash* looks unintended —
    # two users could never share a password; confirm before migrating.
    password = db.Column(db.String(120), unique=True)
    is_valid = db.Column(db.Boolean, default=True)

    def __init__(self, username, password, is_valid):
        self.username = username
        # Only the hash of the plaintext password is persisted.
        self.password = generate_password_hash(password)
        self.is_valid = is_valid
class News(db.Model):
    """A news article."""
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(200), nullable=False)
    content = db.Column(db.Text(), nullable=False)
    # Category key (short free-form string).
    types = db.Column(db.String(10), nullable=False)
    img_url = db.Column(db.String(300))
    author = db.Column(db.String(20))
    view_count = db.Column(db.Integer)
    created_at = db.Column(db.DateTime)
    # Soft-delete flag and front-page recommendation flag.
    is_valid = db.Column(db.Boolean, default=1)
    is_recommend = db.Column(db.Boolean, default=0)

    def __init__(self, title, content, types, img_url, author, view_count, created_at, is_valid, is_recommend):
        self.title = title
        self.content = content
        self.types = types
        self.img_url = img_url
        self.author = author
        self.view_count = view_count
        self.created_at = created_at
        self.is_valid = is_valid
        self.is_recommend = is_recommend
|
from docx import Document
import xlwings as xw
def get_paragraph_text(path, n):
    """Return the text of paragraph *n* (0-based) of the Word document.

    :param path: path of the .docx file
    :param n: paragraph index, counted from 0
    :raises IndexError: when the document has no paragraph *n*
    """
    document = Document(path)
    total = len(document.paragraphs)
    if n >= total:
        raise IndexError('paragraph index (%s) out of range, in total %s' % (n, total))
    return document.paragraphs[n].text
def get_paragraphs_text(path):
    """Return the text of every paragraph as a list of str,
    e.g. ['Test', 'hello world', ...].

    :param path: path of the .docx file
    """
    document = Document(path)
    return [paragraph.text for paragraph in document.paragraphs]
def get_all_tables_text(path):
    """Return the cell text of every table in the document.

    Rows of all tables are concatenated into one 2-D list, e.g.
    [['age', 'rank'], ['23', '00'], ...].

    :param path: path of the .docx file
    """
    document = Document(path)
    return [[cell.text for cell in row.cells]
            for table in document.tables
            for row in table.rows]
def get_table_text(path, n=0):
    """Return the cell text of table *n* (0-based) as a 2-D list,
    e.g. [['age', 'rank'], ['23', '00'], ...].

    :param path: path of the .docx file
    :param n: table index, counted from 0
    :raises IndexError: when the document has fewer than n+1 tables
    """
    document = Document(path)
    total = len(document.tables)
    if n >= total:
        raise IndexError('table index (%s) out of range, in total %s' % (n, total))
    return [[cell.text for cell in row.cells]
            for row in document.tables[n].rows]
def get_cell_text(path, n=0, row=0, col=0):
    """Return the text of one table cell (all indices 0-based).

    :param path: path of the .docx file
    :param n: table index
    :param row: row index within the table
    :param col: column index within the table
    :raises IndexError: when any index is out of range
    """
    document = Document(path)
    all_tables = len(document.tables)
    if all_tables <= n:
        # Fixed typo in the original message ('in toatl' -> 'in total').
        raise IndexError('table index (%s) out of range, in total %s' % (n, all_tables))
    rows = len(document.tables[n].rows)
    cols = len(document.tables[n].columns)
    if rows <= row or cols <= col:
        raise IndexError('cell index out of range, %s;%s' % (row, col))
    return document.tables[n].rows[row].cells[col].text
def get_table_text_to_excel(path, n=0):
    """Copy table *n* (0-based) of the Word document into a new workbook.

    The workbook is saved as '表格{n}.xlsx'. Returns True on success.

    :param path: path of the .docx file
    :param n: table index, counted from 0
    :raises IndexError: when table *n* does not exist
    """
    document = Document(path)
    all_tables = len(document.tables)
    # Validate before starting Excel so an index error cannot leak a process.
    if all_tables <= n:
        raise IndexError('table index (%s) out of range, in total %s' % (n, all_tables))
    app = xw.App(visible = False, add_book = False)  # start Excel headless
    try:
        workbook = app.books.add()
        worksheet = workbook.sheets.add()
        table = document.tables[n]
        for row_index, row in enumerate(table.rows):
            for col_index, cell in enumerate(row.cells):
                # xlwings ranges are 1-based.
                worksheet.range(row_index + 1, col_index + 1).value = cell.text
        workbook.save('表格{}.xlsx'.format(n))
        workbook.close()
        return True
    finally:
        # BUG FIX: the original leaked the Excel process when anything above
        # raised; always quit the application.
        app.quit()
def get_table_text_to_excel2(path):
    """Copy every table of the document into its own sheet of one workbook.

    The workbook is saved as '指定word文档表格内容.xlsx'.

    :param path: path of the .docx file
    """
    document = Document(path)
    app = xw.App(visible = False, add_book = False)  # start Excel headless
    try:
        workbook = app.books.add()
        for table in document.tables:
            worksheet = workbook.sheets.add()  # one new sheet per table
            for row_index, row in enumerate(table.rows):
                for col_index, cell in enumerate(row.cells):
                    worksheet.range(row_index + 1, col_index + 1).value = cell.text
        workbook.save('指定word文档表格内容.xlsx')
        workbook.close()
    finally:
        # BUG FIX: ensure the Excel process exits even on error.
        app.quit()
def get_table_text_to_excel3(path):
    """Copy every table into the first sheet of one workbook.

    Tables are written one after another, separated by one blank row; the
    workbook is saved as '指定word文档表格内容.xlsx'.

    :param path: path of the .docx file
    """
    document = Document(path)
    app = xw.App(visible = False, add_book = False)  # start Excel headless
    try:
        workbook = app.books.add()
        worksheet = workbook.sheets[0]
        row_offset = 1  # next free (1-based) row in the sheet
        for table in document.tables:
            last_row_index = 0  # also fixes a NameError on empty tables
            for row_index, row in enumerate(table.rows):
                last_row_index = row_index
                for col_index, cell in enumerate(row.cells):
                    worksheet.range(row_index + row_offset, col_index + 1).value = cell.text
            # Advance past this table plus one blank separator row.
            row_offset += last_row_index + 2
        workbook.save('指定word文档表格内容.xlsx')
        workbook.close()
    finally:
        # BUG FIX: ensure the Excel process exits even on error.
        app.quit()
from requests import get, post, put, delete
# Smoke-test the REST API with one request of each HTTP verb.
print('Проверяем GET запрос для RequestResource')
print(get('http://localhost:8080/api/request/2').json())
# GET for an id that does not exist
print(get('http://localhost:8080/api/request/1000').json())
print('Проверяем GET запрос для RequestListResource')
print(get('http://localhost:8080/api/request').json())
print('Проверяем POST запрос')
# POST with an empty body
print(post('http://localhost:8080/api/request').json())
# POST with an incomplete body
print(post('http://localhost:8080/api/request',
           json={'name': 'Заголовок'}).json())
print(post('http://localhost:8080/api/request',
           json={'name': 'Заголовок',
                 'description': 'Текст новости',
                 'address': 'Сочи, Апшеронская, 5',
                 'sender_id': 1,
                 'is_active': True}).json())
print('Проверяем DELETE запрос')
print(delete('http://localhost:8080/api/request/9').json())
# DELETE for an id that does not exist
print(delete('http://localhost:8080/api/request/999').json())
print('Проверяем PUT запрос')
print(put('http://localhost:8080/api/request/9',
          json={'name': 'new новый Заголовок',
                'is_active': 0}).json())
|
#!/usr/bin/env python
import sys
def main(input, output):
    """Copy the LaTeX body (between \\begin{document} and \\end{document},
    both exclusive) from *input* to *output*."""
    # BUG FIX: the Python-2-only builtin file() is gone in Python 3;
    # open() behaves identically on both.
    with open(input, "r") as i, open(output, "w") as o:
        write = False
        for line in i:
            if line.startswith("\\begin{document"):
                write = True
                continue
            if line.startswith("\\end{document"):
                write = False
                continue
            if write:
                o.write(line)


if __name__ == "__main__":
    main(sys.argv[1], sys.argv[2])
|
# -*- coding:utf-8 -*-
"""
Taken from: https://github.com/zhuang-hao-ming/c4.5-python
"""
import operator
import math
def get_majority_class(class_list):
    """Return the most frequent class label in *class_list*.

    Ties break in favour of the label inserted first, matching the
    original stable-sort implementation.
    """
    # Idiom fix: collections.Counter replaces the hand-rolled count dict
    # plus sorted(); most_common(1) is stable for ties like the original.
    from collections import Counter
    return Counter(class_list).most_common(1)[0][0]
def calculate_shannon_ent(data_set):
    """Return the Shannon entropy (base 2) of the class labels in *data_set*.

    The class label is the last element of each sample.
    """
    total = len(data_set)
    label_counts = {}
    for sample in data_set:
        label = sample[-1]
        label_counts[label] = label_counts.get(label, 0) + 1
    entropy = 0.0
    for count in label_counts.values():
        p = float(count) / total
        entropy -= p * math.log(p, 2)
    return entropy
def split_data_set(data_set, feature_idx, val):
    """Return the samples whose feature *feature_idx* equals *val*,
    with that feature column removed."""
    return [sample[:feature_idx] + sample[feature_idx + 1:]
            for sample in data_set
            if sample[feature_idx] == val]
def choose_best_feature_for_split(data_set):
    """Return the index of the feature with the highest C4.5 gain ratio.

    Returns -1 when no feature achieves a gain ratio above 0 (e.g. every
    candidate feature is single-valued, giving zero split information).
    """
    num_features = len(data_set[0]) - 1
    base_entropy = calculate_shannon_ent(data_set)
    best_ratio = 0.0
    best_idx = -1
    for feature_idx in range(num_features):
        values = {sample[feature_idx] for sample in data_set}
        children_entropy = 0.0
        split_info = 0.0
        for value in values:
            subset = split_data_set(data_set, feature_idx, value)
            weight = float(len(subset)) / len(data_set)
            children_entropy += weight * calculate_shannon_ent(subset)
            split_info -= weight * math.log(weight, 2)
        if split_info == 0:
            # Single-valued feature: the gain ratio is undefined; skip it.
            continue
        gain_ratio = (base_entropy - children_entropy) / split_info
        if gain_ratio > best_ratio:
            best_ratio = gain_ratio
            best_idx = feature_idx
    return best_idx
def create_tree(data_set, labels):
    """Recursively build a C4.5 decision tree as nested dicts.

    NOTE: *labels* is consumed — the chosen feature's label is deleted
    from the caller's list (the original behaviour, preserved here).
    """
    class_list = [sample[-1] for sample in data_set]
    # Pure node: every sample carries the same class.
    if class_list.count(class_list[0]) == len(class_list):
        return class_list[0]
    # No features left: fall back to the majority class.
    if len(data_set[0]) == 1:
        return get_majority_class(class_list)
    best_idx = choose_best_feature_for_split(data_set)
    best_label = labels[best_idx]
    del labels[best_idx]
    tree = {best_label: {}}
    for value in {sample[best_idx] for sample in data_set}:
        # Each branch recurses on a copy of the remaining labels.
        tree[best_label][value] = create_tree(split_data_set(data_set, best_idx, value), labels[:])
    return tree
def main():
    """Build and print decision trees for two toy data sets."""
    # Columns: Outlook, Temperature, Humidity, Windy, Play? (class).
    tennis = [
        ['Sunny', 'Hot', 'High', 'False', 'No'],
        ['Sunny', 'Hot', 'High', 'True', 'No'],
        ['Overcast', 'Hot', 'High', 'False', 'Yes'],
        ['Rainy', 'Mild', 'High', 'False', 'Yes'],
        ['Rainy', 'Cool', 'Normal', 'False', 'Yes'],
        ['Rainy', 'Cool', 'Normal', 'True', 'No'],
        ['Overcast', 'Cool', 'Normal', 'True', 'Yes'],
        ['Sunny', 'Mild', 'High', 'False', 'No'],
        ['Sunny', 'Cool', 'Normal', 'False', 'Yes'],
        ['Rainy', 'Mild', 'Normal', 'False', 'Yes'],
        ['Sunny', 'Mild', 'Normal', 'True', 'Yes'],
        ['Overcast', 'Mild', 'High', 'True', 'Yes'],
        ['Overcast', 'Hot', 'Normal', 'False', 'Yes'],
        ['Rainy', 'Mild', 'High', 'True', 'No']
    ]
    tennis_labels = ['Outlook', 'Temperature', 'Humidity', 'Windy']
    # Columns: Home/Away, In Top 25?, Media, Win/Lose (class).
    basketball = [
        ['Home', 'Out', '1-NBC', 'Win'],
        ['Home', 'In', '1-NBC', 'Lose'],
        ['Away', 'Out', '2-ESPN', 'Win'],
        ['Away', 'Out', '3-FOX', 'Win'],
        ['Home', 'Out', '1-NBC', 'Win'],
        ['Away', 'Out', '4-ABC', 'Win'],
    ]
    # NOTE(review): this list includes the class column name 'Win/Lose';
    # create_tree indexes labels only by feature index, so the extra entry
    # appears harmless but misleading — confirm.
    basketball_labels = ['Home/Away', 'In Top 25?', 'Media', 'Win/Lose']
    decision_tree_tennis = create_tree(tennis, tennis_labels)
    decision_tree_basketball = create_tree(basketball, basketball_labels)
    print(decision_tree_tennis)
    print(decision_tree_basketball)


if __name__ == '__main__':
    main()
|
import numpy as np
from datetime import datetime
import os
import ROOT
import LORAparameters as LORA
import os.path
from datetime import date
# Version-1 LORA readout constants.
nTraceV1=4000   # samples per ADC trace
nDetV1=20       # number of detectors
dtV1=2.5        # sampling step (presumably ns — TODO confirm against DAQ docs)
timesV1=np.arange(0,4000*dtV1,dtV1)
# Per-detector fallback thresholds, used by getLogV1 when a log file
# records no usable value.
avg_threshold=np.asarray([ 27.24867982, 27.08347496, 21.3469044, 22.45092564, 16.96505311,
                16.62127466, 17.87311077, 17.90886191, 20.3131563, 20.87103187,
                20.18339909, 22.10192716, 22.96687405, 23.04840668, 13.57128983,
                13.92050076, 27.7231563, 15.69400607, 23.10057663, 24.20940819,])
def find_tag(timestamp,ns_timestamp,data_dir):
    """Find the raw-data file tag holding the event at (timestamp, ns_timestamp).

    Scans the .log files in *data_dir* whose names match the event date or
    one of the two preceding days (runs can span midnight) for a matching
    (s, ns) pair, and returns the tag when the matching .root file exists;
    otherwise returns 'no_match'.
    """
    #print '\n________________________\n'
    #timestamp = LORA4times[t][0]
    #ns_timestamp = LORA4times[t][1]
    dt_object = datetime.fromtimestamp(timestamp)
    year=dt_object.year
    month=dt_object.month
    day=dt_object.day
    hour=dt_object.hour
    minute=dt_object.minute
    second=dt_object.second
    #print year,month,day,hour
    #print int(timestamp),int(ns_timestamp)
    # Candidate date strings (YYYYMMDD): the event day plus up to two
    # preceding days; month/year rollovers are handled case by case.
    filestr1= str(year)+str(month).zfill(2)+str(day).zfill(2)
    filestr3='-1'
    filestr4='-1'
    if day>1:
        filestr2= str(year)+str(month).zfill(2)+str(day-1).zfill(2)
        filestr4= str(year)+str(month).zfill(2)+str(day-2).zfill(2)
    if (month==10 or month==5 or month==7 or month==12) and day==1:
        filestr2= str(year)+str(month-1).zfill(2)+str(30).zfill(2)
        filestr4= str(year)+str(month-1).zfill(2)+str(29).zfill(2)
    if (month==2 or month==4 or month==6 or month==8 or month==9 or month==11) and day==1:
        filestr2= str(year)+str(month-1).zfill(2)+str(31).zfill(2)
        filestr4= str(year)+str(month-1).zfill(2)+str(30).zfill(2)
    if (month==3) and day==1:
        # February: filestr3 adds the leap-year 29th as an extra candidate.
        filestr2= str(year)+str(month-1).zfill(2)+str(28).zfill(2)
        filestr3= str(year)+str(month-1).zfill(2)+str(29).zfill(2)
        filestr4= str(year)+str(month-1).zfill(2)+str(27).zfill(2)
    if (month==1) and day==1:
        filestr2= str(year-1)+str(12).zfill(2)+str(31).zfill(2)
        filestr4= str(year-1)+str(12).zfill(2)+str(30).zfill(2)
    #filestr3= str(year)+str(month).zfill(2)+str(day-1).zfill(2)
    filelist=[]
    for file in os.listdir(data_dir):
        if filestr1 in file or filestr2 in file or filestr3 in file or filestr4 in file:
            if '.log' in file:
                #print(os.path.join(data_dir, file))
                filelist.append(os.path.join(data_dir, file))
    found=0
    filetag='no'
    for i in np.arange(len(filelist)):
        #print filelist[i]
        #for i in np.arange(1):
        file=open(filelist[i])
        stamps=[]
        try:
            # Columns 2 and 3 of the log are the (s, ns) event timestamps.
            stamps=np.genfromtxt(file,skip_header=10,usecols=(2,3))
        except:
            print('empty log')
        file.close()
        if len(stamps)>0:
            if len(stamps[(stamps.T[0]==timestamp)*(stamps.T[1]==ns_timestamp)])>0:
                # Tag = file name between the 'raw/' directory and the extension.
                filetag=filelist[i].split('raw/')[1].split('.')[0]
                found=1
    #print 'looking for this file: ',data_dir+filetag+'.root'
    if found==1 and os.path.exists(data_dir+filetag+'.root'):
        return filetag
    else:
        return 'no_match'
def find_tag_exception(timestamp,ns_timestamp,data_dir):
    """Fallback for find_tag: search the .root files directly when no .log
    file contains the event (same date-candidate logic as find_tag)."""
    #print '\n________________________\n'
    #timestamp = LORA4times[t][0]
    #ns_timestamp = LORA4times[t][1]
    print('no standard log file avaliable, trying to look at .root files')
    dt_object = datetime.fromtimestamp(timestamp)
    year=dt_object.year
    month=dt_object.month
    day=dt_object.day
    hour=dt_object.hour
    minute=dt_object.minute
    second=dt_object.second
    #print year,month,day,hour
    #print int(timestamp),int(ns_timestamp)
    # Candidate date strings (YYYYMMDD): event day plus up to two preceding
    # days; month/year rollovers handled case by case (duplicated from
    # find_tag — candidate for a shared helper).
    filestr1= str(year)+str(month).zfill(2)+str(day).zfill(2)
    filestr3='-1'
    filestr4='-1'
    if day>1:
        filestr2= str(year)+str(month).zfill(2)+str(day-1).zfill(2)
        filestr4= str(year)+str(month).zfill(2)+str(day-2).zfill(2)
    if (month==10 or month==5 or month==7 or month==12) and day==1:
        filestr2= str(year)+str(month-1).zfill(2)+str(30).zfill(2)
        filestr4= str(year)+str(month-1).zfill(2)+str(29).zfill(2)
    if (month==2 or month==4 or month==6 or month==8 or month==9 or month==11) and day==1:
        filestr2= str(year)+str(month-1).zfill(2)+str(31).zfill(2)
        filestr4= str(year)+str(month-1).zfill(2)+str(30).zfill(2)
    if (month==3) and day==1:
        filestr2= str(year)+str(month-1).zfill(2)+str(28).zfill(2)
        filestr3= str(year)+str(month-1).zfill(2)+str(29).zfill(2)
        filestr4= str(year)+str(month-1).zfill(2)+str(27).zfill(2)
    if (month==1) and day==1:
        filestr2= str(year-1)+str(12).zfill(2)+str(31).zfill(2)
        filestr4= str(year-1)+str(12).zfill(2)+str(30).zfill(2)
    filelist=[]
    for file in os.listdir(data_dir):
        if filestr1 in file or filestr2 in file or filestr3 in file or filestr4 in file:
            if '.root' in file:
                #print(os.path.join(data_dir, file))
                filelist.append(os.path.join(data_dir, file))
    found=0
    filetag='no'
    for i in np.arange(len(filelist)):
        root_file=ROOT.TFile.Open(filelist[i])
        try:
            tree_event = root_file.Get("Tree_event")
            event_index=-1
            event_index=find_entry_number(timestamp,ns_timestamp,tree_event)
            print('did we find the index? {0}'.format(event_index))
            if event_index>-1:
                found=1
                filetag=filelist[i].split('raw/')[1].split('.')[0]
                break
        except:
            print('can\'t open the event leaf')
    if found==1:
        print('returning file tag')
        return filetag
    else:
        return 'no_match'
def getTime(det, entry):
    """Load *entry* of detector branch *det* and return its
    (YMD, GPS_time_stamp, CTD, nsec) leaf values as a tuple."""
    det.GetEntry(entry)
    leaf = det.GetLeaf
    return (leaf('YMD').GetValue(),
            leaf('GPS_time_stamp').GetValue(),
            leaf('CTD').GetValue(),
            leaf('nsec').GetValue())
def getTimeSec(lasa, entry):
    """Load *entry* of station branch *lasa* and return its GPS timestamp."""
    lasa.GetEntry(entry)
    return lasa.GetLeaf('GPS_time_stamp').GetValue()
def find_entry_number(lora_utc,lora_nsec,tree_event):
    """Return the Tree_event entry matching (lora_utc, lora_nsec), or -1.

    An entry is a candidate when any of the five sampled master detectors
    carries the same GPS second and all valid (>1) ns-stamps lie within
    10 us of lora_nsec; among candidates, the smallest deviation wins.
    """
    event=-1
    det1=tree_event.GetBranch('Det1')
    det5=tree_event.GetBranch('Det5')
    det9=tree_event.GetBranch('Det9')
    det13=tree_event.GetBranch('Det13')
    det17=tree_event.GetBranch('Det17')
    det1.GetLeaf('GPS_time_stamp')
    nE= det1.GetEntries()
    #times=np.zeros([nLasa,nE])
    trigger_check=0   # set below but never read — candidate for removal
    diff_best=1e10
    for e in np.arange(nE):
        ymd1,gps1,ctd1,nsec1= getTime(det1,e)
        ymd2,gps2,ctd2,nsec2= getTime(det5,e)
        ymd3,gps3,ctd3,nsec3= getTime(det9,e)
        ymd4,gps4,ctd4,nsec4= getTime(det13,e)
        ymd5,gps5,ctd5,nsec5= getTime(det17,e)
        times=[gps1,gps2,gps3,gps4,gps5]
        times_ns=[nsec1,nsec2,nsec3,nsec4,nsec5]
        if lora_utc in times:
            # Largest ns deviation among detectors with a valid (>1) ns stamp.
            diff=np.max(np.abs(lora_nsec-np.asarray(times_ns)[np.asarray(times_ns)>1]))
            if diff<10000: # w/in 10 us to avoid mis-triggers
                if diff<diff_best:
                    diff_best=diff
                    trigger_check=1
                    event=e
    return event
def getDataV1(det, entry):
    """Read event *entry* from detector branch *det* into a dict.

    The 'counts' field holds the full nTraceV1-sample trace as a numpy
    array; the remaining fields are scalar leaf values.
    """
    det.GetEntry(entry)
    detector=det.GetLeaf('detector').GetValue()
    ymd=det.GetLeaf('YMD').GetValue()
    gps=det.GetLeaf('GPS_time_stamp').GetValue()
    ctd=det.GetLeaf('CTD').GetValue()
    nsec=det.GetLeaf('nsec').GetValue()
    trigg_condition=det.GetLeaf('Trigg_condition').GetValue()
    try:
        trigg_pattern=det.GetLeaf('Trigg_pattern').GetValue()
    except:
        # Older files lack the Trigg_pattern leaf; use -1 as a sentinel.
        trigg_pattern=-1
    total_counts=det.GetLeaf('Total_counts').GetValue()
    pulse_height=det.GetLeaf('Pulse_height').GetValue()
    pulse_width=det.GetLeaf('Pulse_width').GetValue()
    counts=det.GetLeaf('counts')
    # Copy the trace sample by sample into a plain numpy array.
    hold=np.zeros([nTraceV1])
    for i in np.arange(nTraceV1):
        hold[i]=counts.GetValue(i)
    info={'det':detector,'ymd':ymd,'gps':gps,'ctd':ctd,'nsec':nsec,'trigg_condition':trigg_condition,'trigg_pattern':trigg_pattern,'total_counts':total_counts,'pulse_height':pulse_height,'pulse_width':pulse_width,'counts':hold}
    return info
def return_root(filename,utc,nsec,data_dir):
    """Return one event dict per detector (nDetV1 of them) for the event at
    (utc, nsec) inside <data_dir><filename>.root."""
    #log_file=open(data_dir+filename+'.log','r')
    root_file=ROOT.TFile.Open(data_dir+filename+'.root')
    print('reading root file: {0}'.format(data_dir+filename+'.root'))
    tree_sec = root_file.Get("Tree_sec")
    tree_event = root_file.Get("Tree_event")
    tree_log = root_file.Get("Tree_log")
    tree_noise = root_file.Get("Tree_noise")
    # Locate the entry that matches the requested timestamps.
    event_index=find_entry_number(utc,nsec,tree_event)
    all_info=[]
    for i in np.arange(nDetV1):
        detname='Det'+str(1+i)
        det=tree_event.GetBranch(detname)
        info=getDataV1(det,event_index)
        all_info.append(info)
    return all_info
def find_sec_number(lora_utc, tree_sec, i):
    """Return the first entry of branch 'Lasa<i+1>' whose GPS timestamp is
    greater than or equal to *lora_utc*, or -1 when every entry is earlier."""
    branch = tree_sec.GetBranch('Lasa' + str(i + 1))
    branch.GetLeaf('GPS_time_stamp')
    n_entries = branch.GetEntries()
    matched_entry = -1
    # Scan every entry (no early break) exactly like the original code;
    # the two original conditions (== and >) collapse to a single >=.
    for entry in np.arange(n_entries):
        gps = getTimeSec(branch, entry)
        if matched_entry == -1 and gps >= lora_utc:
            matched_entry = entry
    return matched_entry
def getSecV1(det, entry):
    """Read one-second record *entry* from station branch *det*.

    Only timing-related fields are returned; the threshold-count and
    satellite leaves are read below but not included in the result —
    presumably to advance/validate the read, TODO confirm.
    """
    det.GetEntry(entry)
    lasa=det.GetLeaf('Lasa').GetValue()
    YMD=det.GetLeaf('YMD').GetValue()
    GPS_time_stamp=det.GetLeaf('GPS_time_stamp').GetValue()
    sync=det.GetLeaf('sync').GetValue()
    CTP=det.GetLeaf('CTP').GetValue()
    quant=det.GetLeaf('quant').GetValue()
    # Read but unused in the returned dict:
    Channel_1_Thres_count_high=det.GetLeaf('Channel_1_Thres_count_high').GetValue()
    Channel_1_Thres_count_low=det.GetLeaf('Channel_1_Thres_count_low').GetValue()
    Channel_2_Thres_count_high=det.GetLeaf('Channel_2_Thres_count_high').GetValue()
    Channel_2_Thres_count_low=det.GetLeaf('Channel_2_Thres_count_low').GetValue()
    Satellite_info=det.GetLeaf('Satellite_info').GetValue()
    info={'lasa':lasa,'YMD':YMD,'GPS_time_stamp':GPS_time_stamp,'sync':sync,'CTP':CTP,'quant':quant}
    return info
def return_second_data(filename, utc, nsec, data_dir):
    """Return per-LASA one-second data for the event second and the two
    following seconds, as three lists of getSecV1() dicts."""
    root_file = ROOT.TFile.Open(data_dir + filename + '.root')
    tree_sec = root_file.Get("Tree_sec")
    tree_event = root_file.Get("Tree_event")
    tree_log = root_file.Get("Tree_log")
    tree_noise = root_file.Get("Tree_noise")
    # Locate the matching second for every station first.
    entry = np.zeros([LORA.nLASA])
    for station in np.arange(LORA.nLASA):
        entry[station] = find_sec_number(utc, tree_sec, station)
    info_t0, info_t1, info_t2 = [], [], []
    for station in np.arange(LORA.nLASA):
        branch = tree_sec.GetBranch('Lasa' + str(1 + station))
        base = int(entry[station])
        info_t0.append(getSecV1(branch, base))
        info_t1.append(getSecV1(branch, base + 1))
        info_t2.append(getSecV1(branch, base + 2))
    return info_t0, info_t1, info_t2
def getLogV1(det, d, entry):
    """Return {'threshold': <low threshold>} for detector d from a Tree_log branch.

    When the stored low threshold is 0 (invalid), fall back to the average
    of all non-zero thresholds in the branch, and finally to the hard-coded
    avg_threshold table."""
    n_entries = det.GetEntries()
    det.GetEntry(entry)
    # YMD / timestamp leaves are loaded for parity with the original reader.
    YMD = det.GetLeaf('YMD').GetValue()
    GPS_time_stamp = det.GetLeaf('Time_stamp').GetValue()
    threshold = det.GetLeaf('Channel_thres_low').GetValue()
    print('_________________________________________')
    print('finding threshold: {0}'.format(threshold))
    if threshold == 0.0:
        print('issue with Threshold_low {0}'.format(n_entries))
        total = 0
        count = 0
        for e in np.arange(n_entries):
            det.GetEntry(e)
            candidate = det.GetLeaf('Channel_thres_low').GetValue()
            if candidate > 0.0:
                total = total + candidate
                count = count + 1
        threshold = total / (1.0 * count) if count > 0 else avg_threshold[d]
    return {'threshold': threshold}
def getNoiseV1(det, entry):
    """Return the baseline {'mean', 'sigma'} stored for this detector at
    the given Tree_noise entry."""
    det.GetEntry(entry)
    sigma = det.GetLeaf('Sigma').GetValue()
    mean = det.GetLeaf('Mean').GetValue()
    return {'mean': mean, 'sigma': sigma}
def find_noise_number(lora_utc, tree_noise, d):
    """Entry index of the first Tree_noise record of Det<d> strictly after
    lora_utc, or -1 when no such record exists."""
    branch = tree_noise.GetBranch('Det' + str(d))
    branch.GetLeaf('GPS_time_stamp')
    n_entries = branch.GetEntries()
    entry = -1
    found = False
    # Scan every entry (no early break), keeping the first match only.
    for e in np.arange(n_entries):
        branch.GetEntry(e)
        gps = branch.GetLeaf('GPS_time_stamp').GetValue()
        if gps > lora_utc and not found:
            entry = e
            found = True
    return entry
def find_log_number(lora_utc, tree_log, d):
    """Entry index of the first Tree_log record of Det<d> strictly after
    lora_utc, or -1 when no such record exists."""
    branch = tree_log.GetBranch('Det' + str(d))
    branch.GetLeaf('Time_stamp')
    n_entries = branch.GetEntries()
    entry = -1
    found = False
    # Scan every entry (no early break), keeping the first match only.
    for e in np.arange(n_entries):
        branch.GetEntry(e)
        stamp = branch.GetLeaf('Time_stamp').GetValue()
        if stamp > lora_utc and not found:
            entry = e
            found = True
    return entry
def return_log_data(filename, utc, nsec, data_dir):
    """Return a threshold-info dict (getLogV1) for each LORA detector,
    read from Tree_log of <data_dir><filename>.root.

    Detectors are grouped four per station; each group of four shares the
    log entry found for its station's reference detector (1, 5, 9, 13, 17).
    """
    root_file = ROOT.TFile.Open(data_dir + filename + '.root')
    tree_sec = root_file.Get("Tree_sec")
    tree_event = root_file.Get("Tree_event")
    tree_log = root_file.Get("Tree_log")
    tree_noise = root_file.Get("Tree_noise")
    # One log entry per station, keyed by the station's first detector.
    station_entries = [find_log_number(utc, tree_log, ref) for ref in (1, 5, 9, 13, 17)]
    all_info = []
    for i in np.arange(LORA.nLORA):
        branch = tree_log.GetBranch('Det' + str(1 + i))
        # Detector i belongs to station i//4, replacing the original if-chains.
        all_info.append(getLogV1(branch, i, station_entries[int(i) // 4]))
    return all_info
def return_noise_data(filename, utc, nsec, data_dir):
    """Return baseline {'mean', 'sigma'} dicts (getNoiseV1) for each LORA
    detector, read from Tree_noise of <data_dir><filename>.root.

    Detectors are grouped four per station; each group of four shares the
    noise entry found for its station's reference detector (1, 5, 9, 13, 17).
    """
    root_file = ROOT.TFile.Open(data_dir + filename + '.root')
    tree_sec = root_file.Get("Tree_sec")
    tree_event = root_file.Get("Tree_event")
    tree_log = root_file.Get("Tree_log")
    tree_noise = root_file.Get("Tree_noise")
    # One noise entry per station, keyed by the station's first detector.
    station_entries = [find_noise_number(utc, tree_noise, ref) for ref in (1, 5, 9, 13, 17)]
    all_info = []
    for i in np.arange(LORA.nLORA):
        branch = tree_noise.GetBranch('Det' + str(1 + i))
        # Detector i belongs to station i//4, replacing the original if-chains.
        all_info.append(getNoiseV1(branch, station_entries[int(i) // 4]))
    return all_info
def log_file(filename, data_dir):
    """Parse <data_dir><filename>.log and return trigger/status flags.

    Returns a dict with keys 'LOFAR_trig' and 'lasa1_status'..'lasa5_status'.
    Every value defaults to -1 when the file cannot be read or the
    corresponding line is absent.
    """
    filepath = data_dir + filename + '.log'
    # Line tag -> result key; values sit after the first ':' on the line.
    tags = {
        'LOFAR trigger settings': 'LOFAR_trig',
        'CS003:': 'lasa1_status',
        'CS004:': 'lasa2_status',
        'CS005:': 'lasa3_status',
        'CS006:': 'lasa4_status',
        'CS007:': 'lasa5_status',
    }
    info = {key: -1 for key in ('LOFAR_trig', 'lasa1_status', 'lasa2_status',
                                'lasa3_status', 'lasa4_status', 'lasa5_status')}
    try:
        with open(filepath, 'r') as fp:
            for line in fp:
                for tag, key in tags.items():
                    if tag in line:
                        info[key] = int(line.strip().split(':')[1])
    except OSError:
        # FIX: was a bare `except:` — only file-access errors are expected here.
        print('can\'t find log file')
    return info
def return_event_V2(event_id, event_GPS, event_ns, event_data):
    """Assemble per-detector info for one V2 event.

    Selects the rows of `event_data` belonging to `event_id` and returns
    (all_info, log_all_info): one dict per detector 1..40.  Detectors that
    did not participate in the event get zero-filled placeholder dicts.

    event_GPS / event_ns are accepted for interface compatibility; the
    timing fields are taken from the selected rows themselves.

    FIX: the row mask `Event_Id == event_id` is now computed once instead
    of once per column, and columns that were read but never used
    (Station, Channel_Passed_Threshold, Peak_Height_Raw, Run_Id) are no
    longer extracted.
    """
    mask = event_data['Event_Id'] == event_id
    dets = event_data['Detector'][mask]
    counts = event_data['Waveform_Raw'][mask]
    pulse_height = event_data['Peak_Height_Corrected'][mask]
    total_counts = event_data['Charge_Corrected'][mask]
    trigger_pattern = event_data['HiSparc_Trigg_Pattern'][mask]
    trigger_condition = event_data['HiSparc_Trigg_Condition'][mask]
    nsec = event_data['nsec_Online'][mask]
    ctd = event_data['CTD'][mask]
    gps = event_data['GPS_Time_Stamp'][mask]
    thresh = event_data['Trigg_Threshold'][mask]
    print('threshold--> ',thresh)
    all_info = []
    log_all_info = []
    for d in np.arange(40):
        if (d + 1) in dets:
            ind = np.where(dets == (d + 1))[0][0]
            # NOTE(review): date.fromtimestamp uses the local timezone —
            # TODO confirm the GPS stamps are meant to be interpreted that way.
            timestamp = date.fromtimestamp(gps[ind])
            ymd = int(str(timestamp.year).zfill(2) + str(timestamp.month).zfill(2) + str(timestamp.day).zfill(2))
            info = {'det': dets[ind], 'ymd': ymd, 'gps': gps[ind], 'ctd': ctd[ind], 'nsec': nsec[ind], 'trigg_condition': trigger_condition[ind], 'trigg_pattern': trigger_pattern[ind], 'total_counts': total_counts[ind], 'pulse_height': pulse_height[ind], 'pulse_width': 0, 'counts': counts[ind]}
            log_info = {'threshold': thresh[ind]}
        else:
            # Detector absent from this event: zero-filled placeholders.
            info = {'det': d + 1, 'ymd': 0, 'gps': 0, 'ctd': 0, 'nsec': 0, 'trigg_condition': 0, 'trigg_pattern': 0, 'total_counts': 0, 'pulse_height': 0, 'pulse_width': 0, 'counts': np.zeros([4000])}
            log_info = {'threshold': 0}
        all_info.append(info)
        log_all_info.append(log_info)
    return all_info, log_all_info
def return_second_data_V2(event_id, event_GPS, event_ns, osm_data_hisparc, osm_data_aera):
    """Collect one-second monitoring (OSM) data for the event second and the
    two following seconds.

    Stations 1-5 come from the HiSPARC OSM arrays (master + slave rows);
    the remaining LORA.nLasaB stations come from the AERA OSM arrays.
    Returns three lists (t = 0, 1, 2 seconds after event_GPS) of
    per-station dicts; stations without a matching second get zero-filled
    placeholders.
    """
    Station_H = osm_data_hisparc['Station']
    Master_Or_Slave_H = osm_data_hisparc['Master_Or_Slave']
    GPS_Time_Stamp_H = osm_data_hisparc['GPS_Time_Stamp']
    Sync_Error_H = osm_data_hisparc['Sync_Error']
    Quant_Error_H = osm_data_hisparc['Quant_Error']
    CTP_H = osm_data_hisparc['CTP']
    Station_A = osm_data_aera['Station']
    GPS_Time_Stamp_A = osm_data_aera['GPS_Time_Stamp']
    Sync_Error_A = osm_data_aera['Sync_Error']
    Quant_Error_A = osm_data_aera['Quant_Error']
    CTP_A = osm_data_aera['CTP']
    UTC_offset_A = osm_data_aera['UTC_offset']  # read for parity; not used below
    all_info = []
    all_info1 = []
    all_info2 = []
    for t in np.arange(3):
        for i in np.arange(LORA.nLasaB):
            lasa = i + 1
            if lasa <= 5:
                # HiSPARC station: this second's master row (Master_Or_Slave == 0).
                maskM = (GPS_Time_Stamp_H == (event_GPS + t)) * (Station_H == i + 1) * (Master_Or_Slave_H == 0)
                gpsM = GPS_Time_Stamp_H[maskM]
                # FIX: was `if len(gpsM>0):` (length of a boolean mask, which
                # only worked by accident) — the intent is "found a row".
                if len(gpsM) > 0:
                    syncM = Sync_Error_H[maskM]
                    quantM = Quant_Error_H[maskM]
                    ctpM = CTP_H[maskM]
                    timestamp = date.fromtimestamp(gpsM)
                    ymdM = int(str(timestamp.year).zfill(2) + str(timestamp.month).zfill(2) + str(timestamp.day).zfill(2))
                    # slave row (Master_Or_Slave == 1)
                    maskS = (GPS_Time_Stamp_H == (event_GPS + t)) * (Station_H == i + 1) * (Master_Or_Slave_H == 1)
                    gpsS = GPS_Time_Stamp_H[maskS]
                    syncS = Sync_Error_H[maskS]
                    quantS = Quant_Error_H[maskS]
                    ctpS = CTP_H[maskS]
                    # NOTE(review): the slave YMD is derived from gpsM (master),
                    # not gpsS — looks like a copy-paste slip, but preserved
                    # until confirmed against the data layout.
                    timestamp = date.fromtimestamp(gpsM)
                    ymdS = int(str(timestamp.year).zfill(2) + str(timestamp.month).zfill(2) + str(timestamp.day).zfill(2))
                    info = {'lasa': lasa, 'YMD_M': ymdM, 'GPS_time_stamp_M': gpsM, 'sync_M': syncM, 'CTP_M': ctpM, 'quant_M': quantM, 'YMD_S': ymdS, 'GPS_time_stamp_S': gpsS, 'sync_S': syncS, 'CTP_S': ctpS, 'quant_S': quantS}
                else:
                    info = {'lasa': lasa, 'YMD_M': np.asarray([0]), 'GPS_time_stamp_M': np.asarray([0]), 'sync_M': np.asarray([0]), 'CTP_M': np.asarray([0]), 'quant_M': np.asarray([0]), 'YMD_S': np.asarray([0]), 'GPS_time_stamp_S': np.asarray([0]), 'sync_S': np.asarray([0]), 'CTP_S': np.asarray([0]), 'quant_S': np.asarray([0])}
            else:
                # AERA station: single row per second.
                maskA = (GPS_Time_Stamp_A == (event_GPS + t)) * (Station_A == i + 1)
                gps = GPS_Time_Stamp_A[maskA]
                print(i + 1, t, gps)
                # FIX: was `if len(gps>0):` — see note above.
                if len(gps) > 0:
                    sync = Sync_Error_A[maskA]
                    quant = Quant_Error_A[maskA]
                    ctp = CTP_A[maskA]
                    timestamp = date.fromtimestamp(gps)
                    ymd = int(str(timestamp.year).zfill(2) + str(timestamp.month).zfill(2) + str(timestamp.day).zfill(2))
                    info = {'lasa': lasa, 'YMD': ymd, 'GPS_time_stamp': gps, 'sync': sync, 'CTP': ctp, 'quant': quant}
                else:
                    info = {'lasa': lasa, 'YMD': np.asarray([0]), 'GPS_time_stamp': np.asarray([0]), 'sync': np.asarray([0]), 'CTP': np.asarray([0]), 'quant': np.asarray([0])}
            if t == 0:
                all_info.append(info)
            if t == 1:
                all_info1.append(info)
            if t == 2:
                all_info2.append(info)
    return all_info, all_info1, all_info2
|
import pygame as py
import random as rn

py.init()


class gm:
    """Global game state: clock, screen surface and run flag."""
    clk = py.time.Clock()
    scr = py.display.set_mode((512, 512))
    run = True

    class sn:
        # Head position on a grid of 16-px cells.
        # NOTE(review): randint(0, 32) is inclusive, so x or y == 32 places
        # the rect at pixel 512, just off the 512-px surface — confirm whether
        # randint(0, 31) was intended.
        x = rn.randint(0, 32)
        y = rn.randint(0, 32)

    def rec(s, x, y, w, h, c):
        # Draw a filled rectangle of colour c on surface s.
        py.draw.rect(s, c, py.Rect(x, y, w, h))

    def draw():
        gm.rec(gm.scr, gm.sn.x * 16, gm.sn.y * 16, 32, 32, (255, 0, 0))


while gm.run:
    # FIX: pump the event queue — without this the window is unresponsive
    # and cannot be closed (the loop ran forever with run always True).
    for event in py.event.get():
        if event.type == py.QUIT:
            gm.run = False
    gm.draw()
    py.display.flip()
|
"""
Factorial and Fibonacci
Recursive vs iterative
Recursive:
DRY (helps not to repeat yourself)
Readability
Maintains state at different levels of recursion
But extra memory footprint (space complexity): larger stack because of additional function calls, adding functions to
the call stack, potentially stack overflow, take up stack space
> This can be solved with Tail Call Optimisation
When to use?
- Typically useful when dealing with trees or graphs traversals, sometimes when sorting (merge sort and quick sort)
- A problem that can be divided in subproblems
- All instances of subproblem is identical in nature
- Those solutions can be combined to solve the larger problem
"""
def factorial_recursive(num):  # O(n) time, O(n) call stack
    """Return num! recursively.

    Mirrors the original contract: 0 -> 1, and any value <= 2 (including
    negatives) is returned unchanged.
    """
    if num > 2:
        return num * factorial_recursive(num - 1)
    if num == 0:
        return 1
    return num
def factorial_iterative(num):  # O(n) time, O(1) space
    """Return num! iteratively (0 -> 1; values <= 2 returned unchanged)."""
    if num <= 2:
        return 1 if num == 0 else num
    result = 1
    for factor in range(2, num + 1):  # multiply 2 * 3 * ... * num
        result *= factor
    return result
# Smoke-test both factorial implementations (expect 120, 120, 1, 1).
print(factorial_recursive(5))
print(factorial_iterative(5))
print(factorial_recursive(0))
print(factorial_iterative(0))
# 0, 1, 2, 3, 4, 5, 6, 7, 8
# 0, 1, 1, 2, 3, 5, 8, 13, 21
def fibonacci_recursive(index):  # O(2^n) — memoization/DP would make it O(n)
    """Return the index-th Fibonacci number (0, 1, 1, 2, 3, 5, ...)."""
    if index < 2:
        return index
    # Recurse on the two predecessors, (index-2) first as in the original.
    return fibonacci_recursive(index - 2) + fibonacci_recursive(index - 1)
def fibonacci_iterative(index):  # O(n) time, O(1) memory
    """Return the index-th Fibonacci number iteratively.

    Only the last two values are tracked; an O(n)-memory variant would
    keep the whole sequence in a list instead.

    FIX: the running value was named `sum`, shadowing the builtin of the
    same name; the update now uses a tuple swap with a neutral name.
    """
    if index < 2:
        return index
    second_last, last = 0, 1
    for _ in range(2, index + 1):
        second_last, last = last, second_last + last
    return last
# Smoke-test: fibonacci(6) == 8 for both variants.
print(fibonacci_iterative(6))
print(fibonacci_recursive(6)) # a lot slower
def reverse_string_recursive(string):
    """Reverse `string` by peeling the last character off each call."""
    if not string:
        return string
    return string[-1] + reverse_string_recursive(string[:-1])
def reverse_string_iterative(string):
    """Reverse `string` without recursion.

    FIX: the accumulator was named `reversed`, shadowing the builtin, and
    characters were appended one at a time with quadratic `+=`; a negative
    slice performs the whole reversal in a single C-level pass.
    """
    return string[::-1]
# Smoke-test the string reversal helpers, including the empty string.
print(reverse_string_recursive("lala ! o"))
print(reverse_string_iterative("lala ! o"))
print(reverse_string_recursive(""))
print(reverse_string_iterative(""))
|
# Interactive Telnet session against a lab switch: log in, disable paging,
# capture the output of `show vlan`, then split it into lines.
# NOTE(review): telnetlib is deprecated and removed in Python 3.13 —
# consider migrating to telnetlib3 or a library such as netmiko.
import telnetlib,getpass
# Hard-coded target switch hostname (lab device).
host = 'tis-sw-acc-1lab-1'
# Credentials are collected interactively; the device expects each input
# terminated with a newline, encoded as bytes for telnetlib.
username = input('Username: ')
username = bytes(username + '\n', 'UTF8')
password = getpass.getpass()
password = bytes(password + '\n', 'UTF8')
# Enable password is prompted for but never sent below.
enablepw = getpass.getpass()
enablepw = bytes(enablepw + '\n', 'UTF8')
session = telnetlib.Telnet()
session.open(host)
# Each read_until uses a 2-second timeout; intermediate output is echoed
# so a human can follow the login conversation.
output = session.read_until(b'Username:',2)
print(output)
session.write(username)
output = session.read_until(b'Password:',2)
print(output)
session.write(password)
output = session.read_until(b'>',2)
print(output)
# Disable paging so `show vlan` arrives in one read.
session.write(b'terminal length 0\n')
output = session.read_until(b'>',2)
print(output)
session.write(b'show vlan\n')
b_vlans = session.read_until(b'>',2)
session.write(b'exit\n')
session.close()
# Decode the captured bytes and split on the device's CRLF line endings.
vlans = b_vlans.decode().split('\r\n')
|
# Demo: build a small DataFrame of purchases, one pandas Series per purchase,
# indexed by the store where each purchase happened.
import numpy as np
import pandas as pd
purchase_1 = pd.Series({'Name': 'Chris',
                        'Item Purchased': 'Dog Food',
                        'Cost': 22.50})
purchase_2 = pd.Series({'Name': 'Kevyn',
                        'Item Purchased': 'Kitty Litter',
                        'Cost': 2.50})
purchase_3 = pd.Series({'Name': 'Vinod',
                        'Item Purchased': 'Bird Seed',
                        'Cost': 5.00})
# Two purchases share the 'store1' label, so label lookups can return
# multiple rows.
df = pd.DataFrame([purchase_1, purchase_2, purchase_3], index=['store1', 'store1', 'store2'])
df.head()
# Label-based row selection ...
df.loc['store2']
# ... and positional row selection.
df.iloc[1]
df['Item Purchased'] |
# FIX: the MechanicalSoup package is imported as lowercase 'mechanicalsoup';
# `import mechanicalSoup` fails on case-sensitive filesystems.
import mechanicalsoup
from bs4 import BeautifulSoup

"""
AUTHOR: BHARATH SUNDAR
VERSION: 1.0
INFO: Web scraper to get live cricket scores and live sports news.
"""

# Target URLs (to be filled in).
news_url = ""
scores_url = ""
|
#!/usr/bin/env python3
#
# Based on examples from minikerberos by skelsec
# Parts of this code was inspired by the following project by @rubin_mor
# https://github.com/morRubin/AzureADJoinedMachinePTC
#
# Author:
# Tamas Jos (@skelsec)
# Dirk-jan Mollema (@_dirkjan)
#
import argparse
import logging
import binascii
import secrets
import datetime
import hashlib
import base64
from oscrypto.keys import parse_pkcs12, parse_certificate, parse_private
from oscrypto.asymmetric import rsa_pkcs1v15_sign, load_private_key
from asn1crypto import cms
from asn1crypto import algos
from asn1crypto import core
from asn1crypto import keys
from minikerberos import logger
from minikerberos.pkinit import PKINIT, DirtyDH
from minikerberos.common.ccache import CCACHE
from minikerberos.common.target import KerberosTarget
from minikerberos.network.clientsocket import KerberosClientSocket
from minikerberos.protocol.constants import NAME_TYPE, PaDataType
from minikerberos.protocol.encryption import Enctype, _checksum_table, _enctype_table, Key
from minikerberos.protocol.structures import AuthenticatorChecksum
from minikerberos.protocol.asn1_structs import KDC_REQ_BODY, PrincipalName, HostAddress, \
KDCOptions, EncASRepPart, AP_REQ, AuthorizationData, Checksum, krb5_pvno, Realm, \
EncryptionKey, Authenticator, Ticket, APOptions, EncryptedData, AS_REQ, AP_REP, PADATA_TYPE, \
PA_PAC_REQUEST
from minikerberos.protocol.rfc4556 import PKAuthenticator, AuthPack, Dunno2, MetaData, Info, CertIssuer, CertIssuers, PA_PK_AS_REP, KDCDHKeyInfo, PA_PK_AS_REQ
class myPKINIT(PKINIT):
    """
    Copy of minikerberos PKINIT
    With some changes where it differs from PKINIT used in NegoEx
    """

    @staticmethod
    def from_pfx(pfxfile, pfxpass, dh_params = None):
        """Build a myPKINIT from a PFX/PKCS#12 file on disk."""
        with open(pfxfile, 'rb') as f:
            pfxdata = f.read()
        return myPKINIT.from_pfx_data(pfxdata, pfxpass, dh_params)

    @staticmethod
    def from_pfx_data(pfxdata, pfxpass, dh_params = None):
        """Build a myPKINIT from in-memory PFX/PKCS#12 bytes."""
        pkinit = myPKINIT()
        # oscrypto does not seem to support pfx without password, so convert it to PEM using cryptography instead
        if not pfxpass:
            from cryptography.hazmat.primitives.serialization import pkcs12
            from cryptography.hazmat.primitives import serialization
            privkey, cert, extra_certs = pkcs12.load_key_and_certificates(pfxdata, None)
            pem_key = privkey.private_bytes(
                encoding=serialization.Encoding.PEM,
                format=serialization.PrivateFormat.TraditionalOpenSSL,
                encryption_algorithm=serialization.NoEncryption(),
            )
            pkinit.privkey = load_private_key(parse_private(pem_key))
            pem_cert = cert.public_bytes(
                encoding=serialization.Encoding.PEM
            )
            pkinit.certificate = parse_certificate(pem_cert)
        else:
            #print('Loading pfx12')
            if isinstance(pfxpass, str):
                pfxpass = pfxpass.encode()
            # FIX: the password was encoded above but then ignored
            # (password=None was passed), which breaks password-protected
            # PFX files; pass the actual password through.
            pkinit.privkeyinfo, pkinit.certificate, pkinit.extra_certs = parse_pkcs12(pfxdata, password = pfxpass)
            pkinit.privkey = load_private_key(pkinit.privkeyinfo)
            #print('pfx12 loaded!')
        pkinit.setup(dh_params = dh_params)
        return pkinit

    @staticmethod
    def from_pem(certfile, privkeyfile, dh_params = None):
        """Build a myPKINIT from separate PEM certificate and key files."""
        pkinit = myPKINIT()
        with open(certfile, 'rb') as f:
            pkinit.certificate = parse_certificate(f.read())
        with open(privkeyfile, 'rb') as f:
            pkinit.privkey = load_private_key(parse_private(f.read()))
        pkinit.setup(dh_params = dh_params)
        return pkinit

    def sign_authpack(self, data, wrap_signed = False):
        """Sign the AuthPack blob (delegates to the native implementation)."""
        return self.sign_authpack_native(data, wrap_signed)

    def setup(self, dh_params = None):
        """Record the cert issuer and initialise the Diffie-Hellman state."""
        self.issuer = self.certificate.issuer.native['common_name']
        if dh_params is None:
            # NOTE(review): DH generation is commented out, so self.diffie is
            # never set on this path and later use of self.diffie will raise
            # AttributeError — callers must pass dh_params explicitly.
            print('Generating DH params...')
            # self.diffie = DirtyDH.from_dict()
            print('DH params generated.')
        else:
            #print('Loading default DH params...')
            if isinstance(dh_params, dict):
                self.diffie = DirtyDH.from_dict(dh_params)
            elif isinstance(dh_params, bytes):
                self.diffie = DirtyDH.from_asn1(dh_params)
            elif isinstance(dh_params, DirtyDH):
                self.diffie = dh_params
            else:
                raise Exception('DH params must be either a bytearray or a dict')

    def build_asreq(self, domain = None, cname = None, kdcopts = ['forwardable','renewable','renewable-ok']):
        """Build the PKINIT AS-REQ (ASN.1-encoded) for the given principal.

        Returns the DER-encoded AS_REQ containing the signed AuthPack and a
        PA-PAC-REQUEST padata entry.
        """
        if isinstance(kdcopts, list):
            kdcopts = set(kdcopts)
        if cname is not None:
            if isinstance(cname, str):
                cname = [cname]
        else:
            # NOTE(review): self.cname is never assigned anywhere in this
            # class, so calling build_asreq without cname raises
            # AttributeError — confirm whether a default was intended.
            cname = [self.cname]
        # if target is not None:
        # 	if isinstance(target, str):
        # 		target = [target]
        # else:
        # 	target = ['127.0.0.1']
        now = datetime.datetime.now(datetime.timezone.utc)
        kdc_req_body_data = {}
        kdc_req_body_data['kdc-options'] = KDCOptions(kdcopts)
        kdc_req_body_data['cname'] = PrincipalName({'name-type': NAME_TYPE.PRINCIPAL.value, 'name-string': cname})
        kdc_req_body_data['realm'] = domain.upper()
        kdc_req_body_data['sname'] = PrincipalName({'name-type': NAME_TYPE.SRV_INST.value, 'name-string': ['krbtgt', domain.upper()]})
        kdc_req_body_data['till'] = (now + datetime.timedelta(days=1)).replace(microsecond=0)
        kdc_req_body_data['rtime'] = (now + datetime.timedelta(days=1)).replace(microsecond=0)
        kdc_req_body_data['nonce'] = secrets.randbits(31)
        kdc_req_body_data['etype'] = [18,17] # 23 breaks...
        # kdc_req_body_data['addresses'] = [HostAddress({'addr-type': 20, 'address': b'127.0.0.1'})] # not sure if this is needed
        kdc_req_body = KDC_REQ_BODY(kdc_req_body_data)
        # The paChecksum binds the request body to the signed authenticator.
        checksum = hashlib.sha1(kdc_req_body.dump()).digest()
        authenticator = {}
        authenticator['cusec'] = now.microsecond
        authenticator['ctime'] = now.replace(microsecond=0)
        authenticator['nonce'] = secrets.randbits(31)
        authenticator['paChecksum'] = checksum
        dp = {}
        dp['p'] = self.diffie.p
        dp['g'] = self.diffie.g
        dp['q'] = 0 # mandatory parameter, but it is not needed
        pka = {}
        pka['algorithm'] = '1.2.840.10046.2.1'
        pka['parameters'] = keys.DomainParameters(dp)
        spki = {}
        spki['algorithm'] = keys.PublicKeyAlgorithm(pka)
        spki['public_key'] = self.diffie.get_public_key()
        authpack = {}
        authpack['pkAuthenticator'] = PKAuthenticator(authenticator)
        authpack['clientPublicValue'] = keys.PublicKeyInfo(spki)
        authpack['clientDHNonce'] = self.diffie.dh_nonce
        authpack = AuthPack(authpack)
        signed_authpack = self.sign_authpack(authpack.dump(), wrap_signed = True)
        payload = PA_PK_AS_REQ()
        payload['signedAuthPack'] = signed_authpack
        pa_data_1 = {}
        pa_data_1['padata-type'] = PaDataType.PK_AS_REQ.value
        pa_data_1['padata-value'] = payload.dump()
        pa_data_0 = {}
        pa_data_0['padata-type'] = int(PADATA_TYPE('PA-PAC-REQUEST'))
        pa_data_0['padata-value'] = PA_PAC_REQUEST({'include-pac': True}).dump()
        asreq = {}
        asreq['pvno'] = 5
        asreq['msg-type'] = 10
        asreq['padata'] = [pa_data_0, pa_data_1]
        asreq['req-body'] = kdc_req_body
        return AS_REQ(asreq).dump()

    def sign_authpack_native(self, data, wrap_signed = False):
        """
        Creating PKCS7 blob which contains the following things:
        1. 'data' blob which is an ASN1 encoded "AuthPack" structure
        2. the certificate used to sign the data blob
        3. the singed 'signed_attrs' structure (ASN1) which points to the "data" structure (in point 1)
        """
        da = {}
        da['algorithm'] = algos.DigestAlgorithmId('1.3.14.3.2.26') # for sha1
        si = {}
        si['version'] = 'v1'
        si['sid'] = cms.IssuerAndSerialNumber({
            'issuer':  self.certificate.issuer,
            'serial_number':  self.certificate.serial_number,
        })
        si['digest_algorithm'] = algos.DigestAlgorithm(da)
        si['signed_attrs'] = [
            cms.CMSAttribute({'type': 'content_type', 'values': ['1.3.6.1.5.2.3.1']}), # indicates that the encap_content_info's authdata struct (marked with OID '1.3.6.1.5.2.3.1' is signed )
            cms.CMSAttribute({'type': 'message_digest', 'values': [hashlib.sha1(data).digest()]}), ### hash of the data, the data itself will not be signed, but this block of data will be.
        ]
        si['signature_algorithm'] = algos.SignedDigestAlgorithm({'algorithm' : '1.2.840.113549.1.1.1'})
        si['signature'] = rsa_pkcs1v15_sign(self.privkey, cms.CMSAttributes(si['signed_attrs']).dump(), "sha1")
        ec = {}
        ec['content_type'] = '1.3.6.1.5.2.3.1'
        ec['content'] = data
        sd = {}
        sd['version'] = 'v3'
        sd['digest_algorithms'] = [algos.DigestAlgorithm(da)] # must have only one
        sd['encap_content_info'] = cms.EncapsulatedContentInfo(ec)
        sd['certificates'] = [self.certificate]
        sd['signer_infos'] = cms.SignerInfos([cms.SignerInfo(si)])
        if wrap_signed is True:
            ci = {}
            ci['content_type'] = '1.2.840.113549.1.7.2' # signed data OID
            ci['content'] = cms.SignedData(sd)
            return cms.ContentInfo(ci).dump()
        return cms.SignedData(sd).dump()

    def decrypt_asrep(self, as_rep):
        """Derive the DH shared key from the AS-REP and decrypt its enc-part.

        Returns (decoded EncASRepPart, session Key, cipher).
        """
        def truncate_key(value, keysize):
            # RFC 4556 octetstring2key-style truncation for AES key sizes.
            output = b''
            currentNum = 0
            while len(output) < keysize:
                currentDigest = hashlib.sha1(bytes([currentNum]) + value).digest()
                if len(output) + len(currentDigest) > keysize:
                    output += currentDigest[:keysize - len(output)]
                    break
                output += currentDigest
                currentNum += 1
            return output

        for pa in as_rep['padata']:
            if pa['padata-type'] == 17:
                pkasrep = PA_PK_AS_REP.load(pa['padata-value']).native
                break
        else:
            raise Exception('PA_PK_AS_REP not found!')
        ci = cms.ContentInfo.load(pkasrep['dhSignedData']).native
        sd = ci['content']
        keyinfo = sd['encap_content_info']
        if keyinfo['content_type'] != '1.3.6.1.5.2.3.2':
            raise Exception('Keyinfo content type unexpected value')
        authdata = KDCDHKeyInfo.load(keyinfo['content']).native
        # The first decoding of the server public key is immediately
        # superseded by the BitString-based decoding below (kept for parity).
        pubkey = int(''.join(['1'] + [str(x) for x in authdata['subjectPublicKey']]), 2)
        pubkey = int.from_bytes(core.BitString(authdata['subjectPublicKey']).dump()[7:], 'big', signed = False)
        shared_key = self.diffie.exchange(pubkey)
        server_nonce = pkasrep['serverDHNonce']
        fullKey = shared_key + self.diffie.dh_nonce + server_nonce
        etype = as_rep['enc-part']['etype']
        cipher = _enctype_table[etype]
        if etype == Enctype.AES256:
            t_key = truncate_key(fullKey, 32)
        elif etype == Enctype.AES128:
            t_key = truncate_key(fullKey, 16)
        elif etype == Enctype.RC4:
            raise NotImplementedError('RC4 key truncation documentation missing. it is different from AES')
            #t_key = truncate_key(fullKey, 16)
        key = Key(cipher.enctype, t_key)
        enc_data = as_rep['enc-part']['cipher']
        logger.info('AS-REP encryption key (you might need this later):')
        logger.info(binascii.hexlify(t_key).decode('utf-8'))
        # Key usage 3: AS-REP encrypted part.
        dec_data = cipher.decrypt(key, 3, enc_data)
        encasrep = EncASRepPart.load(dec_data).native
        cipher = _enctype_table[ int(encasrep['key']['keytype'])]
        session_key = Key(cipher.enctype, encasrep['key']['keyvalue'])
        return encasrep, session_key, cipher
def amain(args):
    """Load the certificate/key from the parsed CLI args, request a TGT via
    PKINIT and write it to the ccache file named in args.ccache."""
    # Static DH params because the ones generated by cryptography are considered unsafe by AD for some weird reason
    dhparams = {
        'p':int('00ffffffffffffffffc90fdaa22168c234c4c6628b80dc1cd129024e088a67cc74020bbea63b139b22514a08798e3404ddef9519b3cd3a431b302b0a6df25f14374fe1356d6d51c245e485b576625e7ec6f44c42e9a637ed6b0bff5cb6f406b7edee386bfb5a899fa5ae9f24117c4b1fe649286651ece65381ffffffffffffffff', 16),
        'g':2
    }
    logger.info('Loading certificate and key from file')
    # Three mutually exclusive input forms: base64 PFX, PFX file, or PEM pair.
    if args.pfx_base64:
        pfxdata = base64.b64decode(args.pfx_base64)
        ini = myPKINIT.from_pfx_data(pfxdata, args.pfx_pass, dhparams)
    elif args.cert_pfx:
        ini = myPKINIT.from_pfx(args.cert_pfx, args.pfx_pass, dhparams)
    elif args.cert_pem and args.key_pem:
        ini = myPKINIT.from_pem(args.cert_pem, args.key_pem, dhparams)
    else:
        logging.error('You must either specify a PFX file + optional password or a combination of Cert PEM file and Private key PEM file')
        return
    # Identity comes in as 'domain/username'.
    domain, username = args.identity.split('/')
    req = ini.build_asreq(domain,username)
    logger.info('Requesting TGT')
    # Fall back to the domain name as KDC address when -dc-ip is omitted.
    if not args.dc_ip:
        args.dc_ip = domain
    sock = KerberosClientSocket(KerberosTarget(args.dc_ip))
    res = sock.sendrecv(req)
    encasrep, session_key, cipher = ini.decrypt_asrep(res.native)
    ccache = CCACHE()
    ccache.add_tgt(res.native, encasrep)
    ccache.to_file(args.ccache)
    logger.info('Saved TGT to file')
def main():
    """CLI entry point: parse arguments, configure logging, request the TGT."""
    # FIX: removed a redundant function-local `import argparse` — the module
    # is already imported at the top of the file.
    parser = argparse.ArgumentParser(description='Requests a TGT using Kerberos PKINIT and either a PEM or PFX based certificate+key')
    parser.add_argument('identity', action='store', metavar='domain/username', help='Domain and username in the cert')
    parser.add_argument('ccache', help='ccache file to store the TGT in')
    parser.add_argument('-cert-pfx', action='store', metavar='file', help='PFX file')
    parser.add_argument('-pfx-pass', action='store', metavar='password', help='PFX file password')
    parser.add_argument('-pfx-base64', action='store', metavar='BASE64', help='PFX file as base64 string')
    parser.add_argument('-cert-pem', action='store', metavar='file', help='Certificate in PEM format')
    parser.add_argument('-key-pem', action='store', metavar='file', help='Private key file in PEM format')
    parser.add_argument('-dc-ip', help='DC IP or hostname to use as KDC')
    parser.add_argument('-v', '--verbose', action='count', default=0)
    args = parser.parse_args()
    # NOTE(review): no -v and a single -v both map to INFO; presumably -v was
    # meant to select DEBUG — confirm intent before changing.
    if args.verbose == 0:
        logger.setLevel(logging.INFO)
    elif args.verbose == 1:
        logger.setLevel(logging.INFO)
    else:
        logger.setLevel(1)
    amain(args)

if __name__ == '__main__':
    main()
|
#! /usr/bin/env python3
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import argparse
import pysam
import random

from signal import signal, SIGPIPE, SIG_DFL

# Handle broken pipes:
signal(SIGPIPE, SIG_DFL)

version = '1.1 (2017-05-16)'

parser = argparse.ArgumentParser(description="Randomly split BAM file into two. Input BAM file must be sorted by read name.")
parser.add_argument('-v', '--version', action='version', version='%(prog)s {0}'.format(version))
# FIX: help text said "inout".
parser.add_argument('-d', '--directory', dest='directory', required=True, help='directory of input BAM file')
parser.add_argument('-i', '--input', dest='filename', required=True, help='Input BAM file')
parser.add_argument('-f', '--fraction', dest='fraction', required=True, help='fraction of reads to write to one of the output BAM files - the remainder will be written to another BAM file')
args = parser.parse_args()

# FIX: the input handle was named `input`, shadowing the builtin.
infile = pysam.AlignmentFile(args.directory + '/' + args.filename, "rb")
output1=pysam.AlignmentFile(args.directory + '/output1_' + args.fraction + '_' + args.filename, "wb", template=infile)
output2=pysam.AlignmentFile(args.directory + '/output2_' + str(1- float(args.fraction)) + '_' + args.filename, "wb", template=infile)

fraction = float(args.fraction)  # parse once instead of per read
# Read names whose mate has not yet been seen, per output file; a pair's
# first read decides which file receives both mates.
written1 = {}
written2 = {}
# NOTE(review): fetch() on a name-sorted (unindexed) BAM may require
# until_eof=True depending on pysam version — confirm against usage.
for read in infile.fetch():
    if read.query_name in written1:
        # Mate of a read already routed to output1.
        output1.write(read)
        del written1[read.query_name]
    elif read.query_name in written2:
        output2.write(read)
        del written2[read.query_name]
    elif random.random() > fraction:
        output2.write(read)
        written2[read.query_name] = ''
    else:
        output1.write(read)
        written1[read.query_name] = ''

# FIX: close the BAM files so the output buffers are flushed.
output1.close()
output2.close()
infile.close()
|
import numpy as np
import pandas as pd
from sklearn.metrics import r2_score as r2
from sklearn.metrics import mean_squared_error as mse
def r2_for_last_n_cycles(y_act, y_hat, last_n=50):
    """Report the r-squared over the samples whose actual remaining-cycle
    count is <= last_n.

    FIX: the returned message advertised "r-squared" (and the function is
    named r2_...), but the value was computed with mean_squared_error; it
    now uses r2_score (imported at module level as `r2`) to match.
    """
    y_hat_tail = []
    y_act_tail = []
    for idx, cycle in enumerate(y_act):
        if cycle <= last_n:
            y_hat_tail.append(y_hat[idx])
            y_act_tail.append(cycle)
    return ("The r-squared for the last %s cycles is: " + str(r2(y_act_tail, y_hat_tail))) % last_n
################### Make a list of r squared values for plotting ##########
def r2_generator_last_n_cycles(y_act, y_hat, last_n=50):
    """Return a list of r-squared values for the last n, n-1, ..., 1 cycles
    (for plotting how the fit degrades near end of life).

    FIX: the list was populated with mean_squared_error even though the
    function and variable names promise r-squared; it now uses r2_score
    (imported at module level as `r2`).
    """
    r_squared_vals = []
    for n in range(last_n, 0, -1):
        y_hat_tail = []
        y_act_tail = []
        for idx, cycle in enumerate(y_act):
            if cycle <= n:
                y_hat_tail.append(float(y_hat[idx]))
                y_act_tail.append(cycle)
        r_squared_vals.append(r2(y_act_tail, y_hat_tail))
    return r_squared_vals
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import numpy as np
import scipy.constants as const
import matplotlib.pyplot as plt
def sat(vgs, vds, w, l, kp, vth, lamb):
    """Saturation-region drain current with channel-length modulation."""
    overdrive = vgs - vth          # V_GS - V_th == V_DS,sat
    base = (kp / 2) * (w / l) * overdrive ** 2
    return base * (1 + lamb * (vds - overdrive))
def triode(vgs, vds, w, l, kp, vth, lamb):
    """Triode (linear) region drain current; lamb is accepted for a
    signature uniform with sat() but plays no role here."""
    beta = kp * w / l
    return beta * ((vgs - vth) * vds - (vds ** 2) / 2)
def drain(vgs, vds, w, l, kp, vth, lamb):
    """Piecewise MOSFET drain-current model: cut-off, triode or saturation."""
    if vgs <= vth:
        return 0.0  # cut-off: no conducting channel
    region = triode if vds < vgs - vth else sat
    return region(vgs, vds, w, l, kp, vth, lamb)
def drain_sat(vds, w, l, kp):
    """Saturation-current envelope I_D,sat(V_DS), i.e. the drain current
    at the triode/saturation boundary where V_GS - V_th == V_DS."""
    return (kp / 2) * (w / l) * vds * vds
if __name__ == "__main__":
    # Transistor geometry and process parameters for the example plot.
    w = 10
    l = 2
    kp = 100 * const.micro
    vth = 0.3
    lamb = 0.1
    vgs = 0.7
    arrow = dict(facecolor = 'black',
                 arrowstyle = '->')
    # V_DS sweep for the full curve; x2 covers just the triode/boundary part.
    x = np.linspace(0, 1, 100)
    x2 = np.linspace(0, vgs - vth + 0.01)
    mos = np.vectorize( drain)
    # NOTE(review): 'id' shadows the builtin id() — harmless here but worth renaming.
    id = mos( vgs, x, w, l, kp, vth, lamb)
    idsat = drain_sat( x2, w, l, kp)
    fig, ax = plt.subplots()
    plt.plot( x, id, 'r', linewidth = 2)
    plt.plot( x2, idsat, 'k--')
    # Plot labels/annotations are in Polish ("nasycenie" = saturation,
    # "zakres liniowy (triodowy)" = linear/triode region).
    plt.text( 0.7, 0.5 * np.max(id), r"nasycenie",
              horizontalalignment = 'center', fontsize = 15)
    point = 0.75 * ( vgs - vth)
    ax.annotate('zakres liniowy (triodowy)',
                xy = ( 0.5 * point, 0.4 * drain( vgs, point, w, l, kp, vth, lamb)),
                textcoords = 'axes fraction', xytext = (0.35, 0.25), size = 15,
                arrowprops = arrow)
    ax.annotate('$V_{DS,sat}$ i $I_{D,sat}$',
                xy = ( vgs - vth, drain( vgs, vgs - vth, w, l, kp, vth, lamb)),
                textcoords = 'axes fraction', xytext = (0.1, 0.9), size = 15,
                arrowprops = arrow)
    # Strip spines/ticks for a schematic, textbook-style figure.
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    ax.set_xticks([])
    ax.set_yticks([])
    plt.xlabel('Napięcie dren - źródło')
    plt.ylabel('Prąd drenu')
    plt.show()
|
# coding: utf-8
#
# Sample for checking the proxy configuration used by a .NET WebClient.
# (IronPython / Python 2 syntax: print statements and .NET namespace imports.)
from System import *
from System.Net import *
wc = WebClient()
# Relative request URIs are resolved against this base address.
wc.BaseAddress = "http://www.google.co.jp/"
# Show which proxy the client would use for this URI.
print wc.Proxy.GetProxy(Uri("http://www.google.co.jp/"))
# Empty string -> download the BaseAddress itself.
print wc.DownloadString("")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.