text stringlengths 38 1.54M |
|---|
import random

# Account / PIN configuration.
MIN_MONEY = 50
MAX_MONEY = 5000
PIN_LENGTH = 4
DEBUG = True

# Generate a random numeric PIN of PIN_LENGTH digits.
pin = ""
for i in range(PIN_LENGTH):
    pin += str(random.randrange(0, 10))
# Random starting balance.
balance = random.randrange(MIN_MONEY, MAX_MONEY)
if DEBUG:
    # Fixed PIN so the script is testable during development.
    pin = "1234"

# Allow three PIN attempts, then terminate.
remaining_attempts = 3
while remaining_attempts > 0:
    pin_attempt = input("Sir, we require your PIN number: ")
    if pin_attempt == pin:
        break
    remaining_attempts -= 1
if remaining_attempts == 0:
    exit()

menu = """Welcome
---------------------
1- Deposit
2- Cash withdrawal
3- Exit
Choose the operation: """

while True:
    action = input(menu)
    if action == "1" or action == "Deposit":
        amount = int(input("Please enter the amount to deposit (in EUROS): "))
        balance += amount
        # Bug fix: this is the deposit branch — message said "You withdrew"
        # (the two success messages were swapped); also fixed "succesfully" typo.
        print("You deposited " + str(amount) + "€ successfully. Your current balance is: " + str(balance) + "€")
    elif action == "2" or action == "Cash withdrawal":
        amount = int(input("Please enter the amount to withdrawal (in EUROS): "))
        if amount <= balance:
            balance -= amount
            # Bug fix: withdrawal branch previously printed "You deposited".
            print("You withdrew " + str(amount) + "€ successfully. Your current balance is: " + str(balance) + "€")
        else:
            print("Insufficient funds. Your current balance is: " + str(balance) + "€")
    elif action == "3" or action == "Exit":
        exit()
|
import glob
import os
import random
import sys
import gzip
import pickle
import argparse
import numpy as np
from config import BabiConfig, BabiConfigJoint
from train_test import train, train_linear_start, test
from util import parse_babi_task, build_model
import fasttext
# Fix both RNG seeds so training runs are reproducible.
seed_val = 42
random.seed(seed_val)
np.random.seed(seed_val)  # for reproducing
# EMBEDDINGS_MODEL_PATH = '../fastText/result/fil9.bin'
def run_tableQA(data_path, model_file):
    """
    Train and test a MemN2N model for table QA.

    :param data_path: glob template containing a '{}' slot filled with
                      'train' / 'test' to locate the data files.
    :param model_file: gzip path where the trained model pickle is written.
    """
    # Parse data
    train_files = glob.glob(data_path.format('train'))
    test_files = glob.glob(data_path.format('test'))
    # SV: init dict with pre-trained vectors, e.g. from fastText
    # Word->index dictionary, grown in place by parse_babi_task;
    # "nil" is reserved as the padding index 0.
    dictionary = {"nil": 0}
    train_story, train_questions, train_qstory = parse_babi_task(train_files, dictionary, False)
    test_story, test_questions, test_qstory = parse_babi_task(test_files, dictionary, False)
    # Bug fix: Python-2 print statement was a syntax error under Python 3.
    print('Dictionary:', len(dictionary))
    general_config = BabiConfig(train_story, train_questions, dictionary)
    memory, model, loss = build_model(general_config)
    # Linear-start training (no softmax at first) if configured.
    if general_config.linear_start:
        train_linear_start(train_story, train_questions, train_qstory, memory, model, loss, general_config)
    else:
        train(train_story, train_questions, train_qstory, memory, model, loss, general_config)
    test(test_story, test_questions, test_qstory, memory, model, loss, general_config)
    # Persist the model together with an index->word map for decoding answers.
    with gzip.open(model_file, "wb") as f:
        print("Saving model to file %s ..." % model_file)
        reversed_dict = dict((ix, w) for w, ix in dictionary.items())
        pickle.dump((reversed_dict, memory, model, loss, general_config), f)
if __name__ == "__main__":
    # Dataset selector:
    #   test  - small subset of synthetic data
    #           (original MemN2N performance ::: train error: 0 | val error: 0 Test error: 0.000000)
    #   synth - larger set with synthetic data
    #   sim   - simulated data, generated using real table data but with
    #           artificially reduced domain variance
    #   table - real table data extracted from a random open data csv file
    dataset = 'sim'
    data_path = './data/%s_data_{}.txt' % dataset
    run_tableQA(data_path, './trained_model/memn2n_table_qa_model_%s.pklz' % dataset)
|
#!/usr/bin/python
#-*- coding:utf-8 -*-
# Host-inspection script (Python 2): collects CPU / memory / disk / network /
# OpenStack / Ceph status and prints it as JSON. Must run as root because it
# uses smartctl, systemctl and yum. Installs its own dependencies on demand.
import json
import os
import re
import sys
import socket
import commands  # Python 2 only; replaced by subprocess in Python 3

# Root is required for the smartctl/systemctl/yum probes below.
# (Message: "this script must be run as root")
if os.geteuid() != 0 :
    print ("此脚本必须以root用户身份运行")
    sys.exit(-1)
# Ensure psutil is present (used for process liveness checks); install the
# RPM via yum if the package query comes back empty.
psutil_or_not = commands.getoutput("rpm -qa |grep psutil")
if not psutil_or_not:
    result, info = commands.getstatusoutput("yum -y install python2-psutil")
    if result == 0:
        import psutil
    else:
        # Message: "error: psutil module (python2-psutil) failed to install"
        print('错误:psutil模块(python2-psutil)安装失败')
        sys.exit(-1)
else:
    import psutil
# Ensure smartmontools (smartctl) is present for physical-disk health checks.
smartcl_or_not = commands.getoutput("rpm -qa |grep smartmontools")
if not smartcl_or_not:
    result, info = commands.getstatusoutput("yum -y install smartmontools")
    if result != 0:
        # Message: "error: smartmontools failed to install"
        print('错误:smartmontools工具安装失败')
        sys.exit(-1)
class Machine():
    """Collects host hardware, OS, OpenStack and Ceph status into `self.data`
    and serializes it as JSON.

    Python 2 only (uses the `commands` module); every probe shells out to a
    system tool and parses its text output.
    """
    def __init__(self):
        self.data = {}
        # Hostname also decides which checks run later (see get_info()).
        self.data['hostname'] = socket.gethostname()
    def getCPUstatus(self):
        """Parse line 3 of `top` into per-state CPU fields plus an overall used %."""
        self.data['CpuStatus'] = {}
        CPU_Status = commands.getoutput("top -bn 1 -i -c |sed -n '3p' | awk -F [:] '{print $2}'")
        for i in CPU_Status.split(','):
            # Each comma-separated field looks like "<value> <label>".
            self.data['CpuStatus'][i.split()[1]] = i.split()[0]
        CpuUsedPer = commands.getoutput("top -bn 1 -i -c |sed -n '3p'|awk '{print $8}'")
        # Field 8 is the idle percentage, so used = 100 - idle.
        self.data['CpuStatus']['CpuUsedPer'] = str(100 - float(CpuUsedPer)) + '%'
    def getMemStatus(self):
        """Memory totals from /proc/meminfo (values in kB), reported in MB plus used %."""
        MemTotal = commands.getoutput("grep MemTotal /proc/meminfo | awk '{print $2}'")
        MemFree = commands.getoutput("grep MemFree /proc/meminfo | awk '{print $2}'")
        MemUsed = int(MemTotal) - int(MemFree)
        self.data['Mem'] = {}
        self.data['Mem']['Total'] = str(int(MemTotal) / 1024) + 'MB'
        self.data['Mem']['Free'] = str(int(MemFree) / 1024) + 'MB'
        self.data['Mem']['Used'] = str(MemUsed / 1024) + 'MB'
        self.data['Mem']['UsedPercent'] = str(round(float(MemUsed) / float(MemTotal) * 100, 2)) + '%'
    def getDiskStatus(self):
        """Filesystem usage via `df`, skipping tmpfs/devtmpfs and /boot."""
        diskcmd = "df -hTP | sed '1d' | awk '$2!=\"tmpfs\"{print}'|awk '$2!=\"devtmpfs\"{print}'|awk '$7!=\"/boot\"{print}'"
        DiskStatus = commands.getoutput(diskcmd)
        disklist = DiskStatus.split('\n')
        self.data['Disk'] = {}
        for i in disklist:
            # df -hTP columns: device, type, size, used, avail, use%, mountpoint.
            b = i.split()
            self.data['Disk'][b[0]] = {}
            self.data['Disk'][b[0]]['Size'] = b[2]
            self.data['Disk'][b[0]]['Used'] = b[3]
            self.data['Disk'][b[0]]['Avail'] = b[4]
            self.data['Disk'][b[0]]['Use%'] = b[5]
            self.data['Disk'][b[0]]['Mounted on'] = b[6]
    def getPdStatus(self):
        """Physical-disk health via smartctl over the MegaRAID pass-through,
        probing device ids 0..99 until one returns no health line."""
        self.data['Pdisk'] = {}
        for i in range(0, 100):
            j = i + 1
            pdiskHealth = commands.getoutput("/sbin/smartctl -a -d megaraid,%s / |grep 'self-assessment\|SMART Health Status' |awk '{print $NF}'" %i)
            if pdiskHealth:
                self.data['Pdisk'][str(j)] = {}
                pdiskSize = commands.getoutput("/sbin/smartctl -a -d megaraid,%s / |grep -i 'User Capacity' |awk -F '[' '{print $2}'|awk -F ']' '{print $1}'" %i)
                # SMART attribute 245 is used here as a wear-level indicator.
                pdiskWear = str(int(commands.getoutput("/sbin/smartctl -a -d megaraid,%s / |grep '245' |awk '{print $4}'" %i))) + '%'
                pdiskModel = commands.getoutput("/sbin/smartctl -a -d megaraid,%s /|grep 'Device Model\|Transport protocol'|awk '{print $NF}'|cut -b 1,2" %i)
                # First two letters of the model string map to a bus/media type.
                if pdiskModel == 'SS':
                    pdiskType = 'SSD'
                elif pdiskModel == 'SA':
                    pdiskType = 'SAS'
                elif pdiskModel == 'ST':
                    pdiskType = 'SATA'
                else:
                    pdiskType = 'SSD'
                self.data['Pdisk'][str(j)] = {'Size': pdiskSize, 'Wear': pdiskWear, 'Model': pdiskType}
            else:
                # No health output: assume no more disks respond, stop probing.
                break
    def getNetworkStatus(self):
        """NIC link state, globally bound IPs, first DNS server and default gateway."""
        nic = commands.getoutput(
            "/sbin/ip add show | grep -E 'BROADCAST'|grep -v 'veth\|qg\|tap\|qv\|qb\|vir\|br\|docker\|em3\|em4\|ovs-system:\|vxlan_sys' |awk '{print $2$(NF-2)}'")
        niclist = nic.replace(':', ' ').split('\n')
        self.data['Nic'] = {}
        for i in niclist:
            # b[0] = interface name, b[1] = link state.
            b = i.split()
            self.data['Nic'][b[0]] = b[1]
        ip = commands.getoutput(
            "/sbin/ip add show | grep 'inet'|grep 'global'|grep -v 'veth\|qg' |grep 'bond\|br'|grep -v 'virbr' |awk '{print $2}' |awk -F[/] '{print $1}'|tr '\n' ' '")
        iplist = ip.strip().split()
        self.data['Net'] = {}
        self.data['Net']['ip'] = iplist
        dns = commands.getoutput(
            "/bin/grep nameserver /etc/resolv.conf| grep -v '#' | awk '{print $2}'|sed -n '1p'")
        self.data['Net']['dns'] = dns
        gateway = commands.getoutput("/sbin/ip route | grep default | awk '{print $3}'")
        self.data['Net']['gateway'] = gateway
    def getOpenstackSystemStatus(self):
        """systemd-enabled OpenStack/neutron units: mark each as up/down."""
        self.data['OpenstackSystem'] = {}
        OpenstackServer = commands.getoutput(
            "ls -l /etc/systemd/system/multi-user.target.wants/ | egrep 'openstack|neutron' | awk '{print $9}' |grep -v 'ovs'")
        status, result = commands.getstatusoutput("ls -l /etc/systemd/system/multi-user.target.wants/ |grep openstack")
        if status != 0:
            # No OpenStack units are enabled on this host.
            self.data['OpenstackSystem']["System"] = None
        else:
            httpstatus, httpresult = self.ServerCheck('http')
            if httpstatus == 'active':
                self.data['OpenstackSystem'][httpresult] = 'up'
            for i in OpenstackServer.split():
                ServerStatus, ServerResult = self.ServerCheck(i)
                if ServerStatus == 'active':
                    self.data['OpenstackSystem'][i] = 'up'
                else:
                    self.data['OpenstackSystem'][i] = 'down'
    def getOtherServerStatus(self):
        """Status of common infrastructure services (redis, mariadb, rabbitmq, ...)."""
        self.data['OtherServer'] = {}
        otherserver = ['redis.service', 'redis-sentinel.service', 'mariadb', 'rabbitmq', 'mongod', 'memcache', 'haproxy', 'keepalived']
        for i in otherserver:
            status, result = self.ServerCheck(i)
            if status == 'active':
                self.data['OtherServer'][result] = 'up'
            elif status == 1:
                # ServerCheck returns (1, name) when the unit is not enabled.
                self.data['OtherServer'][result] = 'None'
            else:
                self.data['OtherServer'][result] = 'down'
    def getOpenstackServerStatus(self):
        """Agent/service tables from nova/neutron/cinder plus keystone users/tenants."""
        self.data['OpenstackServer'] = {}
        novaserver = commands.getoutput(
            "source /root/keystonerc_admin && nova service-list |grep nova |awk -F '\|' '{print $3\" \"$4\" \"$7}' 2>/dev/null")
        self.OpenStackServerTra(novaserver, 'novaserver')
        neutronserver = commands.getoutput(
            "source /root/keystonerc_admin && neutron agent-list |grep neutron |awk -F '\|' '{print $8\" \"$4\" \"$7}' 2>/dev/null")
        self.OpenStackServerTra(neutronserver, 'neutronserver')
        cinderserver = commands.getoutput(
            "source /root/keystonerc_admin && cinder service-list | grep cinder |awk -F '\|' '{print $2\" \"$3\" \"$6}' 2>/dev/null")
        self.OpenStackServerTra(cinderserver, 'cinderserver')
        user = commands.getoutput(
            "source /root/keystonerc_admin && keystone user-list 2>/dev/null |grep 'nova\|swift\|neutron\|aodh\|ceilometer\|glance\|gnocchi\|heat\|manila\|cinder\|designate' |awk -F '\|' '{print $3\" \"$4}' 2>/dev/null")
        self.OpenStackServerTra(user, 'user')
        tenant = commands.getoutput(
            "source /root/keystonerc_admin && keystone tenant-list 2>/dev/null|grep 'admin\|services' | awk -F '\|' '{print $3\" \"$4}' 2>/dev/null")
        self.OpenStackServerTra(tenant, 'tenant')
    def OpenStackServerTra(self, server, servername):
        """Parse whitespace-separated CLI table rows into self.data['OpenstackServer'].

        Three-field rows become numbered {name: {host, status}} entries;
        two-field rows are stored as plain key/value pairs.
        """
        serverstatus = server.split('\n')
        self.data['OpenstackServer'][servername] = {}
        num = 1
        for i in serverstatus:
            a = i.split(' ')
            m = [x for x in a if x != '']
            if len(m) == 3:
                self.data['OpenstackServer'][servername][num] = {}
                self.data['OpenstackServer'][servername][num][m[0]] = {}
                self.data['OpenstackServer'][servername][num][m[0]] = {'host': m[1], 'status': m[2]}
                num += 1
            else:
                # e.g. keystone user/tenant rows: "name enabled".
                self.data['OpenstackServer'][servername][m[0]] = m[1]
    def ServerCheck(self, server):
        """Return (systemd Active state, unit name) for an enabled unit,
        or (1, server) when the unit is not enabled on this host."""
        serverstatus, serverresult = commands.getstatusoutput("ls -l /etc/systemd/system/multi-user.target.wants/ |grep -w %s |awk '{print $9}'" %server)
        if serverstatus == 0 and serverresult != '':
            status = commands.getoutput("systemctl status %s|grep Active |awk '{print $2}'" %serverresult)
            return status, serverresult
        else:
            return (1, server)
    def getCephClusterStatus(self):
        """Ceph cluster health, capacity, per-OSD status and basic counters."""
        self.data['CephClusterStatus'] = {}
        self.data['CephClusterStatus']['ClusterHealth'] = {}
        CephClusterHealth = commands.getoutput("ceph -s 2>/dev/null|grep health|awk '{print $2}'|awk -F[_] '{print $2}'")
        CephErrorInfo = commands.getoutput("ceph -s 2>/dev/null |awk '/health/,/monmap/{print}'|grep -v 'health\|monmap'")
        self.data['CephClusterStatus']['ClusterHealth']['status'] = CephClusterHealth
        if CephErrorInfo:
            self.data['CephClusterStatus']['ClusterHealth']['error'] = CephErrorInfo
        self.data['CephClusterStatus']['CephClusterVolume'] = {}
        CephClusterVolume = commands.getoutput("ceph osd df 2>/dev/null|grep TOTAL |awk '{print $3\" \"$2\" \"$5\"%\"}'").split()
        self.data['CephClusterStatus']['CephClusterVolume'] = {'Use': CephClusterVolume[0], 'Size': CephClusterVolume[1], 'UsePer': CephClusterVolume[2]}
        self.data['CephClusterStatus']['CephOsdStatus'] = {}
        self.data['CephClusterStatus']['CephOsdStatus']['OsdStatus'] = {}
        self.data['CephClusterStatus']['CephOsdStatus']['VolumeStatus'] = {}
        self.data['CephClusterStatus']['CephOsdStatus']['OsdHealth'] = {}
        CephOsdStatus = commands.getoutput("ceph osd tree 2>/dev/null |grep 'osd\.' |awk '{print $3\" \"$4}'|sort |uniq").split('\n')
        for i in CephOsdStatus:
            osdstatus = i.split()
            self.data['CephClusterStatus']['CephOsdStatus']['OsdStatus'][osdstatus[0]] = osdstatus[1]
        CephVolumeStatus = commands.getoutput("ceph osd df 2>/dev/null|grep -v 'TOTAL\|MIN\|ID' |awk '{print \"osd.\"$1\" \"$4\" \"$5\" \"$7\"%\"}' |sort|uniq").split('\n')
        for j in CephVolumeStatus:
            volumestatus= j.split()
            self.data['CephClusterStatus']['CephOsdStatus']['VolumeStatus'][volumestatus[0]] = {'Size': volumestatus[1], 'Use': volumestatus[2], 'UsePer': volumestatus[3]}
        # Summarize OSD health: any 'down' OSD flips the status to Error.
        osdDict = self.data['CephClusterStatus']['CephOsdStatus']['OsdStatus']
        osdcheck = [k for k, v in osdDict.items() if v == 'down']
        if osdcheck:
            self.data['CephClusterStatus']['CephOsdStatus']['OsdHealth'] = {'status': 'Error', 'osd': osdcheck}
        else:
            self.data['CephClusterStatus']['CephOsdStatus']['OsdHealth'] = {'status': 'OK'}
        osdnum = commands.getoutput("ceph osd ls 2>/dev/null |wc -l")
        self.data['CephClusterStatus']['OsdNum'] = osdnum
        poolnum = commands.getoutput("ceph osd pool ls|wc -l")
        self.data['CephClusterStatus']['PoolNum'] = poolnum
        pgnum = commands.getoutput("ceph pg stat 2>/dev/null |awk '{print $2}'")
        self.data['CephClusterStatus']['PgNum'] = pgnum
    def getQiyunResinStatus(self):
        """Resin app instances: map each resin_<name>_<port>.sh script to up/down
        by checking whether something listens on its port."""
        self.data['Qiyun'] = {}
        resindict = {}
        self.data['Qiyun']['resin'] = {}
        resinfile = commands.getoutput('ls -lh /apps/sh/resin |grep resin')
        for i in resinfile.split('\n'):
            resininfo = re.findall('.*resin_(.*)_(.*).sh', i)
            if resininfo:
                # resininfo[0] is (name, port).
                resindict[resininfo[0][0]] = resininfo[0][1]
        for key, value in resindict.items():
            if self.ProcessCheck(value):
                self.data['Qiyun']['resin'][key] = 'up'
            else:
                self.data['Qiyun']['resin'][key] = 'down'
    def ProcessCheck(self, port):
        """Return True if some process is LISTENing on `port` and its PID exists."""
        serverPID = commands.getoutput("netstat -anlp |grep -w %s |grep LISTEN |awk '{print $7}'|awk -F '/' '{print $1}'" %port)
        try:
            p = psutil.Process(int(serverPID))
            return True
        except Exception as e:
            # No PID found or not an integer -> treat as down.
            return False
    def get_info(self):
        """Run all probes (controller/Ceph/resin ones conditionally) and return
        the collected data as a JSON string."""
        self.getMemStatus()
        self.getCPUstatus()
        self.getDiskStatus()
        self.getPdStatus()
        self.getNetworkStatus()
        self.getOpenstackSystemStatus()
        # Controller-only checks, keyed off the hostname naming convention.
        if 'control' in self.data['hostname']:
            self.getOpenstackServerStatus()
            self.getOtherServerStatus()
        if os.path.exists('/etc/ceph/ceph.client.admin.keyring'):
            self.getCephClusterStatus()
        if os.path.exists('/apps/sh/resin'):
            self.getQiyunResinStatus()
        return json.dumps(self.data)
if __name__ == '__main__':
    machine = Machine()
    # machine.getOpenstackSystem()
    # print Machine().hostname
    # Python 2 print statement: emits the collected status JSON to stdout.
    print machine.get_info()
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author:fsh
#time:'2017/12/9 13:44:56下午'
from django.conf.urls import url,include
from .views import CourseListView,CourseDetailView,CourseVideoView,CommentsView,AddCommentView
# Course app URL routes; the `name=` values are referenced from templates.
urlpatterns = [
    # Course list page
    url(r'^list/$',CourseListView.as_view(),name='course_list'),
    # Course detail page, keyed by numeric course id
    url(r'^detail/(?P<course_id>\d+)/$',CourseDetailView.as_view(),name='course_detail'),
    # Course video/chapter info page
    url(r'^info/(?P<course_id>\d+)/$',CourseVideoView.as_view(),name='course_video'),
    url(r'^comment/(?P<course_id>\d+)/$',CommentsView.as_view(),name='course_comment'),  # course comments
    url(r'^add_comment/$',AddCommentView.as_view(),name='add_comment'),  # add a course comment
]
import re

# Registration numbers like "KL07ab1234": 'kl' (any case), two digits,
# one or two lowercase letters, then four digits.
PLATE_RULE = re.compile(r'[kK][lL][0-9]{2}[a-z]{1,2}\d{4}')

# Fixes: use a context manager so the file is always closed, replace the
# wildcard `from re import *`, compare match results with `is not None`,
# and drop the dead `else: continue` branch.
lst = []
with open("regno", "r") as f:
    for line in f:
        regno = line.rstrip("\n")
        if PLATE_RULE.fullmatch(regno) is not None:
            lst.append(regno)
print(lst)
import json
class JSONHandler:
    """Dispatches JSON-encoded bot commands; echoes every other message back."""
    def __init__(self, bot):
        self.bot = bot
    def process(self, jsonData, connection):
        """Handle one JSON message; return a (handled, payload) pair."""
        data = json.loads(jsonData)
        if data["type"] == "cmd":
            cmd_name = data["cmdtype"]
            if cmd_name not in self.bot.commands:
                return (True, "[+] COMMAND NOT SUPPORTED")
            handler = self.bot.commands[cmd_name]()
            handler.setup(data["payload"])
            return (True, handler.execute())
        # Non-command messages are echoed back as UTF-8 encoded JSON.
        return (True, json.dumps(data).encode("utf-8"))
# coding=utf-8
def choose_sum(number_list: list, sum_number: int, number: int) -> int:
    """Count the ways to pick `number` distinct entries of `number_list`
    that sum to `sum_number`.

    Classic include/exclude recursion on the head of the list.
    """
    if number == 1:
        # One pick left: success iff the remaining target is available.
        return 1 if sum_number in number_list else 0
    if not number_list:
        return 0
    head, tail = number_list[0], number_list[1:]
    # Either skip the head, or use it and shrink both target and count.
    return choose_sum(tail, sum_number, number) + choose_sum(tail, sum_number - head, number - 1)
if __name__ == '__main__':
    # Read "n x" pairs from stdin until the "0 0" sentinel; for each, count
    # the ways to choose 3 distinct numbers from 1..n that sum to x.
    while True:
        n, x = map(int, input().split())
        if n == 0 and x == 0:
            break
        used_numbers = [i for i in range(1, n+1)]
        patterns = choose_sum(used_numbers, x, 3)
        print(patterns)
|
from pymongo import MongoClient
from pymongo.son_manipulator import ObjectId
import os
import numpy as np
import datetime
import time
# Shared DAQ database handle; connection string comes from the environment.
__client = MongoClient(os.environ['MONGO_DAQ_URI'])
db = __client['xebra_daq']
experiment = 'xebra'
n_pmts = 8  # number of PMT channels
drift_length = 7 # in cm
MAX_RUN_ID = 999999 # because reasons
def _GetRundoc(run_id):
    """Fetch the run document for `run_id` (capped at MAX_RUN_ID), or None."""
    capped_id = min(int(run_id), MAX_RUN_ID)
    return db['runs'].find_one({'run_id': capped_id, 'experiment': experiment})
def GetRawPath(run_id):
    """Directory where raw data for `run_id` is written.

    NOTE(review): currently hard-coded to the live directory. The original
    rundoc-based lookup (doc['data']['raw']['location'], falling back to
    '/data/storage/strax/raw/unsorted/<run_id>') was unreachable dead code
    below the early return — removed here; confirm the override is intended.
    """
    doc = _GetRundoc(run_id)  # kept so the run-id validation/DB access pattern is unchanged
    return '/data/storage/strax/raw/live'
def GetReadoutThreads(run_id):
    """Number of processing threads for the charon reader; defaults to 2."""
    rundoc = _GetRundoc(run_id)
    try:
        # TypeError covers the no-rundoc (None) case.
        return rundoc['config']['processing_threads']['charon_reader_0']
    except (TypeError, KeyError):
        return 2
def GetGains(run_id):
    """Per-PMT gains (adc_to_pe) for a run, linearly interpolated in time
    between the nearest calibrations before and after the run start.

    Falls back to unity gains when there is no rundoc or no earlier
    calibration, and to the earlier calibration alone when no later one exists.
    """
    doc = _GetRundoc(run_id)
    if doc is None:
        return np.ones(n_pmts)
    run_start = datetime.datetime.timestamp(doc['start'])
    # Latest calibration at or before the run start.
    try:
        earlier_doc = list(db['pmt_gains'].find({'time' : {'$lte' : run_start}}).sort([('time', -1)]).limit(1))[0]
    except IndexError:
        return np.ones(n_pmts)
    # Earliest calibration at or after the run start.
    try:
        later_doc = list(db['pmt_gains'].find({'time' : {'$gte' : run_start}}).sort([('time', 1)]).limit(1))[0]
    except IndexError:
        return np.array(earlier_doc['adc_to_pe'])
    earlier_cal = earlier_doc['time']
    later_cal = later_doc['time']
    # Interpolate each channel's gain at the run-start timestamp.
    return np.array([np.interp(doc['start'].timestamp(),
                               [earlier_cal,later_cal],
                               [earlier_doc['adc_to_pe'][ch], later_doc['adc_to_pe'][ch]])
                     for ch in range(len(earlier_doc['adc_to_pe']))])
def GetELifetime(run_id):
    """Electron lifetime in ns; currently a fixed 10 us placeholder."""
    electron_lifetime_ns = 10e3
    return electron_lifetime_ns
def GetRunStart(run_id):
    """Run start time in ns since epoch; falls back to 'now' without a rundoc."""
    rundoc = _GetRundoc(run_id)
    if rundoc is None:
        return int(time.time()*1e9)
    return int(rundoc['start'].timestamp()*1e9)
def GetNChan(run_id):
    """Channel count of the run's first board, defaulting to n_pmts."""
    rundoc = _GetRundoc(run_id)
    try:
        # TypeError covers the no-rundoc (None) case.
        board_id = rundoc['config']['boards'][0]['board']
        return len(rundoc['config']['channels'][str(board_id)])
    except (TypeError, KeyError):
        return n_pmts
def GetDriftVelocity(run_id):
    """Electron drift velocity in cm/ns, derived from the cathode voltage when known."""
    rundoc = _GetRundoc(run_id)
    if rundoc is not None and 'cathode_mean' in rundoc:
        # from Jelle's thesis: v (mm/us) = 0.71*field**0.15 (V/cm)
        gate_mean = rundoc['cathode_mean'] - 280 * rundoc['cathode_current_mean']
        field = (rundoc['cathode_mean'] - gate_mean) / drift_length
        return 7.1e-4 * field**0.15
    return 1.8e-3  # default corresponds to 500 V/cm
|
from __future__ import annotations
from typing import final, List, Optional
class T:
    """Base class of the type lattice; `base` marks scalar (non-container) types.

    Subclasses define `__le__` to give the subtype partial order used by
    `supt`/`subt`.
    """
    def __init__(self, base: bool) -> None:
        self.__base: bool = base
    @classmethod
    def supt(cls, t1: T, t2: T) -> Optional[T]:
        # Least upper bound of t1 and t2, or None if they are incomparable.
        return t2 if t1 <= t2 else t1 if t2 <= t1 else None
    @classmethod
    def subt(cls, t1: T, t2: T) -> Optional[T]:
        # Greatest lower bound of t1 and t2, or None if they are incomparable.
        return t1 if t1 <= t2 else t2 if t2 <= t1 else None
    @property
    def base(self) -> bool:
        return self.__base
@final
class Real(T):
    """Real-number scalar type (lazy singleton)."""
    __inst: Optional[Real] = None
    def __init__(self) -> None:
        super().__init__(True)
    def __le__(self, other: T) -> bool:
        # Real widens to Complex; Sym is the top type.
        return type(other) in (Real, Cmplx, Sym)
    def __str__(self) -> str:
        return 'Real'
    @classmethod
    def inst(cls) -> Real:
        """Return the singleton instance, creating it lazily on first use.

        :return: Singleton object.
        :rtype: Real
        """
        if cls.__inst is None:
            cls.__inst = Real()
        return cls.__inst
@final
class Cmplx(T):
    """Complex-number scalar type (lazy singleton)."""
    __inst: Optional[Cmplx] = None
    def __init__(self) -> None:
        super().__init__(True)
    def __le__(self, other: T) -> bool:
        # Complex is below only itself and the top type Sym.
        return type(other) in (Cmplx, Sym)
    def __str__(self) -> str:
        return 'Complex'
    @classmethod
    def inst(cls) -> Cmplx:
        """Return the singleton instance, creating it lazily on first use.

        :return: Singleton object.
        :rtype: Cmplx
        """
        if cls.__inst is None:
            cls.__inst = Cmplx()
        return cls.__inst
@final
class Str(T):
    """String scalar type (lazy singleton)."""
    __inst: Optional[Str] = None
    def __init__(self) -> None:
        super().__init__(True)
    def __le__(self, other: T) -> bool:
        # String is below only itself and the top type Sym.
        return type(other) in (Str, Sym)
    def __str__(self) -> str:
        return 'String'
    @classmethod
    def inst(cls) -> Str:
        """Return the singleton instance, creating it lazily on first use.

        :return: Singleton object.
        :rtype: Str
        """
        if cls.__inst is None:
            cls.__inst = Str()
        return cls.__inst
@final
class Bool(T):
    """Boolean scalar type (lazy singleton)."""
    __inst: Optional[Bool] = None
    def __init__(self) -> None:
        super().__init__(True)
    def __le__(self, other: T) -> bool:
        # Bool is below only itself and the top type Sym.
        return type(other) in (Bool, Sym)
    def __str__(self) -> str:
        return 'Bool'
    @classmethod
    def inst(cls) -> Bool:
        """Return the singleton instance, creating it lazily on first use.

        :return: Singleton object.
        :rtype: Bool
        """
        if cls.__inst is None:
            cls.__inst = Bool()
        return cls.__inst
@final
class Sym(T):
    """Symbol type — the top of the lattice: only comparable to itself."""
    __inst: Optional[Sym] = None
    def __init__(self) -> None:
        super().__init__(True)
    def __le__(self, other: T) -> bool:
        return type(other) is Sym
    def __str__(self) -> str:
        return 'Symbol'
    @classmethod
    def inst(cls) -> Sym:
        """Return the singleton instance, creating it lazily on first use.

        :return: Singleton object.
        :rtype: Sym
        """
        if cls.__inst is None:
            cls.__inst = Sym()
        return cls.__inst
@final
class Void(T):
    """Void/unit type (lazy singleton)."""
    __inst: Optional[Void] = None
    def __init__(self) -> None:
        super().__init__(True)
    def __le__(self, other: T) -> bool:
        # Void is below only itself and the top type Sym.
        return type(other) in (Void, Sym)
    def __str__(self) -> str:
        return 'Void'
    @classmethod
    def inst(cls) -> Void:
        """Return the singleton instance, creating it lazily on first use.

        :return: Singleton object.
        :rtype: Void
        """
        if cls.__inst is None:
            cls.__inst = Void()
        return cls.__inst
@final
class Tens(T):
    """Homogeneous n-fold tensor type with concrete integer dimensions."""
    def __init__(self, chd_t: T, dim: List[int]) -> None:
        super().__init__(False)
        self.__chd_t: T = chd_t
        self.__dim: List[int] = dim
        # The fold (rank) is the number of dimensions.
        self.__fold: int = len(dim)
    def __le__(self, other: T) -> bool:
        kind = type(other)
        if kind is Sym:
            return True
        if kind is Tens:
            return self.__chd_t <= other.chd_t and len(self.__dim) == len(other.dim)
        if kind is Arr:
            return self.__chd_t <= other.chd_t and len(self.__dim) == other.fold
        return False
    def __eq__(self, other: T) -> bool:
        return type(other) is Tens and self.__chd_t == other.chd_t and self.__fold == other.fold
    def __ne__(self, other: T) -> bool:
        return type(other) is not Tens or self.__chd_t != other.chd_t or self.__fold != other.fold
    def __str__(self) -> str:
        return f'List of {self.__chd_t} ({self.__fold} fold)'
    @property
    def chd_t(self) -> T:
        return self.__chd_t
    @property
    def dim(self) -> List[int]:
        return self.__dim
    @property
    def fold(self) -> int:
        return self.__fold
@final
class Arr(T):
    """Array type of a given fold depth whose dimensions may be ragged/unknown."""
    def __init__(self, chd_t: T, fold: int, dim: List[T] = None) -> None:
        super().__init__(False)
        self.__chd_t: T = chd_t
        self.__fold: int = fold
        self.__dim: List[T] = dim
    def __le__(self, other: T) -> bool:
        if type(other) is Sym:
            return True
        return type(other) is Arr and self.__chd_t <= other.chd_t and self.__fold == other.fold
    def __eq__(self, other: T) -> bool:
        return type(other) is Arr and self.__chd_t == other.chd_t and self.__fold == other.fold
    def __ne__(self, other: T) -> bool:
        return type(other) is not Arr or self.__chd_t != other.chd_t or self.__fold != other.fold
    def __str__(self) -> str:
        return f'List of {self.__chd_t} ({self.__fold} fold)'
    @property
    def chd_t(self) -> T:
        return self.__chd_t
    @property
    def dim(self) -> List[T]:
        return self.__dim
    @property
    def fold(self) -> int:
        return self.__fold
@final
class ArrFact:
    """Factory/helper for building and transforming array types (singleton)."""
    __inst: Optional[ArrFact] = None
    @classmethod
    def inst(cls) -> ArrFact:
        """Return the lazily created singleton instance."""
        if not cls.__inst:
            cls.__inst = ArrFact()
        return cls.__inst
    def get_arr_t(self, chd_t: List[T]) -> Optional[T]:
        """Infer the container type of an array literal from its element types.

        Returns None when the element types have no common supertype.
        """
        if not chd_t:
            # Empty literal: zero-length tensor of Void.
            return Tens(Void.inst(), [0])
        # Fold the least upper bound over all element types.
        res_t: T = chd_t[0]
        for i in range(len(chd_t) - 1):
            res_t = T.supt(res_t, chd_t[i + 1])
            if not res_t:
                return None
        if res_t.base:
            if type(res_t) == Void:
                return Tens(Void.inst(), [0])
            elif type(res_t) == Sym:
                return res_t
            else:
                # Scalar elements: a 1-dimensional tensor of that scalar.
                return Tens(res_t, [len(chd_t)])
        if type(res_t) == Arr:
            return Arr(res_t.chd_t, res_t.fold, chd_t)
        else:
            if len(chd_t) == 1:
                return Tens(chd_t[0].chd_t, [1, *res_t.dim])
            homo = all([d is not None for d in res_t.dim])
            # Bug fix: start the comparison at index 1 — the original started
            # at 2 and never compared chd_t[1].dim, so a shape mismatch in the
            # second element was silently accepted as homogeneous.
            i: int = 1
            while homo and i < len(chd_t):
                homo &= (chd_t[0].dim == chd_t[i].dim)
                i += 1
            return Tens(res_t.chd_t, [len(chd_t), *res_t.dim]) if homo else Arr(res_t.chd_t, len(res_t.dim) + 1, chd_t)
    def coerce_arr_t(self, src: T, chd_t: T) -> T:
        """Rebuild `src`'s container type with the element type replaced by `chd_t`."""
        if type(chd_t) == Sym:
            return chd_t
        else:
            return Tens(chd_t, src.dim) if type(src) == Tens else Arr(chd_t, src.fold, src.dim)
    def idx_arr_t(self, src: T) -> T:
        """Type of `src[...]`: peel one dimension/fold off the container."""
        if type(src) == Tens:
            return src.chd_t if src.fold == 1 else Tens(src.chd_t, src.dim[1:])
        else:
            return Arr(src.chd_t, src.fold - 1)
|
import redis
from common.message import msg_const
from exts import logger, config
redis_valid_time = 60 * 60  # default key TTL: one hour

class RedisClient:
    """Thin Redis wrapper that logs connection failures instead of raising."""
    def __init__(self):
        self.host = config.REDIS_HOST
        self.port = config.REDIS_PORT
        # Bug fix: the connection pool is created once and reused. Previously
        # the `redis_client` property built a brand-new ConnectionPool on
        # every command, which leaked sockets and defeated pooling entirely.
        self._pool = None

    @property
    def redis_client(self):
        """Return a StrictRedis client backed by the shared pool, or None on failure."""
        try:
            if self._pool is None:
                self._pool = redis.ConnectionPool(host=self.host, port=self.port, max_connections=10000)
            client = redis.StrictRedis(connection_pool=self._pool)
        except Exception as e:
            logger.info(e)
            logger.error(msg_const.REDIS_CONNECTION_500)
            return None
        else:
            return client

    def get(self, key):
        """Fetch `key`; tries to eval the stored payload, else decodes as UTF-8."""
        try:
            redis_instance = self.redis_client.get(key)
            if not redis_instance:
                return None
            try:
                # NOTE(security): eval() on data read back from Redis executes
                # arbitrary expressions; kept for backward compatibility, but
                # ast.literal_eval would be safer if only literals are stored.
                res = eval(redis_instance)
            except Exception:  # was a bare except: — narrowed, same behavior
                res = str(redis_instance, encoding='utf-8')
            return res
        except Exception as e:
            logger.info(e)
            logger.error(msg_const.REDIS_CONNECTION_500)
            return None

    def set(self, key, value, default_valid_time=redis_valid_time):
        """Set `key` with a TTL (seconds)."""
        try:
            self.redis_client.set(key, value, default_valid_time)
        except Exception as e:
            logger.info(e)
            logger.error(msg_const.REDIS_CONNECTION_500)

    def setnx(self, key, value, time=5):
        """Set `key` only if it does not exist, with a short TTL (lock-style)."""
        try:
            return self.redis_client.set(key, value, ex=time, nx=True)
        except Exception as e:
            logger.info(e)
            logger.error(msg_const.REDIS_CONNECTION_500)

    def delete(self, key):
        try:
            self.redis_client.delete(key)
        except Exception as e:
            logger.info(e)
            logger.error(msg_const.REDIS_CONNECTION_500)

    def incr_instance(self, key, amount=1):
        try:
            self.redis_client.incr(key, amount)
        except Exception as e:
            logger.info(e)
            logger.error(msg_const.REDIS_CONNECTION_500)

    def decr_instance(self, key, amount=1):
        try:
            self.redis_client.decr(key, amount)
        except Exception as e:
            logger.info(e)
            logger.error(msg_const.REDIS_CONNECTION_500)

    def m_get(self, keys):
        try:
            return self.redis_client.mget(keys)
        except Exception as e:
            logger.info(e)
            logger.error(msg_const.REDIS_CONNECTION_500)

    def m_set(self, **kwargs):
        try:
            return self.redis_client.mset(**kwargs)
        except Exception as e:
            logger.info(e)
            logger.error(msg_const.REDIS_CONNECTION_500)

    def expire_at(self, key, value):
        """Expire `key` at an absolute unix timestamp."""
        try:
            return self.redis_client.expireat(key, value)
        except Exception as e:
            logger.info(e)
            logger.error(msg_const.REDIS_CONNECTION_500)

    def register_script(self, script, keys, args):
        """Register a Lua script and invoke it once with the given keys/args."""
        try:
            cmd = self.redis_client.register_script(script)
            return cmd(keys=keys, args=args)
        except Exception as e:
            logger.info(e)
            logger.error(msg_const.REDIS_CONNECTION_500)

redis_cli = RedisClient()
|
from imdb.utils.config import base_uri, imdb_uris, tag_search
from imdb.utils.helpers import catch, dataframe_data, external_site, unicode
from imdb.utils.utils import BeautifulSoup, get, pd, re
# Retrieves External Sites Details
class external_sites:
    """
    Collects External Sites details of the multi-media content in IMDb when title_id is given.
    :param title_id: Unique identification for every multimedia in IMdb.
    """
    def __init__(self, title_id):
        self.title_id = title_id
        self.external_sites_url = imdb_uris["externalsites"] % self.title_id
        # Single HTTP fetch; every attribute below is scraped from this soup.
        # `catch` returns a typed fallback ('None'/'list'/'dict') when the
        # lambda raises, so missing page elements degrade gracefully.
        soup = BeautifulSoup(get(self.external_sites_url).text, 'lxml')
        """
        :returns: Movie Title
        """
        movie_tag = catch(
            'None', lambda: soup.select_one('h3[itemprop="name"]'))
        self.title = catch('None', lambda: unicode(movie_tag.a.get_text()))
        self.title_url = catch('None', lambda: unicode(
            '%s%s' % (base_uri, movie_tag.a['href'][1:])))
        # Release year: first integer found in the .nobr span next to the title.
        self.year = catch('None', lambda: int(re.findall(
            r"\d+", unicode(movie_tag.select_one('.nobr').get_text()))[0]))
        """
        returns: Official Sites DataFrame if available.
        """
        self.official_sites_df = catch(
            'None', lambda: external_site(tag_search['official'], soup))
        self.official_sites_names = catch(
            'list', lambda: self.official_sites_df.Name.tolist())
        self.official_sites_urls = catch(
            'list', lambda: self.official_sites_df.URI.tolist())
        """
        returns: Miscellaneous Sites DataFrame if available.
        """
        self.miscellaneous_sites_df = catch(
            'None', lambda: external_site(tag_search['miscellaneous'], soup))
        self.miscellaneous_sites_names = catch(
            'list', lambda: self.miscellaneous_sites_df.Name.tolist())
        self.miscellaneous_sites_urls = catch(
            'list', lambda: self.miscellaneous_sites_df.URI.tolist())
        """
        returns: Photographs Sites DataFrame if available.
        """
        self.photographs_sites_df = catch(
            'None', lambda: external_site(tag_search['photo'], soup))
        self.photographs_sites_names = catch(
            'list', lambda: self.photographs_sites_df.Name.tolist())
        self.photographs_sites_urls = catch(
            'list', lambda: self.photographs_sites_df.URI.tolist())
        """
        returns: Videos Clips and Trailers Sites DataFrame if available.
        """
        self.videos_clips_and_trailers_sites_df = catch(
            'None', lambda: external_site(tag_search['videos'], soup))
        self.videos_clips_and_trailers_sites_names = catch(
            'list', lambda: self.videos_clips_and_trailers_sites_df.Name.tolist())
        self.videos_clips_and_trailers_sites_urls = catch(
            'list', lambda: self.videos_clips_and_trailers_sites_df.URI.tolist())
        """
        :returns: Creates Meta Data from the above info. if available.
        """
        # Aggregate view of everything scraped above, for easy JSON export.
        self.imdb_external_sites_metadata = catch('dict', lambda: {"Movie Title": self.title,
                                                                   "Movie URL": self.external_sites_url,
                                                                   "Title ID": self.title_id,
                                                                   "Year": self.year,
                                                                   "Official Sites": {"Name": self.official_sites_names,
                                                                                      "URL": self.official_sites_urls},
                                                                   "Miscellaneous Sites": {"Name": self.miscellaneous_sites_names,
                                                                                           "URL": self.miscellaneous_sites_urls},
                                                                   "Photographs": {"Name": self.photographs_sites_names,
                                                                                   "URL": self.photographs_sites_urls},
                                                                   "Video Clips and Trailers": {"Name": self.videos_clips_and_trailers_sites_names,
                                                                                                "URL": self.videos_clips_and_trailers_sites_urls}})
|
from flask import Flask, render_template, request, jsonify, url_for, session, redirect
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
# NOTE(review): four slashes make this an absolute path (/sqlite3/todo.db) —
# confirm that directory exists; 'sqlite:///todo.db' would be app-relative.
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////sqlite3/todo.db'
db = SQLAlchemy(app)
# Module-level mutable "current task-list id": set by complete(), read by newtask().
z = ""
# This Class Creates table for TaskList
class TaskList(db.Model):
    """A named to-do list; `tasks` holds the raw comma-separated task names."""
    __tablename__ = 'TaskList'
    id = db.Column('id', db.Integer, primary_key=True)
    # Display name of the list.
    name = db.Column("name", db.String(200))
    # Comma-separated task names as entered by the user; split apart in add().
    tasks = db.Column("tasks", db.String(300))
# This Class Creates table for Task
class Task(db.Model):
    """One task row belonging to a TaskList."""
    __tablename__ = 'Task'
    SNo = db.Column("Sno", db.Integer, primary_key=True, autoincrement=True)
    # Foreign key to the owning TaskList (shared by all tasks of that list).
    id = db.Column('id', db.Integer, db.ForeignKey("TaskList.id"))
    name = db.Column("name", db.Unicode)
    # Completion flag (True = done); named 'tasks' for historical reasons.
    tasks = db.Column("tasks", db.Boolean)
# Home Page
# Home Page
@app.route('/api')
def index():
    """Render the landing page."""
    return render_template('index.html')
# This function returns the template for New User.
# This function returns the template for New User.
@app.route('/api/tasks')
def api():
    """Render the task-list creation page for a new user."""
    return render_template('tasklist.html')
# This function adds TaskLists into the Table TaskList.
# This function adds TaskLists into the Table TaskList.
@app.route('/add', methods=['POST'])
def add():
    """Create a TaskList row from the form, then one Task row per comma-separated name."""
    sodo = TaskList(id=request.form['id'], name=request.form['name'], tasks=request.form['tasks'])
    db.session.add(sodo)
    db.session.commit()
    todo = TaskList.query.filter_by(id=int(request.form['id'])).first()
    id = todo.id  # NOTE(review): shadows the builtin `id`
    print(id)
    result = (todo.tasks)
    # The 'tasks' column holds a comma-separated string of task names.
    res = result.split(",")
    print(res)
    for name in res:
        todo = Task(id=id, name=name, tasks=False)
        db.session.add(todo)
        db.session.commit()
    print(id)
    return redirect(url_for("complete", id=int(request.form['id'])))
# This function adds the new task for existing Task List
@app.route("/newtask", methods=["POST"])
def newtask():
    """Append one task (form field 'tsk') to the currently viewed list.

    NOTE(review): relies on the module-level ``z`` set by complete() --
    this fails if /newtask is hit before any list page was opened, and
    the value is shared across concurrent users.
    """
    print("HI")
    todo = Task(id=z, name=(request.form['tsk']), tasks=False)
    db.session.add(todo)
    db.session.commit()
    print(z)
    print(todo)
    return redirect(url_for("complete", id=z))
# This function returns the Todo Lists of User.
@app.route('/api/task/<id>')
def complete(id):
    """Render list *id*, with its tasks split into incomplete/complete."""
    global z
    incomplete = Task.query.filter_by(id=int(id), tasks=False).all()
    complete = Task.query.filter_by(id=int(id), tasks=True).all()
    nm = TaskList.query.filter_by(id=int(id)).all()
    # Remember the last viewed list id for newtask() (module-level state).
    z = int(id)
    return render_template('display.html', incomplete=incomplete, complete=complete, name=nm)
# This function updates the Task completion
@app.route('/complete/<id>')
def completed(id):
    """Mark the first incomplete task of list *id* as done, then redirect
    back to the list page.

    Fix: when every task is already complete, ``first()`` returns None and
    the original code raised AttributeError; now it simply redirects.
    """
    todo = Task.query.filter_by(id=int(id), tasks=False).first()
    if todo is not None:
        todo.tasks = True
        db.session.commit()
        print(todo)
    return redirect(url_for('complete', id=id))
if __name__ == '__main__':
    # Development server only; debug=True must not be used in production.
    app.run(debug=True)
|
"""
Copyright (c) 2013 Timon Wong
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import sublime
import sublime_plugin
import codecs
import os
import locale
import subprocess
import sys
import tempfile
import threading
import time
import types
from functools import partial
# True on Python 3 (Sublime Text 3); selects py2/py3 code paths below.
PY3K = sys.version_info >= (3, 0, 0)
if PY3K:
    from imp import reload
# Reload any previously-loaded plugin submodules so that editing the plugin
# takes effect without restarting the editor.
# Fix: iterate over a snapshot -- reload() may add entries to sys.modules,
# and mutating a dict while iterating its view raises RuntimeError on Py3.
for key in list(sys.modules.keys()):
    if 'OmniMarkupLib' in key:
        try:
            mod = sys.modules[key]
            if isinstance(mod, types.ModuleType):
                reload(mod)
        except Exception:
            # Best effort: a module that fails to reload keeps its old code.
            pass
if PY3K:
from .OmniMarkupLib import log, Server
from .OmniMarkupLib.Setting import Setting
from .OmniMarkupLib.RendererManager import RenderedMarkupCache, RendererManager
from .OmniMarkupLib.Common import Singleton
from .OmniMarkupLib import desktop
else:
exec('import OmniMarkupLib.LinuxModuleChecker')
from OmniMarkupLib import log, Server
from OmniMarkupLib.Setting import Setting
from OmniMarkupLib.RendererManager import RenderedMarkupCache, RendererManager
from OmniMarkupLib.Common import Singleton
from OmniMarkupLib import desktop
def launching_web_browser_for_url(url, success_msg_default=None, success_msg_user=None):
    """Open *url* in the user-configured browser, or the OS default one.

    :param url: URL (or exported file path) to open.
    :param success_msg_default: status-bar message when the default browser is used.
    :param success_msg_user: status-bar message when the user command is used.

    Fix: the original bare ``except:`` also referenced ``setting`` which is
    unbound if ``Setting.instance()`` itself raised (NameError); ``setting``
    is now pre-initialised and the handler catches ``Exception`` only.
    """
    setting = None
    try:
        setting = Setting.instance()
        if setting.browser_command:
            # Expand env vars and substitute {url} into each command argument.
            browser_command = [os.path.expandvars(arg).format(url=url)
                               for arg in setting.browser_command]
            if os.name == 'nt':
                # unicode arguments broken under windows
                encoding = locale.getpreferredencoding()
                browser_command = [arg.encode(encoding) for arg in browser_command]
            subprocess.Popen(browser_command)
            if success_msg_user:
                sublime.status_message(success_msg_user)
        else:
            # Default web browser
            desktop.open(url)
            if success_msg_default:
                sublime.status_message(success_msg_default)
    except Exception:
        # `setting` may still be None if Setting.instance() itself failed.
        if setting is not None and setting.browser_command:
            log.exception('Error while launching user defined web browser')
        else:
            log.exception('Error while launching default web browser')
def ReloadNotify():
    """Tell the user to refresh the browser after a template change."""
    msg = 'HTML Template updated, please reload your browser'
    sublime.status_message(msg)
    log.info(msg)
class DefaultCommand(sublime_plugin.ApplicationCommand):
    """Switch the preview to the default HTML template."""

    def run(self):
        Setting.instance().load_setting()
        ReloadNotify()
class GithubCommand(sublime_plugin.ApplicationCommand):
    """Switch the preview to the GitHub HTML template."""

    def run(self):
        Setting.instance().load_github()
        ReloadNotify()
class AsciidoctorCommand(sublime_plugin.ApplicationCommand):
    """Switch the preview to the Asciidoctor HTML template."""

    def run(self):
        Setting.instance().load_asciidoctor()
        ReloadNotify()
class OmniMarkupPreviewCommand(sublime_plugin.TextCommand):
    """Open (or refresh) the live HTML preview of the current buffer."""

    def run(self, edit, immediate=True):
        # Whether RendererManager is finished loading?
        if not RendererManager.ensure_started():
            sublime.status_message('MarkupPreview has not yet started')
            return
        buffer_id = self.view.buffer_id()
        # Opened in a tab already?
        # NOTE(review): self.view itself normally belongs to the window, so
        # this scan looks like it always matches -- confirm intended semantics.
        opened = False
        for view in self.view.window().views():
            if view.buffer_id() == buffer_id:
                opened = True
                break
        if not opened:
            # Fix: honour the caller-supplied `immediate` flag; it was
            # previously hard-coded to True, silently ignoring the parameter.
            RendererManager.enqueue_view(self.view, immediate=immediate)
        host = Setting.instance().server_host
        port = Setting.instance().server_port
        if host == '0.0.0.0':
            # Server listens on all interfaces; browse via loopback.
            host = '127.0.0.1'
        url = 'http://%s:%d/view/%d' % (host, port, buffer_id)
        # Open with the default browser
        log.info('Launching web browser for %s', url)
        launching_web_browser_for_url(
            url,
            success_msg_default='Preview launched in default web browser',
            success_msg_user='Preview launched in user defined web browser')

    def is_enabled(self):
        return RendererManager.any_available_renderer_for_view(self.view)
class OmniMarkupCleanCacheCommand(sublime_plugin.ApplicationCommand):
    """Drop every cached rendered-markup entry."""

    def run(self):
        RenderedMarkupCache.instance().clean()
class OmniMarkupExportCommand(sublime_plugin.TextCommand):
    """Export the rendered HTML of the current view to a file and/or clipboard."""

    def copy_to_clipboard(self, html_content):
        # Place the rendered HTML on the system clipboard.
        sublime.set_clipboard(html_content)
        sublime.status_message('Exported result copied to clipboard')

    def write_to_file(self, html_content, setting):
        """Write *html_content* to disk and return the chosen file name.

        Target resolution: '.' means "next to the source file"; an unsaved
        buffer or an unusable folder falls back to a temporary file.
        """
        target_folder = setting.export_options.get('target_folder', '.')
        if target_folder is not None:
            fullpath = self.view.file_name() or ''
            timestamp_format = setting.export_options.get('timestamp_format', '_%y%m%d%H%M%S')
            timestr = time.strftime(timestamp_format, time.localtime())
            if (not os.path.exists(fullpath) and target_folder == '.') or \
                    not os.path.isdir(target_folder):
                # Unsaved buffer with relative target, or bogus folder:
                # fall through to the tempfile branch below.
                target_folder = None
            elif target_folder == '.':
                fn_base, _ = os.path.splitext(fullpath)
                html_fn = '%s%s.html' % (fn_base, timestr)
            elif not os.path.exists(fullpath):
                html_fn = os.path.join(target_folder, 'Untitled%s.html' % timestr)
            else:
                fn_base = os.path.basename(fullpath)
                html_fn = os.path.join(target_folder, '%s%s.html' % (fn_base, timestr))
        # No target folder, create file in temporary directory
        if target_folder is None:
            with tempfile.NamedTemporaryFile(delete=False, suffix='.html') as f:
                html_fn = f.name
        with codecs.open(html_fn, 'w', encoding='utf-8') as html_file:
            html_file.write(html_content)
        log.info('Successfully exported to: %s', html_fn)
        return html_fn

    def run(self, edit, clipboard_only=False):
        view = self.view
        try:
            html_content = RendererManager.render_view_as_html(view)
            if clipboard_only:
                self.copy_to_clipboard(html_content)
                return
            setting = Setting.instance()
            html_fn = self.write_to_file(html_content, setting)
            # Copy contents to clipboard
            if setting.export_options.get('copy_to_clipboard', False):
                self.copy_to_clipboard(html_content)
            # Open output file if necessary
            if setting.export_options.get('open_after_exporting', False):
                log.info('Launching web browser for %s', html_fn)
                launching_web_browser_for_url(html_fn)
        except NotImplementedError:
            # No renderer claims this view type; silently ignore.
            pass
        except:
            sublime.error_message('Error while exporting, please check your console for more information.')
            log.exception('Error while exporting')

    def is_enabled(self):
        return RendererManager.any_available_renderer_for_view(self.view)
class ThrottleQueue(threading.Thread):
    """Background thread coalescing rapid re-render requests per view.

    Views are queued with a timeout; when it expires the view is handed to
    RendererManager on the main thread.  A preemptive put cancels any
    pending entry and renders immediately.
    """

    WAIT_TIMEOUT = 0.02  # seconds; granularity of the countdown loop

    class Entry(object):
        """Pending render request for a single view."""

        def __init__(self, view, timeout):
            self.view = view
            # Fix: the view id was never stored, so __cmp__/__hash__ raised
            # AttributeError whenever an Entry was hashed or compared.
            self.id = view.id()
            self.filename = view.file_name()
            self.timeout = timeout

        def __cmp__(self, other):
            return self.id == other.id

        def __hash__(self):
            return hash(self.id)

    def __init__(self):
        threading.Thread.__init__(self)
        self.mutex = threading.Lock()
        self.cond = threading.Condition(self.mutex)
        self.stopping = False
        self.last_signaled = time.time()
        # view id -> Entry awaiting its timeout
        self.view_entry_mapping = {}

    def put(self, view, preemptive=True, timeout=0.5):
        """Request a (re)render of *view*, now (preemptive) or after *timeout*."""
        if not RendererManager.any_available_renderer_for_view(view):
            return
        view_id = view.id()
        now = time.time()
        with self.mutex:
            if view_id in self.view_entry_mapping:
                # Too fast, cancel this operation
                if now - self.last_signaled <= 0.01:
                    return
        if preemptive:
            # Cancel pending actions
            with self.cond:
                if view_id in self.view_entry_mapping:
                    del self.view_entry_mapping[view_id]
                self.cond.notify()
            RendererManager.enqueue_view(view, only_exists=True)
            self.last_signaled = now
        else:
            with self.cond:
                filename = view.file_name()
                if view_id not in self.view_entry_mapping:
                    self.view_entry_mapping[view_id] = self.Entry(view, timeout)
                else:
                    entry = self.view_entry_mapping[view_id]
                    entry.view = view
                    entry.filename = filename
                    entry.timeout = timeout
                self.cond.notify()

    def enqueue_view_to_renderer_manager(self, view, filename):
        # Runs on the main thread; skip if the view changed file meanwhile.
        if view.is_loading() or view.file_name() != filename:
            return
        if RendererManager.any_available_renderer_for_view(view):
            RendererManager.enqueue_view(view, only_exists=True)
            self.last_signaled = time.time()

    def run(self):
        prev_time = time.time()
        while True:
            with self.cond:
                if self.stopping:
                    break
                self.cond.wait(self.WAIT_TIMEOUT)
                if self.stopping:
                    break
                if len(self.view_entry_mapping) > 0:
                    now = time.time()
                    diff_time = now - prev_time
                    prev_time = time.time()
                    # Count down every pending entry; fire the expired ones.
                    for view_id in list(self.view_entry_mapping.keys()):
                        o = self.view_entry_mapping[view_id]
                        o.timeout -= max(diff_time, self.WAIT_TIMEOUT)
                        if o.timeout <= 0:
                            del self.view_entry_mapping[view_id]
                            sublime.set_timeout(partial(self.enqueue_view_to_renderer_manager,
                                                        o.view, o.filename), 0)
                else:
                    # No more items, sleep
                    self.cond.wait()

    def stop(self):
        with self.cond:
            self.stopping = True
            self.cond.notify()
        self.join()
class PluginEventListener(sublime_plugin.EventListener):
    """Bridges Sublime buffer events to the render throttle queue."""

    def __init__(self):
        # One shared worker thread services all views.
        self.throttle = ThrottleQueue()
        self.throttle.start()

    def __del__(self):
        self.throttle.stop()

    def on_query_context(self, view, key, operator, operand, match_all):
        # `omp_is_enabled` for backwards compatibility
        if key == 'omnimarkup_is_enabled' or key == 'omp_is_enabled':
            return RendererManager.any_available_renderer_for_view(view)
        return None

    def _on_close(self, view):
        # Mark the cached entry so connected browsers stop polling it.
        storage = RenderedMarkupCache.instance()
        entry = storage.get_entry(view.buffer_id())
        if entry is not None:
            entry.disconnected = True

    def _on_modified(self, view):
        # Prevent rare complaint about a slow callback
        def callback():
            setting = Setting.instance()
            if not setting.refresh_on_modified:
                return
            timeout = setting.refresh_on_modified_delay / 1000.0
            self.throttle.put(view, preemptive=False, timeout=timeout)
        if PY3K:
            callback()
        else:
            sublime.set_timeout(callback, 0)

    def _on_post_save(self, view):
        if not Setting.instance().refresh_on_saved:
            return
        self.throttle.put(view, preemptive=True)

    # ST3 delivers async events; ST2 only has the synchronous ones.
    if PY3K:
        on_close_async = _on_close
        on_modified_async = _on_modified
        on_post_save_async = _on_post_save
    else:
        on_close = _on_close
        on_modified = _on_modified
        on_post_save = _on_post_save
g_server = None  # module-wide HTTP preview server instance
@Singleton
class PluginManager(object):
    """Tracks setting changes and restarts the preview server when needed."""

    def __init__(self):
        setting = Setting.instance()
        self.on_setting_changing(setting)

    def on_setting_changing(self, setting):
        # Snapshot values before the settings file is re-read.
        self.old_server_host = setting.server_host
        self.old_server_port = setting.server_port
        self.old_ajax_polling_interval = setting.ajax_polling_interval
        self.old_html_template_name = setting.html_template_name

    def on_setting_changed(self, setting):
        # Template/polling changes only need a browser reload ...
        if (setting.ajax_polling_interval != self.old_ajax_polling_interval or
                setting.html_template_name != self.old_html_template_name):
            sublime.status_message('MarkupPreview requires a browser reload to apply changes')
        # ... while host/port changes require restarting the server.
        need_server_restart = (setting.server_host != self.old_server_host or
                               setting.server_port != self.old_server_port)
        if need_server_restart:
            self.restart_server()

    def subscribe_setting_events(self):
        Setting.instance().subscribe('changing', self.on_setting_changing)
        Setting.instance().subscribe('changed', self.on_setting_changed)

    def restart_server(self):
        global g_server
        if g_server is not None:
            self.stop_server()
        setting = Setting.instance()
        g_server = Server.Server(host=setting.server_host, port=setting.server_port)

    def stop_server(self):
        global g_server
        if g_server is not None:
            g_server.stop()
            g_server = None
def unload_handler():
    """Shut down the preview server and renderer worker on plugin unload."""
    log.info('Unloading plugin...')
    manager = PluginManager.instance()
    manager.stop_server()
    RendererManager.stop()
def plugin_loaded():
    """Sublime Text 3 entry point: initialise settings, renderers, server."""
    Server.init()
    # Setting must be the first to initialize.
    Setting.instance().init()
    PluginManager.instance().subscribe_setting_events()
    RendererManager.start()
    PluginManager.instance().restart_server()
# Sublime Text 2 never calls plugin_loaded(), so invoke it manually there.
if not PY3K:
    plugin_loaded()
|
'''
Created on 2014.02.21.
@author: fekete
'''
from tkinter import *
from tkinter.ttk import *
from hu.minux.prodmaster.gui.AbstractFrame import AbstractFrame
from hu.minux.prodmaster.gui.MinuxTable import MinuxTable
from hu.minux.prodmaster.app.Stock import Stock
from hu.minux.prodmaster.tools.World import World
class StockPanel(AbstractFrame):
    """Form panel for viewing and editing Stock entities (singleton)."""

    _type = 'STOCKS'
    _myEntityType = Stock
    # Widget references, created lazily in _createWidgets().
    _nameLabel = None
    _nameEntry = None
    _remarkLabel = None
    _remarkEntry = None

    def __init__(self, master, appFrame):
        AbstractFrame.__init__(self, master, appFrame)

    @staticmethod
    def getInstance(appFrame):
        # Lazily create the shared panel instance on first access.
        if StockPanel._instance == None:
            StockPanel._instance = StockPanel(appFrame.getWorkPane(), appFrame)
        return StockPanel._instance

    def _clearForm(self):
        # Empty both the single-line name field and the multi-line remark.
        self._nameEntry.delete(0, END)
        self._remarkEntry.delete('0.0', END)

    def _create(self):
        AbstractFrame._create(self)
        self._entity = Stock.new()

    def _createWidgets(self):
        """Lay out the name/remark widgets in a grid, then the buttons."""
        r = 0
        c = 0
        self._nameLabel = Label(self, text=World.L("NAME"));
        self._nameLabel.grid(row=r, column=c, sticky=W, padx=World.smallPadSize(), pady=World.smallPadSize())
        c += 1
        self._nameEntry = Entry(self, width=World.defaultEntryWidth())
        self._nameEntry.grid(row=r, column=c, sticky=W, padx=World.smallPadSize(), pady=World.smallPadSize())
        c = 0
        r += 1
        self._remarkLabel = Label(self, text=World.L("REMARK"))
        self._remarkLabel.grid(row=r, column=c, sticky=W, padx=World.smallPadSize(), pady=World.smallPadSize())
        c += 1
        self._remarkEntry = Text(self, width=World.textEntryWidth(), height=10)
        self._remarkEntry.grid(row=r, column=c, sticky="WE", padx=World.smallPadSize(), pady=World.smallPadSize())
        # Append operation buttons
        c = 0
        r += 1
        AbstractFrame._createWidgets(self, r , c, 2)

    def setState(self, widget, state='disabled'):
        '''Override'''
        AbstractFrame.setState(self, widget, state)

    def _save(self):
        # 'end-1c' drops the trailing newline Tk always appends to Text.
        self._entity.name = self._nameEntry.get()
        self._entity.remark = self._remarkEntry.get('0.0', 'end-1c')
        AbstractFrame._save(self)

    def _validate(self):
        ''' TO BE IMPLEMENTED '''
        errorStr = None
        return errorStr

    def refreshDetails(self, params):
        '''Override'''
        # self._contentTable.deleteEntries()
        AbstractFrame.refreshDetails(self, params)

    def showItem(self, elementId):
        # Load the entity and mirror its fields into the form.
        self._entity = Stock.get(elementId)
        e = self._entity
        self._clearForm()
        self._nameEntry.insert(0, e.name)
        self._remarkEntry.insert('0.0', e.remark)
|
import unittest
from tests.conftest import Dummy, fqn_test
class TestFQN(unittest.TestCase):
    """Verify fully-qualified-name resolution for module, class and method."""

    def test_fqn(self):
        cases = [
            (fqn_test.fqn, 'tests.conftest.fqn_test'),
            (Dummy.fqn, 'tests.conftest.Dummy'),
            (Dummy().go.fqn, 'tests.conftest.Dummy.go'),
        ]
        for actual, expected in cases:
            self.assertEqual(actual, expected)
|
'''
See COPYRIGHT.md for copyright information.
This module encodes a source python file (from utf-8 to ascii with \\u escapes).
'''
import datetime, sys
if __name__ == "__main__":
    # Re-encode a UTF-8 python source file (argv[1]) to pure ASCII with
    # \u escapes (argv[2]), dropping any explicit utf-8 coding declaration.
    with open(sys.argv[1], "rt", encoding="utf-8") as fIn, \
            open(sys.argv[2], "wb") as fOut:
        for line in fIn:
            if '# -*- coding: utf-8 -*-' not in line:
                fOut.write(line.encode('ascii', 'backslashreplace'))
|
import numpy as np
# Shortest distance from the point u = (1, 2, 3) to the plane x + y + z = -1,
# computed as |u . v + c| / ||v|| where v is the plane normal and c the offset.
u = np.array([1, 2, 3])
v = np.array([1, 1, 1])
c = 1
n = np.linalg.norm(np.dot(u, v) + c)  # |u . v + c|
d = np.linalg.norm(v)                 # ||v||
print(n / d)
|
def assignNodeNames(nodeList):
    """Give every node a unique 'name' attribute derived from its type.

    The first node of each type is named exactly `type`; subsequent ones
    get a numeric suffix starting at 2 (type2, type3, ...).
    """
    seen = {}
    for node in nodeList:
        count = seen.get(node.type, 1)
        suffix = '' if count == 1 else str(count)
        node.addAttribute('name', node.type + suffix)
        node.assignName()
        seen[node.type] = count + 1
def assignTokenNodeNames(nodeList):
    """Give every token node a unique 'name' attribute derived from its lexeme.

    The first node with a given lexeme is named exactly `lex`; subsequent
    ones get a numeric suffix starting at 2 (lex2, lex3, ...).
    """
    seen = {}
    for node in nodeList:
        count = seen.get(node.lex, 1)
        suffix = '' if count == 1 else str(count)
        node.addAttribute('name', node.lex + suffix)
        node.assignName()
        seen[node.lex] = count + 1
|
import numpy as np
# Example RNA sequences for the alignment demo below.
s1 = 'GCACU'
s2 = 'UUGA'
def seq_alignment(s1, s2):
    """Print and return the length of the longest common subsequence of
    *s1* and *s2*.

    Classic O(len(s1) * len(s2)) dynamic programme: M[i][j] is the LCS
    length of s1[:i] and s2[:j].  The original iterated the numpy rows
    with hand-maintained counters and returned nothing; this version uses
    plain index loops and also returns the score (backward-compatible --
    the final cell is still printed).
    """
    M = np.zeros((len(s1) + 1, len(s2) + 1))
    for i in range(1, len(s1) + 1):
        for j in range(1, len(s2) + 1):
            if s1[i - 1] == s2[j - 1]:
                # Characters match: extend the diagonal.
                M[i][j] = M[i - 1][j - 1] + 1
            else:
                M[i][j] = max(M[i - 1][j], M[i][j - 1])
    print(M[len(s1)][len(s2)])
    return M[len(s1)][len(s2)]
def main():
    # Run the demo alignment on the module-level example sequences.
    seq_alignment(s1, s2)
if __name__ == '__main__':
    main()
|
import tkinter
from tkinter import messagebox
def change_bg():
    """Turn the button's background green when it is clicked."""
    btn.configure(background = "green")  # color setting
    # show_messa()
def show_messa():
    """Ask an OK/Cancel question and report which button was pressed."""
    if messagebox.askokcancel(title="hello python", detail = "yes or no"):
        messagebox.showinfo(title="ok", detail="you have pressed 'Yes'")
    else:
        messagebox.showwarning(title="Warning", detail = "you have pressed 'No'")
mywin = tkinter.Tk()
mywin.geometry("{}x{}".format(200,200))
# Two stacked frames: blue on top, red below (the red one holds the button).
frame_up = tkinter.Frame(mywin, height = 60, width = 90, background = "blue")
frame_down = tkinter.Frame(mywin, height = 30, width = 90, background = "red")
frame_up.pack()
frame_down.pack()
btn = tkinter.Button(frame_down, text = "click", command = change_bg,
                     foreground = "white", background = "black",
                     activeforeground = "blue",
                     activebackground = "#FF007F")
btn.pack()  # attach the button to its frame
mywin.mainloop()
|
from django.test import TestCase
from django.test.client import RequestFactory
from django.contrib.auth.models import User, Group
from django.core.urlresolvers import reverse
from views import IndexView, FlujosProyectoIndex
from apps.proyectos.models import Proyecto
from models import Flujo, Actividad, PlantillaFlujo, ActividadFlujoPlantilla
from apps.roles_proyecto.models import RolProyecto, RolProyecto_Proyecto
class PlantillaFlujoTest(TestCase):
    """
    Test suite for the flow-template (plantilla de flujo) administration
    module.  (Python 2 code: note the print statements.)
    """
    def setUp(self):
        """
        Initialise the RequestFactory and a test user used by every test.
        """
        # Request factory to simulate requests
        self.factory = RequestFactory()
        # User that performs the requests
        self.user = User.objects.create_user(username='testuser', email='test@test.com', password='test')
    def test_view_IndexView(self):
        """
        Test the IndexView view, which lists the flow templates.
        """
        # log in as testuser
        user = self.client.login(username='testuser', password='test')
        self.assertTrue(user)
        user2 = User.objects.create_user(username='user_prueba', email='test@test22.com', password='prueba')
        proyecto = Proyecto.objects.create(codigo='codi', nombre_corto='test',
                                           nombre_largo='test', cancelado=False, scrum_master=user2)
        proyecto.save()
        lista_actividades = []
        # create 3 activities to check the full list is returned
        for i in range(3):
            actividad_plantilla_flujo = ActividadFlujoPlantilla.objects.create(nombre='actividad_p%s' % i)
            actividad_plantilla_flujo.save()
            lista_actividades.append(actividad_plantilla_flujo)
        # create 10 flow templates to check the full list of 10 is returned
        for i in range(10):
            plantilla_flujo = PlantillaFlujo.objects.create(nombre='p_flujo%s' % i)
            plantilla_flujo.actividades = lista_actividades
            plantilla_flujo.save()
        # verify the view returns the expected template
        request = self.factory.get(reverse('flujos:index'))
        request.user = self.user
        response = IndexView.as_view()(request)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.template_name[0], 'flujos/index.html')
        # verify the returned flow templates
        self.assertEqual(len(response.context_data['object_list']), 10)
        print 'Test de IndexView de Plantillas de flujo realizado exitosamente'
    def test_view_crear_plantilla_flujo(self):
        """
        Test the crear_plantilla_flujo view, which creates a new flow template.
        """
        # log in as testuser
        user = self.client.login(username='testuser', password='test')
        self.assertTrue(user)
        user3 = User.objects.create_user(username='user_prueba3', email='test@test223.com', password='prueba')
        # create a user
        proyecto = Proyecto.objects.create(codigo='codi', nombre_corto='test',
                                           nombre_largo='test', cancelado=False, scrum_master=user3)
        proyecto.save()
        lista_actividades = []
        # create 3 activities to check the full list is returned
        for i in range(3):
            actividad_plantilla_flujo = ActividadFlujoPlantilla.objects.create(nombre='actividad_p%s' % i)
            actividad_plantilla_flujo.save()
            lista_actividades.append(actividad_plantilla_flujo)
        plantilla_flujo = PlantillaFlujo.objects.create(nombre='p_flujo_c')
        plantilla_flujo.actividades = lista_actividades
        plantilla_flujo.save()
        self.assertEqual(plantilla_flujo.nombre, 'p_flujo_c')
        print 'Test de crear_plantilla_flujo realizado exitosamente'
    def test_view_update_plantilla_flujo(self):
        """
        Test the update_plantilla_flujo view, which modifies an existing
        flow template.
        """
        # log in as testuser
        user = self.client.login(username='testuser', password='test')
        self.assertTrue(user)
        user4 = User.objects.create_user(username='user_prueba4', email='test@test224.com', password='prueba')
        # create a user
        proyecto = Proyecto.objects.create(codigo='codi', nombre_corto='test',
                                           nombre_largo='test', cancelado=False, scrum_master=user4)
        proyecto.save()
        lista_actividades = []
        # create 3 activities to check the full list is returned
        for i in range(3):
            actividad_plantilla_flujo = ActividadFlujoPlantilla.objects.create(nombre='actividad_p%s' % i)
            actividad_plantilla_flujo.save()
            lista_actividades.append(actividad_plantilla_flujo)
        plantilla_flujo = PlantillaFlujo.objects.create(nombre='p_flujo_c')
        plantilla_flujo.actividades = lista_actividades
        plantilla_flujo.save()
        # new values for the attributes
        nuevo_nombre = 'nuevo_nombre'
        # modify the template's attributes
        plantilla_flujo.nombre = nuevo_nombre
        plantilla_flujo.save()
        self.assertEqual(plantilla_flujo.nombre, 'nuevo_nombre')
        print 'Test de update_plantilla_flujo realizado exitosamente'
    def test_view_ActividadCreate(self):
        """
        Test the ActividadCreate view, which creates a new activity.
        """
        # log in as testuser
        user = self.client.login(username='testuser', password='test')
        self.assertTrue(user)
        user4 = User.objects.create_user(username='user_prueba4', email='test@test224.com', password='prueba')
        # create a user
        proyecto = Proyecto.objects.create(codigo='codi', nombre_corto='test',
                                           nombre_largo='test', cancelado=False, scrum_master=user4)
        proyecto.save()
        actividad_plantilla_flujo = ActividadFlujoPlantilla.objects.create(nombre='actividad_p')
        actividad_plantilla_flujo.save()
        self.assertEqual(actividad_plantilla_flujo.nombre, 'actividad_p')
        print 'Test de ActividadCreate realizado exitosamente'
    def test_view_FlujosProyectoIndex(self):
        """
        Test the FlujosProyectoIndex view, which lists the flows inside a
        project.
        """
        # log in as testuser
        user = self.client.login(username='testuser', password='test')
        self.assertTrue(user)
        user2 = User.objects.create_user(username='user_prueba', email='test@test22.com', password='prueba')
        proyecto = Proyecto.objects.create(codigo='codi', nombre_corto='test',
                                           nombre_largo='test', cancelado=False, scrum_master=user2)
        proyecto.save()
        lista_actividades = []
        # create 3 activities to check the full list is returned
        for i in range(3):
            actividad_flujo = Actividad.objects.create(nombre='actividad%s' % i)
            actividad_flujo.save()
            lista_actividades.append(actividad_flujo)
        # create 10 flows to check the full list of 10 is returned
        for i in range(10):
            flujo = Flujo.objects.create(nombre='flujo%s' % i, proyecto=proyecto)
            flujo.actividades = lista_actividades
            flujo.save()
        group = Group.objects.create(name='grupo')
        group.save()
        rolProyecto = RolProyecto(group=group, es_rol_proyecto=True)
        rolProyecto.save()
        row_rol = RolProyecto_Proyecto(user=self.user, rol_proyecto=rolProyecto, proyecto=proyecto)
        row_rol.save()
        # verify the view returns the expected template
        request = self.factory.get(reverse('flujos:flujos_proyecto_index', args=[proyecto.pk]))
        request.user = self.user
        response = FlujosProyectoIndex.as_view()(request, pk_proyecto=proyecto.pk)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.template_name[0], 'flujos/flujos_proyecto_index.html')
        # verify the returned flows
        self.assertEqual(len(response.context_data['object_list']), 10)
        print 'Test de FlujosProyectoIndex realizado exitosamente'
    def test_view_FlujoProyectoAsignar(self):
        """
        Test the FlujoProyectoAsignar view, which assigns a flow to a project.
        """
        # log in as testuser
        user = self.client.login(username='testuser', password='test')
        self.assertTrue(user)
        user4 = User.objects.create_user(username='user_prueba4', email='test@test224.com', password='prueba')
        # create a user
        proyecto = Proyecto.objects.create(codigo='codi', nombre_corto='test',
                                           nombre_largo='test', cancelado=False, scrum_master=user4)
        proyecto.save()
        lista_actividades = []
        # create 3 activities to check the full list is returned
        for i in range(3):
            actividad_flujo = Actividad.objects.create(nombre='actividad%s' % i)
            actividad_flujo.save()
            lista_actividades.append(actividad_flujo)
        flujo = Flujo.objects.create(nombre='flujo', proyecto=proyecto)
        flujo.actividades = lista_actividades
        flujo.save()
        group = Group.objects.create(name='grupo')
        group.save()
        rolProyecto = RolProyecto(group=group, es_rol_proyecto=True)
        rolProyecto.save()
        row_rol = RolProyecto_Proyecto(user=self.user, rol_proyecto=rolProyecto, proyecto=proyecto)
        row_rol.save()
        self.assertEqual(flujo.nombre, 'flujo')
        self.assertEqual(flujo.proyecto, proyecto)
        print 'Test de FlujoProyectoAsignar realizado exitosamente'
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `openwrt_luci_rpc` package."""
import unittest
from openwrt_luci_rpc import utilities
from openwrt_luci_rpc.constants import Constants
class TestOpenwrtLuciRPC(unittest.TestCase):
    """Tests for `openwrt_luci_rpc` package."""

    def setUp(self):
        """Set up test fixtures, if any."""

    def tearDown(self):
        """Tear down test fixtures, if any."""

    def test_normalise_keys(self):
        """Test replacing v18 keys works as expected."""
        raw = {'dest': "10.1.1.11"}
        normalised = utilities.normalise_keys(raw)
        self.assertEqual(normalised[Constants.MODERN_KEYS["dest"]], '10.1.1.11')
|
from django.conf.urls import url,include
from django.contrib import admin
from testApp import views
urlpatterns = [
    # Django admin site.
    url(r'^admin/', admin.site.urls),
    # Date endpoint handled by testApp.views.dateinfo.
    url(r'^date/',views.dateinfo),
]
|
"""
龙穴模块测试
1、检查龙穴一级界面和二级界面的控件,并进行点击和关闭操作
"""
# -*- encoding=utf8 -*-
__author__ = "Sinwu"
from airtest.core.api import *
from multi_processframe.ProjectTools import common
def butpos(devices,butpos,pos1=0.4,pos2=0.81,high=1330,low=930,lows=482):
    """Scroll a control that is outside the visible screen band into view
    so it can be interacted with.

    :param devices: target device handle, passed through to common.setswipe
    :param butpos: poco proxy of the control (the name shadows this function)
    :param pos1: lowest acceptable relative screen position of the control
    :param pos2: highest acceptable relative screen position of the control
    :param high: fixed coordinate of the swipe (perpendicular axis)
    :param low: swipe start/end position
    :param lows: swipe start/end position
    :return: None
    """
    # Try at most 20 swipes; stop as soon as the control lies in [pos1, pos2].
    for i in range(20):
        but = butpos.get_position()
        if but[1] < pos1:
            common.setswipe(1, [high, lows], [high, low], devices)
        elif but[1] > pos2:
            common.setswipe(1, [high, low], [high, lows], devices)
        else:
            break
def UIjudge(start, poco,num, devices):
    """Verify the dragon-nest detail UI: check the summary labels, click every
    reward icon, then exercise the team-entry flow.  Screenshots are taken on
    failure via common.get_screen_shot."""
    sleep(3)
    freeze_poco = poco.freeze()  # TODO: frozen poco snapshot for faster queries
    # All six summary widgets must exist before the label texts are reported.
    if freeze_poco("NestName").exists() and freeze_poco("CurPPT").exists() and \
       freeze_poco("SugPPT").exists() and freeze_poco("SugAttr").exists() and \
       freeze_poco("SugLevel").exists() and freeze_poco("SugMember").exists():
        common.printgreen("检查点: " + freeze_poco("CurPPT").child("T").get_text() + " " +
                          freeze_poco("CurPPT").get_text() + " 显示正确")
        common.printgreen("检查点: " + freeze_poco("SugPPT").child("T").get_text() + " " +
                          freeze_poco("SugPPT").get_text() + " 显示正确")
        common.printgreen("检查点: " + freeze_poco("SugAttr").child("T").get_text() + " " +
                          freeze_poco("SugAttr").get_text() + " 显示正确")
        common.printgreen("检查点: " + freeze_poco("SugLevel").child("T").get_text() + " " +
                          freeze_poco("SugLevel").get_text() + " 显示正确")
        common.printgreen("检查点: " + freeze_poco("SugMember").child("T").get_text() + " " +
                          freeze_poco("SugMember") .get_text() + " 显示正确")
    # Report the name of each nest entry (the last `num` children are skipped).
    for item in range(len(freeze_poco("NestFrameNormal").child()) - num):
        item1 = "item" + str(item)
        common.printgreen("检查点 " + freeze_poco("DragonNestDlg(Clone)").offspring("NestFrameNormal")
                          .child(item1).child("Name").get_text() + " 显示正确")
    common.printgreen("进行点击奖励icon的操作")
    # Click every reward icon twice (open + close its tooltip).
    for item in range(len(freeze_poco("ItemList").child()) - 2):
        item2 = "item" + str(item)
        freeze_poco("DragonNestDlg(Clone)").offspring("ItemList").child(item2).click()
        freeze_poco("DragonNestDlg(Clone)").offspring("ItemList").child(item2).click()
        if poco("ItemToolTipDlg(Clone)").child("Bg").exists():
            poco("DragonNestDlg(Clone)").offspring("ItemList").child(item2).click()  # todo: double-check to guard against a missed click
            if poco("ItemToolTipDlg(Clone)").child("Bg").exists():
                poco("DragonNestDlg(Clone)").offspring("ItemList").child(item2).click()
    common.printgreen("奖励icon点击完成,报不报红暂时我就不管了")
    common.printgreen("点击 组队进入")
    poco("Enter").child("Text").click()
    # Entering should open the team screen; close it and confirm we are back.
    if poco(text="队伍").exists():
        common.printgreen("进入组队界面,开始点击返回按钮")
        poco("Close").click()
        if poco("Enter").exists():
            common.printgreen("返回到龙穴界面")
    else:
        common.printred("点击组队后没有进入组队界面")
        common.get_screen_shot(start, time.time(), devices, "点击组队后没有进入组队界面")
def longxue(start, devices):
    """Smoke-test the Dragon Nest (龙穴) UI flow on one device.

    Opens the daily-activity dialog, enters the Dragon Nest screen,
    checks that the expected widgets exist, clicks through every
    unlocked nest and its stage list, opens/closes the help dialog and
    finally returns the screen title text.

    Args:
        start: test start timestamp, forwarded to common.get_screen_shot
            for naming failure screenshots.
        devices: device serial passed to common.deviceconnect.

    Returns:
        The title text of the Dragon Nest screen.
    """
    poco = common.deviceconnect(devices)
    if poco("Duck").exists():
        poco("Duck").click()
        sleep(2)
        poco("XSys_Activity").click()  # tap the daily-activity button
    else:
        common.printgreen("主界面缺少日常按钮,请检查...")
        common.get_screen_shot(start, time.time(), devices, "主界面缺少日常按钮")
    if poco("DailyActivityDlg(Clone)").offspring("XActivityHandler").offspring("Item526")\
            .offspring("Background").exists():
        pos = poco("DailyActivityDlg(Clone)").offspring("XActivityHandler").offspring("Item526")\
            .offspring("Background")  # "join" entry for the Dragon Nest activity
        butpos(devices,butpos=pos, pos1=0.4, pos2=0.79, high=565, low=511, lows=240)  # call the butpos helper -- presumably scrolls the entry into position; confirm its semantics
        pos.click()  # tap the activity's "join" button
    else:
        common.printgreen("日常界面没有龙穴选项,请检查...")
        common.get_screen_shot(start, time.time(), devices, "日常界面没有龙穴选项")
    freeze_poco = poco.freeze()  # TODO: take a frozen snapshot of the UI hierarchy
    if freeze_poco("Panel").exists() and \
            freeze_poco("NestFrameNormal").exists() and \
            freeze_poco("Enter").exists() and \
            freeze_poco("CurPPT").exists() and \
            freeze_poco("SugAttr").exists() and \
            freeze_poco("SugLevel").exists() and \
            freeze_poco("SugMember").exists():
        common.printgreen("进入龙穴界面\n检查点: 四个巢穴\n检查点: 显示副本界面\n检查点: 进入副本按钮\n检查点: 推荐进入要求\n以上全部显示正确 ")
    else:
        common.printred("龙穴界面缺少控件元素,请检查。。。")
        common.get_screen_shot(start, time.time(), devices, "龙穴界面缺少控件元素")
    # Only operate on the ice dragon and rune dragon nests (skip the
    # first two entries of the panel).
    for item in range(2, len(poco("Panel").child())):
        item1 = "item" + str(item)
        if not poco("DragonNestDlg(Clone)").offspring(item1).offspring(texture="l_lock_01").exists():  # unlocked: level requirement met, tile highlighted (no lock icon)
            poco("DragonNestDlg(Clone)").offspring(item1).click()  # tap each nest
            print(f"点击第{item}个龙穴")
            A = len(poco("DragonNestDlg(Clone)").offspring("NestFrameNormal").child())
            freeze_poco = poco.freeze()  # TODO: re-freeze after the UI changed
            # NOTE: this inner loop variable shadows the outer `item`;
            # the outer for-loop is unaffected because it pulls from its
            # own range iterator.
            for item in range(A-2):
                freeze_poco("DragonNestDlg(Clone)").offspring("NestFrameNormal").child(f"item{item}").click()
                sleep(0.3)
    poco("Help").click()  # tap the help button
    poco(texture="l_close_00").click()
    # Close the help dialog; retry once if it is still open.
    if not poco(texture="l_close_00").exists():
        pass
    else:
        poco(texture="l_close_00").click()
    return poco(texture="Bg_DragonNest_h2Split").child("T").get_text()  # screen title text ("龙穴")
if __name__ == "__main__":
    # Ad-hoc manual run against a fixed device serial number.
    start = time.localtime()
    longxue(start, "e37c0280")
|
"""This module defines the basic solver.
Binary electrohydrodynamics solved using a partial splitting approach and
linearisation. The problem is split between the following subproblems.
* PF: The phase-field equation is solved simultaneously with the phase-field
chemical potential (considered as a separate field), with a linearised
double-well potential to make the problem linear.
* EC: Solute concentrations are solved simultaneously as the electric
potential, with a linearization of the c \grad V term, to make the whole
problem linear.
* NS: The Navier-Stokes equations are solved simultaneously for the
  velocity and pressure fields, where the inertial term is linearised
  to make the whole subproblem linear.
GL, 2017-05-29
"""
import dolfin as df
import math
from common.functions import ramp, dramp, diff_pf_potential_linearised, \
unit_interval_filter, diff_pf_contact_linearised, pf_potential, alpha
from common.io import mpi_barrier, info_red
import numpy as np
from . import *
from . import __all__
def get_subproblems(base_elements, solutes,
                    p_lagrange,
                    enable_NS, enable_PF, enable_EC,
                    **namespace):
    """ Returns dict of subproblems the solver splits the problem into.

    NS carries (u, p) plus an optional pressure Lagrange multiplier p0;
    PF carries (phi, g); EC carries one concentration field per solute
    plus the electric potential V.
    """
    subproblems = dict()
    if enable_NS:
        ns_fields = [dict(name="u", element="u"),
                     dict(name="p", element="p")]
        if p_lagrange:
            ns_fields.append(dict(name="p0", element="p0"))
        subproblems["NS"] = ns_fields
    if enable_PF:
        subproblems["PF"] = [dict(name="phi", element="phi"),
                             dict(name="g", element="g")]
    if enable_EC:
        ec_fields = [dict(name=solute[0], element="c")
                     for solute in solutes]
        ec_fields.append(dict(name="V", element="V"))
        subproblems["EC"] = ec_fields
    return subproblems
def setup(mesh, test_functions, trial_functions,
          w_, w_1,
          ds, dx, normal,
          dirichlet_bcs, neumann_bcs, boundary_to_mark,
          permittivity, density, viscosity,
          solutes,
          enable_PF, enable_EC, enable_NS,
          surface_tension, dt, interface_thickness,
          grav_const,
          grav_dir,
          friction_coeff,
          pf_mobility,
          pf_mobility_coeff,
          use_iterative_solvers, use_pressure_stabilization,
          comoving_velocity,
          p_lagrange,
          q_rhs,
          **namespace):
    """ Set up the splitting-scheme problem.

    Builds the linearised variational forms and the matching linear
    solvers for the enabled subproblems (PF, EC, NS) and returns them
    as dict(solvers={...}).

    w_ / w_1 hold the current / previous-timestep mixed functions per
    subproblem.  Each entry of `solutes` is indexed as: [0] name,
    [1] charge z, [2:4] diffusivities, [4:6] solubility parameters.
    """
    # Constants
    dim = mesh.geometry().dim()
    sigma_bar = surface_tension*3./(2*math.sqrt(2))  # rescaled surface tension
    per_tau = df.Constant(1./dt)  # 1/dt
    grav = df.Constant(tuple(grav_const*np.array(grav_dir[:dim])))
    gamma = pf_mobility_coeff
    eps = interface_thickness
    fric = df.Constant(friction_coeff)
    u_comoving = df.Constant(tuple(comoving_velocity[:dim]))

    # Navier-Stokes
    u_ = p_ = None
    u_1 = p_1 = None
    p0 = q0 = p0_ = p0_1 = None
    if enable_NS:
        u, p = trial_functions["NS"][:2]
        v, q = test_functions["NS"][:2]
        up_ = df.split(w_["NS"])
        up_1 = df.split(w_1["NS"])
        u_, p_ = up_[:2]
        u_1, p_1 = up_1[:2]
        if p_lagrange:
            # Optional Lagrange multiplier associated with the pressure.
            p0 = trial_functions["NS"][-1]
            q0 = test_functions["NS"][-1]
            p0_ = up_[-1]
            p0_1 = up_1[-1]

    # Phase field
    if enable_PF:
        phi, g = trial_functions["PF"]
        psi, h = test_functions["PF"]
        phi_, g_ = df.split(w_["PF"])
        phi_1, g_1 = df.split(w_1["PF"])
    else:
        # Defaults to phase 1 if phase field is disabled
        phi_ = phi_1 = 1.
        g_ = g_1 = None

    # Electrochemistry
    if enable_EC:
        num_solutes = len(trial_functions["EC"])-1
        assert(num_solutes == len(solutes))
        c = trial_functions["EC"][:num_solutes]
        V = trial_functions["EC"][num_solutes]
        b = test_functions["EC"][:num_solutes]
        U = test_functions["EC"][num_solutes]
        cV_ = df.split(w_["EC"])
        cV_1 = df.split(w_1["EC"])
        c_, V_ = cV_[:num_solutes], cV_[num_solutes]
        c_1, V_1 = cV_1[:num_solutes], cV_1[num_solutes]
    else:
        c_ = V_ = c_1 = V_1 = None

    # Material parameters evaluated on the phase field clamped to [0, 1].
    phi_flt_ = unit_interval_filter(phi_)
    phi_flt_1 = unit_interval_filter(phi_1)
    M_ = pf_mobility(phi_flt_, gamma)
    M_1 = pf_mobility(phi_flt_1, gamma)
    mu_ = ramp(phi_flt_, viscosity)
    rho_ = ramp(phi_flt_, density)
    rho_1 = ramp(phi_flt_1, density)
    veps_ = ramp(phi_flt_, permittivity)

    # Derivatives of the interpolation (ramp) functions w.r.t. phi.
    dveps = dramp(permittivity)
    drho = dramp(density)

    dbeta = []  # Diff. in beta
    z = []  # Charge z[species]
    K_ = []  # Diffusivity K[species]
    beta_ = []  # Conc. jump func. beta[species]

    for solute in solutes:
        z.append(solute[1])
        K_.append(ramp(phi_, [solute[2], solute[3]]))
        beta_.append(ramp(phi_, [solute[4], solute[5]]))
        dbeta.append(dramp([solute[4], solute[5]]))

    if enable_EC:
        rho_e = sum([c_e*z_e for c_e, z_e in zip(c, z)])  # Sum of trial func.
        rho_e_ = sum([c_e*z_e for c_e, z_e in zip(c_, z)])  # Sum of curr. sol.
    else:
        rho_e_ = None

    # One linear solver per enabled subproblem.
    solvers = dict()
    if enable_PF:
        solvers["PF"] = setup_PF(w_["PF"], phi, g, psi, h,
                                 dx, ds, normal,
                                 dirichlet_bcs["PF"], neumann_bcs,
                                 boundary_to_mark,
                                 phi_1, u_1, M_1, c_1, V_1,
                                 per_tau, sigma_bar, eps, dbeta, dveps,
                                 enable_NS, enable_EC,
                                 use_iterative_solvers, q_rhs)

    if enable_EC:
        solvers["EC"] = setup_EC(w_["EC"], c, V, b, U, rho_e,
                                 dx, ds, normal,
                                 dirichlet_bcs["EC"], neumann_bcs,
                                 boundary_to_mark,
                                 c_1, u_1, K_, veps_, phi_flt_,
                                 solutes,
                                 per_tau, z, dbeta,
                                 enable_NS, enable_PF,
                                 use_iterative_solvers,
                                 q_rhs)

    if enable_NS:
        solvers["NS"] = setup_NS(w_["NS"], u, p, v, q, p0, q0,
                                 dx, ds, normal,
                                 dirichlet_bcs["NS"], neumann_bcs,
                                 boundary_to_mark,
                                 u_1, phi_flt_,
                                 rho_, rho_1, g_, M_, mu_, rho_e_,
                                 c_, V_,
                                 c_1, V_1,
                                 dbeta, solutes,
                                 per_tau, drho, sigma_bar, eps, dveps,
                                 grav, fric,
                                 u_comoving,
                                 enable_PF, enable_EC,
                                 use_iterative_solvers,
                                 use_pressure_stabilization,
                                 p_lagrange,
                                 q_rhs)
    return dict(solvers=solvers)
def setup_NS(w_NS, u, p, v, q, p0, q0,
             dx, ds, normal,
             u_1, phi_, rho_, rho_1, g_, M_, mu_, rho_e_,
             c_, V_,
             c_1, V_1,
             dbeta, solutes,
             per_tau, drho, sigma_bar, eps, dveps, grav, fric,
             u_comoving,
             enable_PF, enable_EC,
             use_iterative_solvers, use_pressure_stabilization,
             p_lagrange,
             q_rhs):
    """ Set up the Navier-Stokes subproblem. """
    # Placeholder docstring kept minimal on purpose -- see note below.
def setup_PF(w_PF, phi, g, psi, h,
             dx, ds, normal,
             dirichlet_bcs, neumann_bcs, boundary_to_mark,
             phi_1, u_1, M_1, c_1, V_1,
             per_tau, sigma_bar, eps,
             dbeta, dveps,
             enable_NS, enable_EC,
             use_iterative_solvers,
             q_rhs):
    """ Set up the phase-field subproblem.

    Solves phi together with its chemical potential g, with the
    double-well potential linearised about the previous solution phi_1.
    Returns a LinearVariationalSolver for the mixed (phi, g) system.
    """
    # Time derivative + mobility (Cahn-Hilliard-type) term.
    F_phi = (per_tau*(phi-unit_interval_filter(phi_1))*psi*dx +
             M_1*df.dot(df.grad(g), df.grad(psi))*dx)
    if enable_NS:
        # Advection by the previous velocity, integrated by parts
        # (boundary terms restore consistency on df.ds).
        F_phi += - phi*df.dot(u_1, df.grad(psi))*dx \
                 + phi*psi*df.dot(u_1, normal)*df.ds \
                 - M_1*psi*df.dot(df.grad(phi), normal)*df.ds
        # F_phi += df.div(phi*u_1)*psi*dx

    # Definition of the chemical potential g.
    F_g = (g*h*dx
           - sigma_bar*eps*df.dot(df.nabla_grad(phi), df.nabla_grad(h))*dx
           - sigma_bar/eps*(
               diff_pf_potential_linearised(phi,
                                            unit_interval_filter(
                                                phi_1))*h*dx))
    if enable_EC:
        # Solubility and dielectric contributions to g (previous-step
        # concentrations and potential).
        F_g += (-sum([dbeta_i*ci_1*h*dx
                      for dbeta_i, ci_1 in zip(dbeta, c_1)])
                + 0.5*dveps*df.dot(df.nabla_grad(V_1),
                                   df.nabla_grad(V_1))*h*dx)
    for boundary_name, costheta in neumann_bcs["phi"].items():
        # Wetting (contact angle) boundary term, linearised.
        fw_prime = diff_pf_contact_linearised(phi, unit_interval_filter(phi_1))
        # Should be just surface tension!
        F_g += sigma_bar*costheta*fw_prime*h*ds(
            boundary_to_mark[boundary_name])

    if "phi" in q_rhs:
        # Optional manufactured/source right-hand side.
        F_phi += -q_rhs["phi"]*psi*dx

    F = F_phi + F_g
    a, L = df.lhs(F), df.rhs(F)

    problem = df.LinearVariationalProblem(a, L, w_PF, dirichlet_bcs)
    solver = df.LinearVariationalSolver(problem)
    if use_iterative_solvers:
        solver.parameters["linear_solver"] = "gmres"
        # solver.parameters["preconditioner"] = "hypre_euclid"
    return solver
def setup_EC(w_EC, c, V, b, U, rho_e,
             dx, ds, normal,
             dirichlet_bcs, neumann_bcs, boundary_to_mark,
             c_1, u_1, K_, veps_, phi_,
             solutes,
             per_tau, z, dbeta,
             enable_NS, enable_PF,
             use_iterative_solvers,
             q_rhs):
    """ Set up the electrochemistry subproblem.

    Solute transport (diffusion, electromigration linearised with the
    previous concentration c_1, solubility-gradient transport and
    advection) coupled with the Poisson equation for the potential V.
    Returns a LinearVariationalSolver for the mixed (c..., V) system.
    """
    F_c = []
    for ci, ci_1, bi, Ki_, zi, dbetai, solute in zip(
            c, c_1, b, K_, z, dbeta, solutes):
        # Time derivative + Fickian diffusion.
        F_ci = (per_tau*(ci-ci_1)*bi*dx +
                Ki_*df.dot(df.nabla_grad(ci), df.nabla_grad(bi))*dx)
        if zi != 0:
            # Electromigration; c is replaced by c_1 to keep the form linear.
            F_ci += Ki_*zi*ci_1*df.dot(df.nabla_grad(V), df.nabla_grad(bi))*dx
        if enable_PF:
            # Transport driven by the solubility contrast across the interface.
            F_ci += Ki_*ci*dbetai*df.dot(df.nabla_grad(phi_),
                                         df.nabla_grad(bi))*dx
        if enable_NS:
            # Advection, integrated by parts (boundary term on df.ds).
            # F_ci += df.div(ci*u_1)*bi*dx
            F_ci += - ci*df.dot(u_1, df.grad(bi))*dx \
                    + ci*bi*df.dot(u_1, normal)*df.ds
        if solute[0] in q_rhs:
            # Optional per-solute source term.
            F_ci += - q_rhs[solute[0]]*bi*dx
        F_c.append(F_ci)

    # Poisson equation for the electric potential.
    F_V = veps_*df.dot(df.nabla_grad(V), df.nabla_grad(U))*dx
    for boundary_name, sigma_e in neumann_bcs["V"].items():
        # Prescribed surface-charge boundary condition.
        F_V += -sigma_e*U*ds(boundary_to_mark[boundary_name])
    if rho_e != 0:
        # Free-charge source (sum of z_i * c_i).
        F_V += -rho_e*U*dx
    if "V" in q_rhs:
        F_V += q_rhs["V"]*U*dx

    F = sum(F_c) + F_V
    a, L = df.lhs(F), df.rhs(F)

    problem = df.LinearVariationalProblem(a, L, w_EC, dirichlet_bcs)
    solver = df.LinearVariationalSolver(problem)
    if use_iterative_solvers:
        solver.parameters["linear_solver"] = "gmres"
        # solver.parameters["preconditioner"] = "hypre_euclid"
    return solver
def solve(w_, solvers, enable_PF, enable_EC, enable_NS,
          freeze_NSPF, **namespace):
    """ Solve the enabled subproblems, in the order PF, EC, NS.

    PF and NS are skipped while freeze_NSPF is set; EC is solved
    whenever it is enabled.
    """
    outer_timer = df.Timer("Solve system")
    enabled = dict(PF=enable_PF and not freeze_NSPF,
                   EC=enable_EC,
                   NS=enable_NS and not freeze_NSPF)
    for name in ("PF", "EC", "NS"):
        if not enabled[name]:
            continue
        inner_timer = df.Timer("Solve subproblem " + name)
        mpi_barrier()
        solvers[name].solve()
        inner_timer.stop()
    outer_timer.stop()
def update(t, dt, w_, w_1, bcs, bcs_pointwise,
           enable_PF, enable_EC, enable_NS, q_rhs,
           freeze_NSPF, **namespace):
    """ Update work variables at the end of a timestep.

    Advances the time attribute of time-dependent sources and boundary
    expressions to t+dt and copies the freshly computed solution of each
    active subproblem into its previous-timestep function.
    """
    next_t = t + dt
    # Advance the time-dependent source terms.
    for source in q_rhs.values():
        source.t = next_t
    # Advance the time-dependent boundary conditions.
    for fields in bcs.values():
        for bc in fields.values():
            if isinstance(bc.value, df.Expression):
                bc.value.t = next_t
    # Shift current solutions into the previous-timestep slots.
    enabled = dict(PF=enable_PF and not freeze_NSPF,
                   EC=enable_EC,
                   NS=enable_NS and not freeze_NSPF)
    for name, active in enabled.items():
        if active:
            w_1[name].assign(w_[name])
def equilibrium_EC(w_, x_, test_functions,
                   solutes,
                   permittivity,
                   mesh, dx, ds, normal,
                   dirichlet_bcs, neumann_bcs, boundary_to_mark,
                   use_iterative_solvers,
                   c_lagrange, V_lagrange,
                   **namespace):
    """ Electrochemistry equilibrium solver. Nonlinear!

    Solves the coupled solute/potential equilibrium with Newton's
    method.  Optional Lagrange multipliers constrain the amount of each
    solute (c_lagrange) and the potential level (V_lagrange).

    NOTE(review): `composition`, `Q` and `area` are not defined in this
    function; presumably they come from `from . import *` -- confirm
    before relying on this solver.
    """
    num_solutes = len(solutes)

    cV = df.split(w_["EC"])
    c, V = cV[:num_solutes], cV[num_solutes]

    # Lagrange multipliers, trial side.  Placeholders keep the zip below
    # well-formed when the multipliers are disabled; V0 is only indexed
    # when it actually exists in the mixed space (was an IndexError).
    c0 = [None]*num_solutes
    V0 = None
    if c_lagrange:
        c0 = cV[num_solutes+1:2*num_solutes+1]
        if V_lagrange:
            V0 = cV[-1]

    b = test_functions["EC"][:num_solutes]
    U = test_functions["EC"][num_solutes]
    # Lagrange multipliers, test side.
    b0 = [None]*num_solutes
    U0 = None
    if c_lagrange:
        # FIX: these were previously taken from the solution split cV
        # instead of from test_functions.
        b0 = test_functions["EC"][num_solutes+1:2*num_solutes+1]
        if V_lagrange:
            U0 = test_functions["EC"][-1]

    phi = x_["phi"]

    # Target (constrained) amount per solute, derived from the global
    # charge Q distributed according to the prescribed composition.
    q = []
    sum_zx = sum([solute[1]*xj for solute, xj in zip(solutes, composition)])
    for solute, xj in zip(solutes, composition):
        q.append(-xj*Q/(area*sum_zx))

    z = []  # Charge z[species]
    K = []  # Diffusivity K[species]
    beta = []  # Solubility parameter beta[species]
    for solute in solutes:
        z.append(solute[1])
        K.append(ramp(phi, solute[2:4]))
        beta.append(ramp(phi, solute[4:6]))

    rho_e = sum([c_e*z_e for c_e, z_e in zip(c, z)])  # free charge density
    veps = ramp(phi, permittivity)

    F_c = []
    for ci, bi, c0i, b0i, solute, qi, betai, Ki in zip(
            c, b, c0, b0, solutes, q, beta, K):
        zi = solute[1]
        # Equilibrium flux: diffusion + solubility + electromigration.
        F_ci = Ki*(df.dot(
            df.nabla_grad(bi),
            df.nabla_grad(ci) + df.nabla_grad(betai)
            + zi*ci*df.nabla_grad(V)))*dx
        if c_lagrange:
            # Amount constraint enforced via the multiplier pair (c0, b0).
            F_ci += b0i*(ci-df.Constant(qi))*dx + c0i*bi*dx
        # FIX: the per-solute residual was built but never collected, so
        # sum(F_c) silently dropped every concentration equation.
        F_c.append(F_ci)

    # Poisson equation for the potential.
    F_V = veps*df.dot(df.nabla_grad(U), df.nabla_grad(V))*dx
    for boundary_name, sigma_e in neumann_bcs["V"].items():
        F_V += -sigma_e*U*ds(boundary_to_mark[boundary_name])
    if rho_e != 0:
        F_V += -rho_e*U*dx
    if V_lagrange:
        # Pin the potential level via the multiplier pair (V0, U0).
        F_V += V0*U*dx + V*U0*dx

    F = sum(F_c) + F_V
    J = df.derivative(F, w_["EC"])

    problem = df.NonlinearVariationalProblem(F, w_["EC"],
                                             dirichlet_bcs["EC"], J)
    solver = df.NonlinearVariationalSolver(problem)

    solver.parameters["newton_solver"]["relative_tolerance"] = 1e-7
    if use_iterative_solvers:
        solver.parameters["newton_solver"]["linear_solver"] = "bicgstab"
        if not V_lagrange:
            solver.parameters["newton_solver"]["preconditioner"] = "hypre_amg"

    solver.solve()
def discrete_energy(x_, solutes, density, permittivity,
                    c_cutoff, EC_scheme, dt,
                    density_per_concentration,
                    surface_tension, interface_thickness,
                    enable_NS, enable_PF, enable_EC,
                    **namespace):
    """ Return the list of discrete energy densities.

    When x_ is None, return the matching list of energy names instead
    (kinetic, phase-field, per-solute chemical, and electric energies,
    in that order, depending on which subproblems are enabled).
    """
    if x_ is None:
        names = []
        if enable_NS:
            names.append("E_kin")
        if enable_PF:
            names.append("E_phi")
        if enable_EC:
            names += ["E_{}".format(solute[0]) for solute in solutes]
            names.append("E_V")
        return names

    # NOTE: rho/veps require enable_NS or enable_PF (as in the original).
    if enable_NS:
        rho = density[0]
        veps = permittivity[0]
        u = x_["u"]
        # grad_p = df.grad(x_["p"])

    if enable_PF:
        sigma_bar = surface_tension*3./(2*math.sqrt(2))
        eps = interface_thickness
        phi = x_["phi"]
        rho = ramp(phi, density)
        veps = ramp(phi, permittivity)

    if enable_EC:
        grad_V = df.nabla_grad(x_["V"])
        M_list = []
        for solute in solutes:
            ci = x_[solute[0]]
            betai = ramp(phi, solute[4:6]) if enable_PF else solute[4]
            M_list.append(alpha(ci) + betai*ci)

    energies = []
    if enable_NS:
        energies.append(0.5*rho*df.dot(u, u))
    if enable_PF:
        energies.append(sigma_bar/eps*pf_potential(phi)
                        + 0.5*sigma_bar*eps*df.dot(df.nabla_grad(phi),
                                                   df.nabla_grad(phi)))
    if enable_EC:
        energies += M_list
        energies.append(0.5*veps*df.dot(grad_V, grad_V))
    return energies
|
"""Report generator from A2T.
Copyright (c) 2019 Red Hat Inc.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import csv
import os
import sys
import platform
def export_header(csv_writer):
    """Write the column header row for the per-test result table."""
    header = ["#",
              "Test name", "Method",
              "Ecosystem", "Package", "Version",
              "Manifest",
              "Thread#", "Status code",
              "Analysis results",
              "Start time", "End time", "Duration"]
    csv_writer.writerow(header)
def export_environment_info(csv_writer):
    """Export basic information about the test environment.

    Writes one row per item: OS family, OS name, OS release, the Python
    version (major.minor) and the path to the Python interpreter.
    """
    csv_writer.writerow(["System family", os.name])
    csv_writer.writerow(["System", platform.system()])
    csv_writer.writerow(["Version", platform.release()])
    csv_writer.writerow(["Python", "{}.{}".format(sys.version_info.major, sys.version_info.minor)])
    # FIX: label typo -- was "Path to interpret".
    csv_writer.writerow(["Path to interpreter", sys.executable])
def export_test_setup(csv_writer, test):
    """Export information about the test setup.

    Writes a label row followed by the corresponding values taken from
    the `test` mapping (same keys, same order).
    """
    columns = ["Name", "Component analysis", "Stack analysis",
               "Python payload", "Maven payload", "NPM payload",
               "Improper payload", "Mix payloads",
               "Check responses", "Export responses", "Comment"]
    csv_writer.writerow(columns)
    csv_writer.writerow([test[column] for column in columns])
def export_test_results(csv_writer, results):
    """Export results for all tests/API calls.

    Drains the `results` queue, writing one row per result; the first
    column is a 1-based sequence number.
    """
    total = results.qsize()
    for row_number in range(1, total + 1):
        res = results.get()
        csv_writer.writerow([row_number,
                             res["name"], res["method"],
                             res["ecosystem"], res["package"], res["version"],
                             res["manifest"],
                             res["thread_id"],
                             res["status_code"],
                             res["analysis"],
                             res["started"], res["finished"], res["duration"]])
def export_totat_time(csv_writer, start, end, duration):
    """Export information about the total run time.

    (The typo in the function name is kept for caller compatibility.)
    """
    for label, value in (("Start time", start),
                         ("End time", end),
                         ("Duration", duration)):
        csv_writer.writerow([label, value])
def generate_csv_report(results, test, start, end, duration, filename):
    """Generate the complete A2T CSV report in `filename`.

    Layout: environment info, test setup and total times (each followed
    by a blank separator row), then the header row and all per-test
    results.
    """
    with open(filename, 'w', encoding='utf8') as fout:
        writer = csv.writer(fout)
        export_environment_info(writer)
        writer.writerow([])
        export_test_setup(writer, test)
        writer.writerow([])
        export_totat_time(writer, start, end, duration)
        writer.writerow([])
        export_header(writer)
        export_test_results(writer, results)
|
# Generated by Django 3.0.6 on 2020-05-16 14:59
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated (Django 3.0.6) schema change for ContactTag.

    * Re-point ContactTag.contact with on_delete=CASCADE and the
      reverse accessor name ``contacttags`` on Contact.
    * Enforce that each (contact, tag) pair is unique.
    """

    dependencies = [
        ('contacts', '0004_auto_20200516_0053'),
    ]

    operations = [
        migrations.AlterField(
            model_name='contacttag',
            name='contact',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='contacttags', to='contacts.Contact'),
        ),
        migrations.AlterUniqueTogether(
            name='contacttag',
            unique_together={('contact', 'tag')},
        ),
    ]
|
import numpy as np
import math
import random
from copy import copy
def encodeKey(index, value):
    """Build the string key "<index>,<value>"."""
    return f"{index},{value}"
def decodeKey(key):
    """Inverse of encodeKey: parse "i,v" back into the int list [i, v].

    Replaces the redundant ``map(lambda x: int(x), ...)`` with a list
    comprehension.
    """
    return [int(part) for part in key.split(",")]
#returns an expression to get the transformed coordinates
# from the original dimensions to the 1 dimension flattened data
def getExpr(size):
    """Build (as a string) the expression mapping the coordinate list
    ``xi`` to a row-major flat index.

    For size (s0, ..., sk) the result is
    "xi[0]*s1*...*sk+xi[1]*s2*...*sk+...+xi[k]".
    For a 1-D size the result is "+xi[0]" (leading unary plus kept for
    byte-compatibility with the original).
    """
    if len(size) > 1:
        terms = []
        for dim in range(1, len(size)):
            factors = ["xi[{}]".format(dim - 1)]
            factors += ["{}".format(size[j]) for j in range(dim, len(size))]
            terms.append("*".join(factors))
        last = len(size) - 1
    else:
        terms = []
        last = 0
    return "+".join(terms) + "+xi[{}]".format(last)
#returns an array with the position in the flattened data
#coords is an array with coordinate relative to the cell in the original dimensions
# size = np.shape(data)
def getNeighbors(cell, coords, size):
    """Map neighbour offsets of ``cell`` to flat (row-major) indices.

    cell   -- coordinates of the cell in the original dimensions
    coords -- neighbourhood offsets: each entry is either a per-dimension
              offset sequence, or a single int applied to every dimension
              (in which case bounds are checked against size[0] only, as
              in the original implementation)
    size   -- shape of the original data, e.g. np.shape(data)

    Returns one flat index per offset; -1 marks a neighbour that falls
    outside the grid.

    The previous implementation eval()'d an expression string built by
    getExpr(); this computes the same row-major index arithmetically.
    """
    # Row-major strides: strides[i] = prod(size[i+1:]).
    strides = [1] * len(size)
    for i in range(len(size) - 2, -1, -1):
        strides[i] = strides[i + 1] * size[i + 1]

    newCoords = []
    for coord in coords:
        xi = []
        outOfBounds = False
        scalar = type(coord) == int  # preserve original int-only check
        for i, c in enumerate(cell):
            v = c + (coord if scalar else coord[i])
            bound = size[0] if scalar else size[i]
            if v >= bound or v < 0:
                outOfBounds = True
            else:
                xi.append(v)
        if outOfBounds:
            newCoords.append(-1)
        else:
            newCoords.append(sum(x * s for x, s in zip(xi, strides)))
    return newCoords
#returns the values of the neighbors of a certain cell
#data = flattened array of the data
#neighbors = the positions of neighbors of a certain cell
#paddingtype = 0 => don't get values,1=> fill with padding value, 2 => don't fill and return empty dict
#paddingvalue = the values to fill when the padding type equals 1
def getNeighborsValue(data, neighbors, paddingType=0, paddingValue=0):
    """Collect the values of a cell's neighbours from the flat data.

    data         -- flattened data array
    neighbors    -- flat positions from getNeighbors (-1 = out of bounds)
    paddingType  -- 0: skip out-of-bounds neighbours;
                    1: substitute paddingValue for them;
                    2: return None as soon as one is out of bounds
    paddingValue -- the value used when paddingType == 1

    Returns {neighbor_position_in_list: value}, or None for an aborted
    paddingType-2 lookup.  None data values are skipped, as before.
    """
    values = {}
    for i, position in enumerate(neighbors):
        val = None
        if 0 <= position < len(data):  # chained comparison, same bounds
            val = data[position]
        elif paddingType == 1:
            val = paddingValue
        elif paddingType == 2:
            return None
        # paddingType 0 (or anything else): out-of-bounds is skipped.
        if val is not None:  # idiom fix: was `val != None`
            values[i] = val
    return values
#returns in each iteration an array with the indexes of each dimension
def multiDimensionalGenerator(size):
    """Yield every index list of the hyper-rectangle of shape ``size``.

    For len(size) >= 2 the indices come out in descending lexicographic
    order (last dimension varies fastest), each as a list of plain ints;
    e.g. for size (2, 2): [1, 1], [1, 0], [0, 1], [0, 0].

    NOTE(review): for a 1-D size the generator additionally yields [-1]
    after [0] -- looks like a latent bug; confirm callers never pass a
    1-D shape (negative indices silently wrap in numpy).
    """
    # counters holds the current index: all-but-last entries start at
    # their maximum (size-1); the last starts at size so that the first
    # decrement in the loop produces size-1.
    counters = np.array([size[i]-1 for i in range(len(size)-1)])
    counters = np.append(counters, size[-1])
    count = len(size)-1  # dimension currently being decremented
    while (counters[0] >= 0):
        counters[count] -= 1
        yield [int(i) for i in counters]
        if counters[count] <= 0:
            # Fastest dimension exhausted: borrow from the nearest
            # slower dimension that is still positive...
            while(counters[count] <= 0 and count > 0):
                count -= 1
                counters[count] -= 1
            # ...then reset all faster dimensions to their start values.
            while(count+1 < len(size)):
                if count+1 == len(size)-1:
                    counters[count+1] = size[count+1]
                else:
                    counters[count+1] = size[count+1]-1
                count += 1
def manhattanDistance(arr):
    """Return the L1 norm of ``arr``: the sum of absolute values.

    Idiomatic ``sum`` over a generator replaces the manual accumulator.
    """
    return sum(abs(component) for component in arr)
def vonNeumann(radious, distance):
    """Von Neumann neighbourhood: offsets whose L1 norm is <= distance."""
    def within(offset):
        return manhattanDistance(offset) <= distance
    return getNeighborhood(radious, within)
def moore(radious):
    """Moore neighbourhood: every offset within the given radii."""
    return getNeighborhood(radious, lambda _offset: True)
#returns an array with the neighborhood
#expression = function to filter the neighborhood, receives a list of the indexes according to the dimension
#radious = array with the distance from each dimension
def getNeighborhood(radious, expression):
    """Enumerate the neighbourhood offsets selected by a predicate.

    radious    -- per-dimension radius; offsets range over [-r, r]
    expression -- predicate over an offset vector (np array); keep if True

    Returns the selected offsets sorted lexicographically by dimension.
    """
    dimensions = len(radious)
    axes = [np.arange(-radious[d], radious[d]+1, 1) for d in range(dimensions)]
    grid = np.stack(np.meshgrid(*axes), axis=dimensions)
    gridShape = np.shape(grid)[:-1]
    neighborhood = []
    for index in multiDimensionalGenerator(gridShape):
        candidate = grid[tuple(index)]
        if expression(candidate):
            neighborhood.append(candidate)
    # Stable multi-pass sort, last dimension first, so the final order
    # is lexicographic by (offset[0], offset[1], ...).
    for dim in range(dimensions-1, -1, -1):
        neighborhood.sort(key=lambda off: off[dim])
    return neighborhood
def dictToMat(dic):
    """Flatten {label: {key: row}} into a matrix, appending each row's
    label to that row."""
    matrix = []
    for label, rows in dic.items():
        for row in rows.values():
            entry = copy(row)
            if type(entry) is np.ndarray:
                entry = np.append(entry, label)
            else:
                entry.append(label)
            matrix.append(entry)
    return matrix
def getDomain(data):
    """Collect the observed values of every attribute (column).

    data -- iterable of rows (the learning subproblems).

    Returns {column_index: sorted list of distinct observed values}.
    Values must be hashable.  Sets replace the original per-column list
    membership test, which made this O(n^2) per column.
    """
    seen = {}
    for row in data:
        for j, col in enumerate(row):
            seen.setdefault(j, set()).add(col)
    return {j: sorted(vals) for j, vals in seen.items()}
def binarize(data, domain):
    """Threshold-binarize every column except the last (the class).

    Each value expands to one bit per domain value v of its column:
    1 if value >= v else 0.  The final column is copied through
    unchanged.
    """
    binarized = []
    for row in data:
        last = len(row) - 1
        encoded = []
        for j, col in enumerate(row):
            if j == last:
                encoded.append(col)
            else:
                encoded.extend(1 if col >= val else 0 for val in domain[j])
        binarized.append(encoded)
    return binarized
def getLearningProblem(data, neighborhood, paddingType, paddingValue):
    """Build the rule-learning subproblems from a sequence of CA states.

    data         -- array of states; axis 0 indexes time, the remaining
                    axes form the cell grid
    neighborhood -- offset list (e.g. from moore()/vonNeumann())
    paddingType, paddingValue -- forwarded to getNeighborsValue

    For every cell of every state (from the second state on), collects
    the neighbour values observed in the *previous* state, keyed by the
    cell's current value (the class).  Duplicate neighbour patterns are
    collapsed via their str() representation.

    Returns {class_value: {str(pattern): pattern_as_list}}.
    """
    problem = {}
    dataSize = np.shape(data)
    stateSize = dataSize[1:]
    noStates = dataSize[0]
    for iState in range(1, noStates):
        # Previous state, flattened to match the indices from getNeighbors.
        currentState = data[iState-1].reshape(-1)
        for cell in multiDimensionalGenerator(stateSize):
            index = tuple([iState]+cell)
            cls = data[index]  # the cell's value in the current state
            if cls not in problem:
                problem[cls] = {}
            neighbors = getNeighbors(cell, neighborhood, stateSize)
            values = getNeighborsValue(currentState, neighbors, paddingType, paddingValue)
            if values != None:
                # Keep insertion order of the dict as the pattern order.
                values = [values[key] for key in values]
                problem[cls][str(values)] = values
    return problem
#individual format (ant,cons) where:
# ant = [[attrInd,val],...,[attrInd,val]]
# cons = [attrInd,val]
#Returns the number of active attributes in the antecedent
def getNumberOfAttributes(ant):
    """Count the active attributes in an antecedent.

    ant -- [[attrInd, val], ...]; val == -1 marks an inactive attribute.
    (Removes the stray semicolon and manual counter of the original.)
    """
    return sum(1 for attr in ant if attr[1] != -1)
#Returns the interestingess degree
#totalInfoGain = the summatory of the infoGain for each attribute in the antecedent
#noOfAttr = the number of attributes in the antecedent
#domainCardinality = the Cardinality of the goal attribute
def antInterestignessDegree(totalInfoGain, noOfAttr, domainCardinality):
    """Interestingness of the antecedent.

    One minus the mean info gain per active attribute, normalised by
    log2 of the goal attribute's cardinality.  An empty antecedent is
    maximally interesting (1).
    """
    if noOfAttr == 0:
        return 1
    mean_gain = totalInfoGain / noOfAttr
    return 1 - mean_gain / math.log2(domainCardinality)
def consInterestignessDegree(consAttr, noEvents, beta):
    """Interestingness of the consequent.

    Rarer goal values score higher; the exponent 1/beta tempers the
    effect.  consAttr is [goal attribute index, value].
    """
    attr, value = consAttr[0], consAttr[1]
    favourable = noEvents[attr][attr][value][value]
    total = noEvents["totalNoEvents"]
    return math.pow(1 - probability(favourable, total), (1/beta))
#returns the infoGain of an antecedent attribute with a given goal attribute
#attAnt = the antecedent attribute (pair of attr index and value)
#attCons = the consequent attribute (pair of attr index and value)
#domain = the domain of the attributes
#noEvents = tha total number of events for the probability calculation
def infoGain(attAnt, attCons, domain, noEvents):
    """Information gain of an antecedent attribute w.r.t. the goal:
    H(goal) - H(goal | antecedent attribute)."""
    entropy = info(domain, noEvents, attCons)
    conditional = info(domain, noEvents, attCons, attAnt)
    return entropy - conditional
#returns the entropy of the goal attribute or the entropy ot he goal attribute given antecedent attribute
#domain = the domain of the attributes
#noEvents = tha total number of events for the probability calculation
#attCons = the consequent attribute (pair of attr index and value)
#attAnt = the antecedent attribute (pair of attr index and value)
def info(domain,noEvents,attCons,attAnt = None):
    """Entropy of the goal attribute, or its conditional entropy.

    domain   -- {attribute_index: list of values}
    noEvents -- nested counts from calculateNoEvents, plus "totalNoEvents"
    attCons  -- [goal attribute index, value]
    attAnt   -- optional [antecedent attribute index, value]; when given,
                returns H(goal | antecedent attribute), otherwise H(goal).
    """
    res = 0
    if attAnt == None:
        # Plain entropy: H = -sum_v p(v) * log2 p(v).
        for val in domain[attCons[0]]:
            noFavEvents = noEvents[attCons[0]][attCons[0]][val][val]
            totalNoEvents = noEvents["totalNoEvents"]
            pr = probability(noFavEvents,totalNoEvents)
            # NOTE(review): math.log2(0) raises if a domain value never
            # occurs -- confirm domains only contain observed values.
            res += (pr*math.log2(pr))
        res = res * -1
    else:
        # Conditional entropy H(goal | antecedent attribute).
        for val in domain[attAnt[0]]:
            totalNoEvents = noEvents["totalNoEvents"]
            # Count events where the antecedent attribute equals `val`,
            # summed over every (goal attribute, goal value) bucket it
            # was tallied against.
            noFavEvents = 0
            for gAttr in noEvents[attAnt[0]]:
                for gVal in noEvents[attAnt[0]][gAttr]:
                    noFavEvents += noEvents[attAnt[0]][gAttr][gVal][val]
            prAntAtt = probability(noFavEvents,totalNoEvents)
            sumCondInfo = 0
            for cVal in domain[attCons[0]]:
                # P(goal=cVal | ant=val) = P(goal=cVal, ant=val) / P(ant=val)
                probCA = probability(noEvents[attAnt[0]][attCons[0]][cVal][val],totalNoEvents)
                probA = probability(noFavEvents,totalNoEvents)
                condProb = probCA / probA
                if (condProb>0):
                    sumCondInfo += (condProb*math.log2(condProb))
            sumCondInfo *= -1
            res += sumCondInfo * prAntAtt
    return res
def probability(noFavEvents, noEvents):
    """Relative frequency: favourable events over total events."""
    return noFavEvents / noEvents
#Calculate the number of events given each possible value of the goal attributes indexes specified
#goalAttributes = an array with the goal attributes
#domain = the domain of the attributes
#dataset = the dataset where the data that will be processed
def calculateNoEvents(goalAttributes,domain,dataset):
    """Tally the event counts used by the probability computations.

    goalAttributes -- list of goal attribute indices
    domain         -- {attribute_index: list of values}
    dataset        -- array whose *last* axis indexes attributes

    Returns nested counts noEventsC[attr][goal][gval][val]: the number
    of records where attribute `attr` takes `val` while goal attribute
    `goal` takes `gval`, plus noEventsC["totalNoEvents"]: the total
    number of records (product of all non-attribute axes).
    """
    noEventsC = {}
    noEvents = 1
    #for step in dataset:
    #    noEvents += len(step)
    # Total records = product of every axis except the attribute axis.
    for val in np.shape(dataset)[:-1]:
        noEvents = noEvents*val
    # Initialise every counter bucket to zero.
    for attr in domain:
        noEventsC[attr] = {}
        for g in goalAttributes:
            noEventsC[attr][g] = {}
            for gval in domain[g]:
                noEventsC[attr][g][gval] = {}
                for val in domain[attr]:
                    noEventsC[attr][g][gval][val] = 0
    size = np.shape(dataset)
    for index in multiDimensionalGenerator(size):
        ind = tuple(index)
        val = dataset[ind]
        attr = index[-1]  # the last index component is the attribute
        for g in goalAttributes:
            # Same record, goal attribute's value.
            ind2 = tuple(index[:-1]+[g])
            gval = dataset[ind2]
            noEventsC[attr][g][gval][val] += 1
    noEventsC["totalNoEvents"] = noEvents
    return noEventsC
#Returns the accuracy of the antecedent with the consequent
#ant = the array of attributes
#cons = the attribute
#dataset = the data that will be processed
def predictionAccuracy(ant,cons,dataset):
    """Per-goal-value confidence of the antecedent.

    ant     -- [[attrInd, val], ...]; entries with val == -1 are inactive
    cons    -- [goal attribute index, value]; only the index is used here
    dataset -- array whose last axis indexes attributes

    For each observed goal value v, returns
    (|rows matching ant with goal v| - 1/2) / |rows matching ant|
    (the 1/2 acts as a continuity-style correction).  Goal values that
    never co-occur with a matching row keep their raw count of 0 when no
    row matched at all.  Returns {goal_value: score}.
    """
    acCount = {}
    aCount = 0  # number of rows matching the antecedent
    size = np.shape(dataset)[:-1]
    for index in multiDimensionalGenerator(size):
        ind = tuple(index)
        vAnt = True
        row = dataset[ind]
        # The row matches when every active antecedent attribute agrees.
        for att in ant:
            vAnt = vAnt and ((row[att[0]] == att[1]) if att[1] != -1 else True)
        if row[cons[0]] not in acCount:
            acCount[row[cons[0]]] = 0
        if vAnt:
            acCount[row[cons[0]]] += 1
            aCount += 1
    for key in acCount:
        if aCount > 0:
            acCount[key] = (acCount[key] - 1/2)/aCount
    return acCount
def predictionAccuracy2(ant,cons,dataset):
    """Count rows accepted/rejected by the antecedent, per goal value.

    Like predictionAccuracy, but returns the raw counts
    {"accepted": {goal_value: n_matching_rows},
     "rejected": {goal_value: n_non_matching_rows}}
    in the form expected by f1score().
    """
    acCount = {"accepted":{},"rejected":{}}
    aCount = 0  # number of accepted rows (computed but not returned)
    size = np.shape(dataset)[:-1]
    for index in multiDimensionalGenerator(size):
        ind = tuple(index)
        vAnt = True
        row = dataset[ind]
        # The row matches when every active antecedent attribute agrees.
        for att in ant:
            vAnt = vAnt and ((row[att[0]] == att[1]) if att[1] != -1 else True)
        if row[cons[0]] not in acCount["accepted"]:
            acCount["accepted"][row[cons[0]]] = 0
        if row[cons[0]] not in acCount["rejected"]:
            acCount["rejected"][row[cons[0]]] = 0
        if vAnt:
            acCount["accepted"][row[cons[0]]] += 1
            aCount += 1
        else:
            acCount["rejected"][row[cons[0]]] += 1
    return acCount
def f1score(acc):
    """Per-class F1 score from predictionAccuracy2's accepted/rejected counts.

    For each class c (treating "accepted" as predicted-positive):
      recall    = accepted[c] / (accepted[c] + rejected[c])
      precision = accepted[c] / sum(accepted.values())
      f1        = 2 * precision * recall / (precision + recall)

    Returns {class: f1}.  Zero denominators yield 0 instead of raising.
    """
    accepted = acc["accepted"]
    rejected = acc["rejected"]
    total_accepted = sum(accepted.values())
    f1 = {}
    for cls, true_pos in accepted.items():
        support = true_pos + rejected.get(cls, 0)  # TP + FN
        recall = true_pos / support if support else 0
        precision = true_pos / total_accepted if total_accepted else 0
        if precision + recall:
            # FIX: the harmonic mean needs the factor 2; the previous
            # version returned half the true F1 score.
            f1[cls] = 2 * precision * recall / (precision + recall)
        else:
            f1[cls] = 0
    return f1
#Returns the fitness of an individual
def gafitness(w1,w2,beta,ant,cons,domain,noEvents,dataset):
    """GA fitness of one rule individual (ant -> cons).

    Scores every candidate consequent value and keeps the best one by
    side effect: cons[1] is overwritten with the best-scoring value.

    w1, w2   -- weights blending interestingness vs. prediction accuracy
    beta     -- parameter forwarded to consInterestignessDegree
    ant      -- antecedent: list of [attribute, value] pairs (-1 = unused)
    cons     -- consequent: [attribute, value]; the value is mutated here
    domain   -- {attribute: list of possible values}
    noEvents -- event-count statistics used by the interestingness measures
    dataset  -- data scored by predictionAccuracy
    Returns the best fitness found (0 if nothing scores above 0).
    """
    bestGoalValue = 0
    noAttr = 0
    noAttr = getNumberOfAttributes(ant)
    consInt = {}
    sumInfoGain= {}
    antInt = {}
    # accuracy per consequent value for the current antecedent
    acc = predictionAccuracy(ant,cons,dataset)
    for val in domain[cons[0]]:
        consInt[val] = consInterestignessDegree([cons[0],val],noEvents,beta)
        if val not in sumInfoGain:
            sumInfoGain[val] = 0
        # information gain of each active antecedent attribute w.r.t. cons=val
        for attr in ant:
            if attr[1] != -1:
                sumInfoGain[val] += infoGain(attr,[cons[0],val],domain,noEvents)
        antInt[val] = antInterestignessDegree(sumInfoGain[val],noAttr,len(domain[cons[0]]))
        # weighted blend of interestingness (w1) and accuracy (w2)
        # NOTE(review): acc[val] assumes every domain value occurred in the
        # data — KeyError otherwise; confirm against predictionAccuracy.
        fit = ((w1*(antInt[val] + consInt[val]) / 2) + (w2 * acc[val])) / (w1 + w2)
        #print("fit {},antInt {},consInt {},acc {}".format(fit,antInt[val],consInt[val],acc[val]))
        #print(fit)
        if fit > bestGoalValue:
            bestGoalValue = fit
            cons[1] = val
    return bestGoalValue
#Returns the fitness of an individual
def gafitness2(w1,w2,beta,ant,cons,domain,noEvents,dataset):
    """Variant of gafitness that measures accuracy with the F1 score.

    Same contract as gafitness (cons[1] is overwritten with the best
    value; the best fitness is returned), but the accuracy term comes
    from predictionAccuracy2 + f1score instead of predictionAccuracy.
    """
    bestGoalValue = 0
    noAttr = 0
    noAttr = getNumberOfAttributes(ant)
    consInt = {}
    sumInfoGain= {}
    antInt = {}
    # per-class F1 of the antecedent as a classifier for attribute cons[0]
    acc = predictionAccuracy2(ant,cons,dataset)
    acc = f1score(acc)
    #print(acc)
    for val in domain[cons[0]]:
        consInt[val] = consInterestignessDegree([cons[0],val],noEvents,beta)
        if val not in sumInfoGain:
            sumInfoGain[val] = 0
        for attr in ant:
            if attr[1] != -1:
                sumInfoGain[val] += infoGain(attr,[cons[0],val],domain,noEvents)
        antInt[val] = antInterestignessDegree(sumInfoGain[val],noAttr,len(domain[cons[0]]))
        # weighted blend of interestingness (w1) and F1 accuracy (w2)
        # NOTE(review): acc[val] assumes val occurred in the data — verify.
        fit = ((w1*(antInt[val] + consInt[val]) / 2) + (w2 * acc[val])) / (w1 + w2)
        #print("fit {},antInt {},consInt {},acc {}".format(fit,antInt[val],consInt[val],acc[val]))
        #print(fit)
        if fit > bestGoalValue:
            bestGoalValue = fit
            cons[1] = val
    return bestGoalValue
def initialize(populationSize,antMinSize,antMaxSize,objAttrInd,domain,seed=-1):
    """Build the initial GA population of [antecedent, consequent] rules.

    Each antecedent has one [attribute, -1] gene per domain attribute;
    between antMinSize and antMaxSize random genes get an initial value.
    The consequent predicts attribute objAttrInd with a random value.
    Passing seed != -1 reseeds the global RNG for reproducibility.
    """
    if seed != -1:
        random.seed(seed)
    population = []
    for _ in range(populationSize):
        antSize = random.randint(antMinSize, antMaxSize)
        ant = [[gene, -1] for gene in range(len(domain))]
        for _ in range(antSize):
            attr = random.randint(0, len(domain) - 1)
            ant[attr][1] = random.randint(-1, max(domain[attr]))
        goalVal = random.randint(min(domain[objAttrInd]), max(domain[objAttrInd]))
        population.append([ant, [objAttrInd, goalVal]])
    return population
def countActiveGenes(ant):
    """Number of antecedent genes whose value is set (not the -1 wildcard)."""
    return sum(1 for gen in ant if gen[1] != -1)
def insertCondition(ant,antMaxSize,domain):
    """Randomly activate extra antecedent genes, in place.

    The emptier the antecedent (relative to antMaxSize), the more likely
    a growth pass is attempted; within a pass each gene has a 20% chance
    of being activated with a random value from its domain, while free
    slots remain.
    """
    active = countActiveGenes(ant)
    prob = 1-(active/antMaxSize)
    if random.random() < prob:
        for gen in ant:
            # draw first, then check capacity and that the gene is inactive
            if random.random() < .2 and active < antMaxSize:
                if gen[1] == -1:
                    ind = random.randint(0,len(domain[gen[0]])-1)
                    gen[1] = domain[gen[0]][ind]
                    active += 1
                    # NOTE(review): the outer draw already happened, so this
                    # recomputed prob is never read again — dead assignment.
                    prob = 1-(active/antMaxSize)
def removeCondition(ant,antMaxSize,domain):
    """Randomly deactivate antecedent genes, in place.

    Mirror image of insertCondition: the fuller the antecedent, the more
    likely a shrink pass runs; each gene then has a 20% chance of being
    reset to the -1 wildcard, never dropping below one active gene.
    """
    active = countActiveGenes(ant)
    prob = (active/antMaxSize)
    if random.random() < prob:
        for gen in ant:
            if active > 1:
                if random.random() < .2:
                    if gen[1] != -1:
                        gen[1] = -1
                        active -= 1
                        # NOTE(review): recomputed but never read again —
                        # the outer draw already happened.
                        prob = (active/antMaxSize)
def tournament(fitnessTbl,k):
    """Return the fittest of k+1 uniformly drawn entries of fitnessTbl.

    Entries are [population_index, fitness, consequent_value]; the sampled
    entry with the highest fitness wins (draws are with replacement).
    """
    best = None
    for _ in range(k + 1):
        candidate = random.randint(0, len(fitnessTbl) - 1)
        if best is None or fitnessTbl[candidate][1] > fitnessTbl[best][1]:
            best = candidate
    return fitnessTbl[best]
def crossover(parents,population,crossprob):
    """Uniform crossover over consecutive parent pairs.

    parents: list of fitness entries whose first element indexes population.
    Two children are produced per pair; at every gene position the genes are
    swapped between the parents with probability crossprob.  Consequents are
    copied from the respective parent unchanged.  Genes and consequents are
    fresh lists, so children never alias parent state.
    """
    offsprings = []
    for i in range(1, len(parents), 2):
        first = population[parents[i - 1][0]]
        second = population[parents[i][0]]
        childA = [[], [first[1][0], first[1][1]]]
        childB = [[], [second[1][0], second[1][1]]]
        for j in range(len(first[0])):
            geneA, geneB = first[0][j], second[0][j]
            if random.random() < crossprob:
                childA[0].append([geneB[0], geneB[1]])
                childB[0].append([geneA[0], geneA[1]])
            else:
                childA[0].append([geneA[0], geneA[1]])
                childB[0].append([geneB[0], geneB[1]])
        offsprings.append(childA)
        offsprings.append(childB)
    return offsprings
def mutate(ant,domain,mutationRate):
    """Re-draw each gene's value from its domain with prob. mutationRate (in place)."""
    for gene in ant:
        if random.random() <= mutationRate:
            choice = random.randint(0, len(domain[gene[0]]) - 1)
            gene[1] = domain[gene[0]][choice]
def removePopulation(population,fitnessTbl,populationSize):
    """Select survivors: the current best plus populationSize tournament winners.

    fitnessTbl must be sorted best-first; each tournament winner is removed
    from it before the next draw, so an entry cannot be selected twice.
    Stops early if the table empties.
    """
    survivors = [population[fitnessTbl[0][0]]]  # elitism: always keep the best
    for _ in range(populationSize):
        winner = tournament(fitnessTbl, 2)
        survivors.append(population[winner[0]])
        fitnessTbl.remove(winner)
        if len(fitnessTbl) <= 0:
            break
    return survivors
def ganuggets(populationSize,noOffsprings,antMinSize,antMaxSize,beta,w1,w2,mutationRate,crossprob,dataset,domain,goalAttr,noEvents,seed,maxIter = 0):
    """GA-Nuggets-style genetic search for prediction rules.

    Evolves [antecedent, consequent] individuals (see initialize) for at
    most maxIter generations, niching by consequent value: selection,
    crossover and survivor replacement run independently per predicted
    value, so a rule is obtained for every goal value that appears.

    Returns (fitnessTbl, population, fitnessHistory): fitnessTbl is sorted
    best-first as [index, fitness, consequent_value] entries and
    fitnessHistory maps consequent value -> best fitness per generation.
    """
    population = initialize(populationSize,antMinSize,antMaxSize,goalAttr,domain,seed)
    # score the initial population; gafitness also fixes each cons value
    fitnessTbl = []
    for i in range(len(population)):
        fit = gafitness(w1,w2,beta,population[i][0],population[i][1],domain,noEvents,dataset)
        fitnessTbl.append([i,fit,population[i][1][1]])
    it = 0
    fitGoalReached = False  # NOTE(review): never set True; loop always runs maxIter times
    fitnessHistory = {}
    while it < maxIter and not fitGoalReached:
        print(it)
        it += 1
        fitnessTbl = sorted(fitnessTbl,key = lambda x: x[1],reverse = True)
        #select individuals based on fitness
        # group fitness entries by the consequent value they predict
        groupedFitness = {}
        for fit in fitnessTbl:
            if fit[2] not in groupedFitness:
                groupedFitness[fit[2]] = []
            if fit[2] not in fitnessHistory:
                fitnessHistory[fit[2]] = []
            groupedFitness[fit[2]].append(fit)
        parents = {}
        offsprings = []
        for key in groupedFitness:
            if len(groupedFitness[key]) > 0:
                #print("1.- ",groupedFitness[key][0])
                # record this generation's best fitness for the niche
                fitnessHistory[key].append(groupedFitness[key][0][1])
                if key not in parents:
                    parents[key] = []
                # tournament-select two parents per offspring within the niche
                for i in range(noOffsprings*2):
                    best = tournament(groupedFitness[key],2)
                    parents[key].append(best)
                offsprings += crossover(parents[key],population,crossprob)
        # mutate offspring and randomly grow/shrink their antecedents
        for child in offsprings:
            mutate(child[0],domain,mutationRate)
            insertCondition(child[0],antMaxSize,domain)
            removeCondition(child[0],antMaxSize,domain)
        population = population+offsprings
        # re-score the enlarged population
        fitnessTbl = []
        for i in range(len(population)):
            fit = gafitness(w1,w2,beta,population[i][0],population[i][1],domain,noEvents,dataset)
            fitnessTbl.append([i,fit,population[i][1][1]])
        fitnessTbl = sorted(fitnessTbl,key = lambda x: x[1],reverse = True)
        groupedFitness = {}
        for fit in fitnessTbl:
            if fit[2] not in groupedFitness:
                groupedFitness[fit[2]] = []
            groupedFitness[fit[2]].append(fit)
        # survivor selection per niche (elitist + tournaments)
        temPop = []
        for key in groupedFitness:
            #print("2.- ",groupedFitness[key][0])
            if len(groupedFitness[key]) > 0:
                temPop += removePopulation(population,groupedFitness[key],populationSize)
        population = temPop
    # final scoring of the surviving population
    fitnessTbl = []
    for i in range(len(population)):
        fit = gafitness(w1,w2,beta,population[i][0],population[i][1],domain,noEvents,dataset)
        fitnessTbl.append([i,fit,population[i][1][1]])
    fitnessTbl = sorted(fitnessTbl,key = lambda x: x[1],reverse = True)
    return fitnessTbl,population,fitnessHistory
def populationPostprocessing(population):
    """Group active antecedent genes by predicted consequent value.

    Returns {consequent_value: [["attr,value"], ...]} containing one
    single-term clause per active gene of every individual predicting
    that value.
    """
    rules = {}
    for ant, cons in population:
        clauses = rules.setdefault(cons[1], [])
        for attr, value in ant:
            if value != -1:
                clauses.append(["{},{}".format(attr, value)])
    return rules
def binarizedToDomain(rules,domain):
    """Translate binarized rule clauses back into readable domain conditions.

    Each term encodes a binarized column index and a 0/1 value (decoded by
    decodeKey).  The flat index is mapped back to the original column and
    threshold via the domain dict, and rendered as "A[col] >= thr" when the
    value is 1, "A[col] < thr" otherwise.
    Returns {class: conjunction-of-disjunctions string}.
    """
    columns = list(domain.keys())
    translated = {}
    for cls in rules:
        conjuncts = []
        for clause in rules[cls]:
            disjuncts = []
            for term in clause:
                ind, val = decodeKey(term)
                # locate which original column this binarized index falls in
                col = 0
                offset = 0
                for key in columns:
                    if offset <= ind < offset + len(domain[key]):
                        col = key
                        ind -= offset
                        break
                    offset += len(domain[key])
                op = ">=" if val == 1 else "<"
                disjuncts.append("A[{}] {} {}".format(col, op, domain[col][ind]))
            conjuncts.append("({})".format(" or ".join(disjuncts)))
        translated[cls] = " and ".join(conjuncts)
    return translated
from flask import Flask
from dotenv import load_dotenv
import os
from app import configure_app
# Provide local development defaults when no environment is configured.
# load_dotenv() below fills in any variables still missing from a .env
# file; by default it does not override values already set here.
if "ENVIRONMENT" not in os.environ:  # FIX: idiomatic `not in` membership test
    os.environ["ENVIRONMENT"] = "development"
    os.environ["FLASK_ENV"] = "development"
    os.environ["RDS_DB_NAME"] = "storytime"
    os.environ["RDS_HOSTNAME"] = "localhost"
    os.environ["RDS_PASSWORD"] = ""
    os.environ["RDS_USERNAME"] = "root"
    os.environ["SECRET_KEY"] = "secret"
load_dotenv()
# Include application callable here so it can be used by WSGI
application = Flask(__name__)
configure_app(application)
# Run the application if it's being called by Elastic Beanstalk server
if __name__ == "__main__":
    # enable the debugger/reloader everywhere except production
    is_production = os.environ["ENVIRONMENT"] == "production"
    application.run(debug=not is_production)
|
#!/usr/bin/env python
# coding: utf-8
# In[3]:
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
#get_ipython().magic(u'matplotlib inline')
# In[4]:
# Load pre-extracted CNN features (x) and labels (y).
# NOTE(review): absolute user-specific paths — parameterise before reuse.
x = pd.read_csv('/Users/ombahiwal/PycharmProjects/test_kvb/omkar/CNN/train3/x_all.csv',delimiter=",")
y = pd.read_csv('/Users/ombahiwal/PycharmProjects/test_kvb/omkar/CNN/train3/y_all.csv',delimiter=",")
# In[5]:
# Bare expressions below are notebook cell echoes; no effect as a script.
np.shape(x)
# In[6]:
np.shape(y)
# In[7]:
x
# In[8]:
y
# In[9]:
# Drop x's leading (index) column; y's first column holds the labels.
X = x.iloc[:,1:156].values
y = y.iloc[:,0].values
X
# In[10]:
y
# In[11]:
np.shape(x)
# In[12]:
np.shape(y)
# In[13]:
# Standardise features to zero mean / unit variance before PCA.
from sklearn.preprocessing import StandardScaler
X_std = StandardScaler().fit_transform(X)
# In[14]:
# Covariance matrix of the standardised data (matches np.cov below).
mean_vec = np.mean(X_std, axis=0)
cov_mat = (X_std - mean_vec).T.dot((X_std - mean_vec)) / (X_std.shape[0]-1)
print('Covariance matrix \n%s' %cov_mat)
# In[15]:
print('NumPy covariance matrix: \n%s' %np.cov(X_std.T))
# In[16]:
plt.figure(figsize=(16,16))
sns.heatmap(cov_mat, vmax=1, square=True,annot=True,cmap='cubehelix')
plt.title('Correlation between different features')
# In[17]:
# Eigendecomposition: principal directions and their variances.
eig_vals, eig_vecs = np.linalg.eig(cov_mat)
print('Eigenvectors \n%s' %eig_vecs)
print('\nEigenvalues \n%s' %eig_vals)
# In[18]:
# 6) Selecting Principal Components
# Make a list of (eigenvalue, eigenvector) tuples
eig_pairs = [(np.abs(eig_vals[i]), eig_vecs[:,i]) for i in range(len(eig_vals))]
# Sort the (eigenvalue, eigenvector) tuples from high to low
eig_pairs.sort(key=lambda x: x[0], reverse=True)
# Visually confirm that the list is correctly sorted by decreasing eigenvalues
print('Eigenvalues in descending order:')
for i in eig_pairs:
    print(i[0])
# In[19]:
# Percentage of total variance explained by each component.
tot = sum(eig_vals)
var_exp = [(i / tot)*100 for i in sorted(eig_vals, reverse=True)]
var_exp
# matrix_w = np.hstack((eig_pairs[0][1].reshape(7,1),
#                       eig_pairs[1][1].reshape(7,1)
#                     ))
# print('Matrix W:\n', matrix_w)
# In[20]:
# Projection matrix from the top two components; the reshape implies the
# eigenvectors are 154-dimensional — TODO confirm x's real column count.
matrix_w = np.hstack((eig_pairs[0][1].reshape(154,1),
eig_pairs[1][1].reshape(154,1)
))
print('Matrix W:\n', matrix_w)
# In[30]:
Y = X_std.dot(matrix_w)
Y
# In[33]:
#clear previous plot
plt.gcf().clear()
from sklearn.decomposition import PCA
pca = PCA().fit(X_std)
plt.plot(np.cumsum(pca.explained_variance_ratio_))
plt.xlim(0, 155, 1)
plt.xlabel('Number of components')
plt.ylabel('Cumulative explained variance')
# In[39]:
print (np.cumsum(pca.explained_variance_ratio_))
import xlsxwriter
# NOTE(review): this workbook is created but never written to or closed —
# 'cumm.xlsx' ends up empty/invalid; confirm whether it is still needed.
workbook = xlsxwriter.Workbook('cumm.xlsx')
worksheet = workbook.add_worksheet()
# In[ ]:
# In[42]:
from sklearn.decomposition import PCA
# Take this value as input 40 n_components
sklearn_pca = PCA(n_components=40)
Y_sklearn = sklearn_pca.fit_transform(X_std)
# In[43]:
print(Y_sklearn)
# In[44]:
Y_sklearn.shape
# In[45]:
import xlsxwriter
# Reduced Generated file, Get file name from the user
workbook = xlsxwriter.Workbook('arrays.xlsx')
worksheet = workbook.add_worksheet()
# In[46]:
# Write each PCA-reduced sample into its own worksheet column.
row = 0
for col, data in enumerate(Y_sklearn):
    worksheet.write_column(row, col, data)
workbook.close()
"""
import matplotlib
matplotlib.use("TkAgg")
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
import tkMessageBox
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
matplotlib.use('TkAgg')
import numpy as np
from Tkinter import *
import Tkinter as tk
import tkFileDialog
class mclass:
def __init__(self, window):
self.window = window
self.box = Entry(window)
self.box.insert(0, 'enter filename')
self.button = Button(window, text="Apply PCA to Datasets", command=self.plot)
self.button2 = Button(window, text="Generate File", command=self.generate_file)
self.comp = Entry(window)
self.comp.insert(0, 'enter no. of components')
self.button.pack()
self.plot_values = []
def get_filepath(self):
fpath = tkFileDialog.askopenfilename()
return str(fpath)
def plot(self):
tkMessageBox.showinfo("Message", "Select X test file")
x = pd.read_csv(self.get_filepath(), delimiter=",")
tkMessageBox.showinfo("Message", "Select Y train file")
y = pd.read_csv(self.get_filepath(), delimiter=",")
tkMessageBox.showinfo("Processing", "Please wait...\n")
np.shape(x)
np.shape(y)
X = x.iloc[:, 1:156].values
y = y.iloc[:, 0].values
np.shape(x)
np.shape(y)
from sklearn.preprocessing import StandardScaler
self.X_std = StandardScaler().fit_transform(X)
mean_vec = np.mean(self.X_std, axis=0)
cov_mat = (self.X_std - mean_vec).T.dot((self.X_std - mean_vec)) / (self.X_std.shape[0] - 1)
print('Covariance matrix \n%s' % cov_mat)
print('NumPy covariance matrix: \n%s' % np.cov(self.X_std.T))
plt.figure(figsize=(16, 16))
sns.heatmap(cov_mat, vmax=1, square=True, annot=True, cmap='cubehelix')
plt.title('Correlation between different features')
eig_vals, eig_vecs = np.linalg.eig(cov_mat)
eig_pairs = [(np.abs(eig_vals[i]), eig_vecs[:, i]) for i in range(len(eig_vals))]
eig_pairs.sort(key=lambda x: x[0], reverse=True)
for i in eig_pairs:
print(i[0])
tot = sum(eig_vals)
var_exp = [(i / tot) * 100 for i in sorted(eig_vals, reverse=True)]
matrix_w = np.hstack((eig_pairs[0][1].reshape(154, 1),
eig_pairs[1][1].reshape(154, 1)
))
print('Matrix W:\n', matrix_w)
Y = self.X_std.dot(matrix_w)
plt.gcf().clear()
from sklearn.decomposition import PCA
pca = PCA().fit(self.X_std)
self.plot_values = np.cumsum(pca.explained_variance_ratio_)
plt.plot(self.plot_values)
plt.xlim(0, 155, 1)
plt.xlabel('Number of components')
plt.ylabel('Cumulative explained variance')
fig = Figure(figsize=(6, 6))
a = fig.add_subplot(111)
a.plot(self.plot_values)
a.set_title("PCA Variance Ratio", fontsize=16)
a.set_ylabel('Cumulative explained variance', fontsize=14)
a.set_xlabel('Number of components', fontsize=14)
canvas = FigureCanvasTkAgg(fig, master=self.window)
canvas.get_tk_widget().pack()
canvas.draw()
self.comp.pack()
self.box.pack()
self.button2.pack()
tkMessageBox.showinfo("PLOT", "Graph Generated")
print (self.plot_values)
def generate_file(self, ):
import xlsxwriter
components = int(self.comp.get())
fname = str(self.box.get())
from sklearn.decomposition import PCA
# Take this value as input 40 n_components
self.sklearn_pca = PCA(n_components=components)
self.Y_sklearn = self.sklearn_pca.fit_transform(self.X_std)
print(self.Y_sklearn)
self.Y_sklearn.shape
import xlsxwriter
# Reduced Generated file, Get file name from the user
path = '/Users/ombahiwal/Documents/Interface_Tool/'+fname+'.xlsx'
workbook = xlsxwriter.Workbook(path)
worksheet = workbook.add_worksheet()
row = 0
for col, data in enumerate(self.Y_sklearn):
worksheet.write_column(row, col, data)
workbook.close()
tkMessageBox.showinfo("Message", "Dataset File with "+str(components)+" components generated at "+path)
window = Tk()
window.geometry('600x1000')
title = tk.Label(window, text="Interface Tool for REP Analysis", padx=10)
title.config(font=("Helvetica", 25))
title.pack()
start = mclass(window)
window.mainloop()
""" |
# %load q05_difference_in_gold_medal/build.py
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from greyatomlib.olympics_project.q02_rename_columns.build import q02_rename_columns
# Default dataset location (the function takes the path explicitly).
path = 'data/olympics.csv'
def q05_difference_in_gold_medal(path):
    """Largest absolute difference between the two Gold columns per country.

    Relies on q02_rename_columns(path) producing a frame where the label
    'Gold' matches two columns, so the .loc selection below yields three
    columns ('country name' plus both Gold columns) — TODO confirm against
    q02_rename_columns' output schema.
    """
    df = q02_rename_columns(path)
    # label-based selection: duplicate 'Gold' labels select every match
    df1 = df.loc[:,['country name','Gold']]
    df2 = df1.iloc[:,[0,1,2]]
    df3 = df2.set_index(keys='country name')
    df3.loc[:,:] = df3.loc[:,:].apply(pd.to_numeric)
    # drop the aggregate 'Totals' row so it cannot dominate the max
    df3.drop(['Totals'],inplace=True)
    df4 = df3.iloc[:,0] - df3.iloc[:,1]
    df4 = df4.abs()
    return df4.max()
#q05_difference_in_gold_medal(path)
|
__author__ = 'snaumov'
from flask_wtf import Form
from wtforms import BooleanField, StringField, validators, DateTimeField, IntegerField, FileField, TextAreaField
class Page(Form):
    """WTForms form for editing a page: a single free-text body of 3-100000 chars."""
    PageContent = TextAreaField('PageContent', [validators.Length(min=3, max=100000, message='Enter your text')])
|
#!/usr/bin/python3
import re
import os
import sys
#f=open("raw.out")
# Replace every hex address found in the input file with the source
# location resolved by addr2line against the given binary; result goes
# to stdout.
if len(sys.argv) < 3:
    print("Usage: ./hexaddr_to_linenum.py <output file> <binary file>")
    sys.exit()
# FIX: read the whole trace via a context manager so the handle is closed
# (previously the file was opened and never closed); read() replaces the
# redundant readlines()+join.
with open(sys.argv[1], 'r') as f:
    lines = f.read()
last_pos = 0
for l, r in [(m.start(0), m.end(0)) for m in re.finditer('0[xX][0-9a-fA-F]+', lines)]:
    # resolve one address; the match is a pure hex token so it is safe
    # to interpolate into the shell command
    linenum = os.popen('addr2line {} -e {}'.format(lines[l:r], sys.argv[2])).read()
    print(lines[last_pos:l], end='')
    # drop addr2line's trailing newline so output stays inline
    print(linenum[:-1], end='')
    last_pos = r
# tail of the file; [:-1] drops the final newline to match the inline style
print(lines[last_pos:-1], end='')
|
# Generated by Django 2.2.1 on 2019-11-25 15:58
from django.db import migrations
class Migration(migrations.Migration):
    # Merge migration: reconciles the two divergent 0034 branches of the
    # 'machines' app; it intentionally performs no schema operations.
    dependencies = [
        ('machines', '0034_auto_20191120_1920'),
        ('machines', '0034_auto_20191125_1851'),
    ]
    operations = [
    ]
|
from django.db import models
# Create your models here.
class ProjectsManager(models.Manager):
    """Query helpers that expose projects as plain dicts."""

    def getProjectsList(self):
        """Map every project's id to a {"name": ...} summary dict."""
        return {proj.id: {"name": proj.name}
                for proj in super(ProjectsManager, self).get_queryset().all()}

    def getProjectById(self, proj_id):
        """Fetch one project and return its id/name/data as a dict."""
        proj = super(ProjectsManager, self).get_queryset().get(id=proj_id)
        return {"id": proj.id, "name": proj.name, "data": proj.data}

    def getObjectByID(self, proj_id):
        """Fetch the raw model instance with the given id."""
        return super(ProjectsManager, self).get_queryset().get(id=proj_id)

    def deleteById(self, proj_id):
        """Delete the project with the given id."""
        super(ProjectsManager, self).get_queryset().get(id=proj_id).delete()
class Progects(models.Model):
    """Project record: a name plus a free-form data payload."""
    # NOTE(review): class name looks like a typo for "Projects", but renaming
    # would require a schema migration and caller updates — left as is.
    name = models.CharField(max_length=100)
    data = models.TextField()
    manager = ProjectsManager()
|
from celery import task
import celery
import datetime
from novajoy.views import send_mail1
@celery.decorators.periodic_task(run_every=datetime.timedelta(seconds=20))
def my():
    """Periodic task: e-mail the hard-coded address every 20 seconds.

    NOTE(review): celery.decorators is a legacy API — confirm it still
    exists in the installed Celery version.
    """
    send_mail1("cska631@gmail.com", "subj","text")
#!/usr/bin/env python2
import os
import subprocess
WORK_DIR = 'work'  # root directory holding the fuzzing corpus
def checkOutput(s):
    """Return False when the captured output looks like a crash or error."""
    crashed = 'Segmentation fault' in s or 'error' in s.lower()
    return not crashed
# Run every corpus file through the ASan-instrumented pdfium_test binary
# and report any testcase whose combined stdout/stderr looks like a crash.
corpus_dir = os.path.join(WORK_DIR, 'corpus')
corpus_filenames = os.listdir(corpus_dir)
for f in corpus_filenames:
    testcase_path = os.path.join(corpus_dir, f)
    cmd = ['bin/asan/pdfium_test', testcase_path]
    # merge stderr into stdout so ASan reports are captured too
    process = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
    output = process.communicate()[0]
    if not checkOutput(output):
        print testcase_path
        print output
        print '-' * 80
|
# Generated by Django 3.1.7 on 2021-05-26 13:05
from django.db import migrations, models
class Migration(migrations.Migration):
    # Creates the Sku table: letter metadata (number, date, purpose) plus a
    # snapshot of the resident's personal details at issue time.
    dependencies = [
        ('surat', '0003_penduduk_pendidikan'),
    ]
    operations = [
        migrations.CreateModel(
            name='Sku',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('no', models.CharField(max_length=200)),
                ('no_surat', models.CharField(max_length=200)),
                ('tanggal', models.CharField(max_length=200)),
                ('keterangan', models.CharField(max_length=200)),
                ('nik', models.CharField(max_length=200)),
                ('nama_lengkap', models.CharField(max_length=200)),
                ('jenis_kelamin', models.CharField(blank=True, choices=[('Laki-Laki', 'Laki-Laki'), ('Perempuan', 'Perempuan')], max_length=200, null=True)),
                ('tanggal_lahir', models.CharField(max_length=200)),
                ('pendidikan', models.CharField(blank=True, choices=[('SD', 'SD'), ('SLTP', 'SLTP'), ('SLTA', 'SLTA'), ('Pelajar/Mahasiswa', 'Pelajar/Mahasiswa')], max_length=200, null=True)),
                ('dusun', models.CharField(blank=True, choices=[('Dusun Matikan', 'Dusun Matikan'), ('Dusun Krajan', 'Dusun Krajan'), ('Dusun Kejawan', 'Dusun Kejawan')], max_length=200, null=True)),
                ('no_rt', models.CharField(max_length=200)),
                ('no_rw', models.CharField(max_length=200)),
            ],
            options={
                'verbose_name_plural': 'sku',
            },
        ),
    ]
|
# TODO: transform audio to features
# load audio into FeatureExtractor
# transform into features
# TODO: load model
import os
import re
import pyaudio
import numpy as np
from matplotlib import pyplot as plt
from FLAGS import PREDICTION_FLAGS
from FeatureExtraction import FeatureExtractor
from Models import predict_from_saved_model, convert_to_strings
from transformer_support import masked_pipeline_from_trained_model
def read_chunk(stream, chunk_size):
    """Read chunk_size units from the audio stream as a float32 numpy array."""
    raw = stream.read(chunk_size)
    return np.frombuffer(raw, dtype=np.float32)
def record_audio(record_seconds):
    """Record `record_seconds` of float32 audio from the default input device.

    Prints the elapsed seconds while recording.  Returns (timespan, frames,
    stream): the sample time axis, the captured samples as a flat list, and
    the still-open PyAudio stream — the caller must stop/close it (it is
    reused later for playback).
    """
    audio_format = pyaudio.paFloat32
    # samples per read so that `updates_per_second` reads cover one second
    chunk_size = int(PREDICTION_FLAGS.recording['rate']/PREDICTION_FLAGS.recording['updates_per_second'])
    frames = []
    p = pyaudio.PyAudio()
    stream = p.open(format=audio_format,
                    channels=PREDICTION_FLAGS.recording['channels'],
                    rate=PREDICTION_FLAGS.recording['rate'],
                    input=True,
                    output=True,
                    frames_per_buffer=chunk_size)
    for i in range(PREDICTION_FLAGS.recording['updates_per_second']*record_seconds):
        # progress print once per recorded second
        if i % PREDICTION_FLAGS.recording['updates_per_second'] == 0:
            print(str(int(i/PREDICTION_FLAGS.recording['updates_per_second'])) + " s ...")
        audio_chunk = read_chunk(stream, chunk_size)
        frames.extend(audio_chunk)
    timespan = np.arange(0, record_seconds, 1/PREDICTION_FLAGS.recording['rate'])
    return timespan, frames, stream
def plot_audio(timespan, frames, axes=plt):
    """Plot the waveform on either the pyplot module or an Axes object.

    Axes objects use set_* accessors while the pyplot module exposes
    title/xlabel/ylabel as functions, so the labelling style is chosen
    at runtime via isinstance.
    """
    axes.plot(timespan, frames)
    axes.autoscale(enable=True, axis='x', tight=True)
    title_text = "Audio signal"
    x_text = "Time (s)"
    y_text = "Amplitude (1)"
    if isinstance(axes, plt.Axes):
        axes.set_title(title_text)
        axes.set_xlabel(x_text)
        axes.set_ylabel(y_text)
    else:
        axes.title(title_text)
        axes.xlabel(x_text)
        axes.ylabel(y_text)
def mask_sentence(sentence, fill_mask_pipeline):
    """Iteratively re-predict a sentence word by word with a fill-mask LM.

    Masks one position per pass (left to right), lets the pipeline fill it,
    and feeds the top-ranked result into the next pass.  Returns the final
    sentence.  NOTE(review): each pass re-splits the predicted sentence, so
    the word count may drift if the LM merges or splits tokens — see the
    TODO below.
    """
    words = sentence.split(" ")
    nW = len(words)
    for i in range(nW):
        words[i] = "[MASK]"
        masked_sent = " ".join(words)
        # append a final period when masking the last word
        if i+1 == nW:
            masked_sent += "."
        results = fill_mask_pipeline(masked_sent)
        # strip BERT's special tokens from the highest-scoring candidate
        top_sent = re.sub(r"(\[CLS\]|\[SEP\])", "", results[0]["sequence"])
        print(top_sent)
        # TODO: continue
        sentence = top_sent
        words = sentence.split(" ")
    return sentence
if __name__ == '__main__':
    # set logging to only show errors
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
    # print("INITIALIZING LANGUAGE MODEL PIPELINE".center(50, "_")
    # lm_mask_pipeline = masked_pipeline_from_trained_model(PREDICTION_FLAGS.models['lm_path'])
    print("INITIALIZING FEATURE EXTRACTOR".center(50, "_"))
    extractor = FeatureExtractor(PREDICTION_FLAGS.recording['rate'],
                                 feature_type=PREDICTION_FLAGS.features['type'],
                                 energy=PREDICTION_FLAGS.features['energy'],
                                 deltas=PREDICTION_FLAGS.features['deltas'])
    print("RECORDING AUDIO".center(50, "_"))
    # five seconds from the default microphone
    timespan, frames, stream = record_audio(5)
    print("CONVERTING TO FEATURE REPRESENTATION".center(50, "_"))
    features = extractor.transform_data([np.array(frames)])[0]
    print("PREDICTING FROM SAVED MODEL".center(50, "_"))
    predictions = predict_from_saved_model(PREDICTION_FLAGS.models['am_path'], features)
    print("TRANSCRIBING TO STRINGS".center(50, "_"))
    string_predictions = convert_to_strings(predictions, apply_autocorrect=True, digitize=True)
    # print("RUNNING THROUGH LANGUAGE MODEL".center(50, "_")
    # sentence = mask_sentence(decoded_predictions[0][2], lm_mask_pipeline)
    print("RESULT".center(50, "_"))
    print(f"AM: {string_predictions[0][2]}")
    # print(f"LM: {sentence}")
    print("REPLAYING AUDIO STREAM".center(50, "_"))
    # NOTE(review): frames holds float32 scalars, but b"".join expects bytes
    # — verify the playback path (np.array(frames).tobytes() may be intended).
    stream.write(b"".join(frames))
    print("CLOSING STREAM".center(50, "_"))
    stream.stop_stream()
    stream.close()
    print("PLOTTING AUDIO AND FEATURES".center(50, "_"))
    fig, ax = plt.subplots(2)
    plot_audio(timespan, frames, axes=ax[0])
    extractor.plot_cepstra([features], 1, axes=ax[1])
    plt.show()
|
#!/usr/bin/python3
import re
# Parse puzzle input lines of the form "layer: depth" into {layer: depth}.
with open('aoc13_input.txt') as f:
    matches = re.findall(r"(\d+): (\d+)", f.read())
layers = {int(x[0]): int(x[1]) for x in matches}
def get_severity(delay):
    """Severity and caught-flag for a trip starting after `delay` steps.

    A scanner of depth d returns to the top of its layer every
    (d - 1) * 2 picoseconds; we are caught at a layer when our arrival
    time (layer + delay) lands exactly on such a multiple.
    Returns (total severity, whether we were caught at all).
    """
    severity = 0
    caught = False
    for layer, depth in layers.items():
        period = (depth - 1) * 2
        if (layer + delay) % period == 0:
            caught = True
            severity += layer * depth
    return (severity, caught)
# Part I: severity of leaving immediately.
severity, caught = get_severity(0)
print("Part I: %d" % severity)
# Part II: smallest delay that crosses without being caught at all
# (the caught flag matters: being caught at layer 0 adds zero severity).
delay = 0
while caught:
    delay += 1
    severity, caught = get_severity(delay)
print("Part II: %d" % delay)
|
#!/usr/bin/env python
# -*- encoding:utf-8 -*-
from cratz import Cratz
# Compare two fixed names and print their normalized Levenshtein distance.
first_word = '本田未央'
second_word = '高垣 楓'
c = Cratz(first_word, second_word)
print(c.levenshtein_distance(normalized=True))
|
import sys
from pygame.sprite import Group
import game_functions as gf
import pygame
from settings import Settings
from ship import Ship
from alien import Alien
def run_game():
    """Create the game window, ship, bullets and alien, then run the main loop."""
    # Initialize the game and create a screen object
    #pygame.init()
    #screen=pygame.display.set_mode((1200,800))
    pygame.display.set_caption("Alien Invasion")
    ai_settings =Settings()
    screen =pygame.display.set_mode((ai_settings.screen_width,ai_settings.screen_height))
    # Create a ship
    ship= Ship(ai_settings,screen)
    # Create a group to store the bullets
    bullets = Group()
    # Create an alien
    alien = Alien(ai_settings,screen)
    #bg_clolor = (230, 230, 230)
    while True:
        # Watch for keyboard and mouse events
        gf.check_events(ai_settings,screen,ship,bullets)
        ship.update()
        # Remove bullets that have disappeared
        gf.update_bullets(bullets)
        gf.update_screen(ai_settings, screen, ship,alien, bullets)
run_game()
# A tuple (despite the name) — indexed just like a list but immutable.
list1 = ("a","b","c")
print("The second value in the list is " + list1[1])
for listItem in list1:
    # FIX: the keyword argument is `end`, not `ends` — the original raised
    # TypeError: 'ends' is an invalid keyword argument for print().
    print("The value is: ", end="")
    print(listItem)
list2 = ["a", "b", "c"]
list2.remove("a")
# listItem still holds the last value from the loop above ("c")
print(listItem)
list2.pop(0)
print("***")
for listItem in list2:
    print("The value is: ", end="")
    print(listItem)
|
# -*- coding: utf-8 -*-
"""
Created on Wed May 6 21:29:20 2020
@author: Vijay
"""
import re
import csv
from tkinter import * ;
from tkinter.ttk import *
from fpdf import FPDF
from datetime import datetime
from datetime import date
# importing askopenfile function
# from class filedialog
from tkinter.filedialog import askopenfile
# Main application window.
root = Tk()
root.title("ATDCS-V.C19")
root.iconbitmap(r"sahyadri.ico")
root.geometry('1000x600')
#filename = PhotoImage(file = "C:\\Users\\Vijay\\Pictures\d3.png")
#background_label = Label(root, image=filename)
#background_label.place(x=0, y=0, relwidth=1, relheight=1)
# This function will be used to open
# file in read mode and only Python files
# will be opened
li=[]  # absentee rows ([USN, name]); never populated here — TODO confirm producer
st_pre=[]  # USNs detected as present (filled by gen_absen)
sem_sec=None  # NOTE(review): shadowed by a local in semester(); never updated
def gen_absen():
    """Ask the user for a raw text file and collect the USNs found in it.

    Every token matching the USN pattern (e.g. 4SF18CS001) is upper-cased
    and accumulated, deduplicated, into the module-level st_pre list, which
    is then sorted and summarised on stdout.
    """
    fname = askopenfile(mode='r', filetypes=[("all files", "*.*")])
    if fname is None:
        # user cancelled the file dialog
        return
    # close the chosen file when done (was previously left open)
    with fname:
        for line in fname:
            line = line.rstrip()
            line = re.findall('[0-9][a-zA-Z]{2}[0-9]{2}[a-zA-Z]{2}[0-9]{3}', line)
            # FIX: was `if len(line)is not 0` — `is not` compares identity,
            # not value; test for a non-empty match list instead.
            if line:
                for i in line:
                    if i.upper() not in st_pre:
                        st_pre.append(i.upper())
    st_pre.sort()
    print("list of students who are present")
    print(st_pre)
    # FIX: corrected "stdeunts" typo in the user-facing message
    print("number of students present", len(st_pre))
    #print(li)
# Shared text widget used by display() to render the absentee report.
t=Text(root)
def semester():
    """Resolve the roster CSV for the entered semester+section and cross-check it.

    The roster filename is "<semester><section>.csv"; unrecognised input
    falls back to 8C.csv (preserving the original fallback behaviour).
    Also creates the 'List of absentees' button.
    """
    sem_sec = sem_ent.get()
    # FIX: the original used substring tests (`sem_sec in "4A"`), so e.g.
    # "4", "A" or "" alone would match "4A"; use exact matching instead.
    known = ("4A", "4B", "4C", "6A", "6B", "6C", "8A", "8B")
    if sem_sec in known:
        fname1 = sem_sec + ".csv"
    else:
        fname1 = "8C.csv"
    with open(fname1) as n_4A:
        # FIX: materialise the rows — the csv reader is an iterator and was
        # exhausted after the first student, so later students saw no rows.
        rows = list(csv.reader(n_4A, delimiter=","))
    for k in st_pre:
        for j in rows:
            print(k, j)
    # print(li)
    #print("list of absentees")
    btn2 = Button(root, text='List of absentees', command=lambda: display())
    btn2.pack(side=TOP, pady=30)
def display():
    """Render the absentee report (module-level li) into the shared text widget t."""
    divider = "-----------------------" + '\n'
    t.insert(END, "List of Absentees" + '\t' + '\n')
    t.insert(END, divider)
    t.insert(END, "USN " + "\t" + "NAME" + '\n')
    t.insert(END, divider)
    for entry in li:
        t.insert(END, entry[0] + '\t' + entry[1] + '\n')
    t.insert(END, divider)
    t.insert(END, "Number of Absentees=" + '\t' + str(len(li)) + '\n')
    t.pack()
def simple_table(spacing=1):
    """Export the absentee list (module-level li) as a bordered PDF table.

    The output file is named "<semester+section><today's date>.pdf".
    spacing scales the row height.
    """
    pdf = FPDF()
    pdf.set_font("Arial", size=12)
    pdf.add_page()
    pdf.cell(200, 10, txt="List of Absentees Department of Computer science and Engineering\n ", ln=1, align="C")
    col_width = pdf.w / 2.5
    row_height = pdf.font_size
    head=[['USN','NAME']];
    # header row
    for row in head:
        for item1 in row:
            pdf.cell(col_width, row_height*spacing,
                txt=item1, border=1,align="C")
        pdf.ln(row_height*spacing)
    # one table row per absentee ([USN, name])
    for row in li:
        for item in row:
            pdf.cell(col_width, row_height*spacing,
                txt=item, border=1,align="C")
        pdf.ln(row_height*spacing)
    to=str(date.today())
    # footer with generation timestamp
    pdf.cell(200, 10, txt="Computer Generated Report on"+"\t"+str(datetime.now()), ln=1, align="C")
    col_width = pdf.w / 3.0
    fname=sem_ent.get()+to+'.pdf'
    pdf.output(fname)
#btn = Button(root, text ='Open', command = lambda:open_file())
# File chooser: parse a raw attendance text file for present USNs.
btn1=Button(root,text='Open a Text File',command=lambda:gen_absen())
btn1.pack(side=TOP,pady=20)
Lab_1=Label(root,text="Enter semester with Section(Example:4A)");
sem_ent=Entry(root)
Lab_1.pack(side=TOP,pady=25)
sem_ent.pack(side=TOP,pady=5)
# Confirm semester/section and cross-check against the class roster CSV.
bt3=Button(root,text='ok',command=lambda:semester())
bt3.pack(side=TOP,pady=0)
#Lab_1=Label(root,text="Enter semester");
#sem_ent=Entry(root)
#btn2=Button(root,text='List of absentees',command=lambda:display())
#btn.pack(side = TOP, pady = 10)
#Lab_1.pack(side=TOP,pady=25)
#sem_ent.pack(side=TOP,pady=5)
label = Label(root, text='Designed by Mr.Vijay C.P Assitant Professor Department of CSE,Sahyadri College of Engineering and Managment', font='Helvetica 12 bold')
label.pack(side=BOTTOM,pady=0)
bt4=Button(root,text='Generate PDF',command=lambda:simple_table())
bt4.pack(side=BOTTOM,pady=20)
# Enter the Tk event loop; everything below this call is unreachable.
root.mainloop()
"""with open('4A.csv')as n_4A:
csv_reader=cs.reader(n_4A,delimiter=",")
for j in csv_reader:
print(j)""" |
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score, silhouette_samples
import pandas as pd
import datetime
import math
import numpy as np
import matplotlib.pyplot as plt
import Common_module.Common_module as CM
# Load the Online Retail transactions workbook.
# NOTE(review): absolute user-specific path — parameterise before reuse.
retailDF = pd.read_excel(io='C:\\Users\\hana\\Desktop\\Online Retail.xlsx')
print(retailDF.info())
# Keep only valid sales: positive quantity/price and a known customer.
retailDF = retailDF[retailDF['Quantity']>0]
retailDF = retailDF[retailDF['UnitPrice']>0]
retailDF = retailDF[retailDF['CustomerID'].notnull()]
print(retailDF.shape)
print(retailDF.isnull().sum())
print(retailDF['Country'].value_counts()[:5])
# Restrict to the dominant market.
retailDF = retailDF[retailDF['Country']=='United Kingdom']
print(retailDF.shape)
retailDF['sale_amount'] = retailDF['Quantity'] * retailDF['UnitPrice']
retailDF['CustomerID'] = retailDF['CustomerID'].astype(int)
print(retailDF['CustomerID'].value_counts().head())
print(retailDF.groupby('CustomerID')['sale_amount'].sum().sort_values(ascending=False)[:5])
# RFM aggregation per customer: last purchase date, order count, revenue.
aggregations = {
    'InvoiceDate': 'max',
    'InvoiceNo': 'count',
    'sale_amount': 'sum'
}
cust_df = retailDF.groupby('CustomerID').agg(aggregations)
cust_df = cust_df.rename(columns={'InvoiceDate':'Recency', 'InvoiceNo':'Frequency', 'sale_amount':'Monetary'})
cust_df = cust_df.reset_index()
# Recency = days since last purchase, measured from a fixed reference date.
cust_df['Recency'] = datetime.datetime(2021,10,2) - cust_df['Recency']
cust_df['Recency'] = cust_df['Recency'].apply(lambda x:x.days+1)
print(cust_df)
# Standardise the RFM features and cluster customers with k-means (k=3).
X_feature = cust_df[['Recency', 'Frequency', 'Monetary']].values
X_feature_scaled = StandardScaler().fit_transform(X_feature)
kmeans = KMeans(n_clusters=3, random_state=0)
labels = kmeans.fit_predict((X_feature_scaled))
cust_df['cluster_label'] = labels
print(silhouette_score(X_feature_scaled, labels))
# Compare silhouette plots across several candidate cluster counts.
CM.visualize_silhouette([2,3,4,5], X_feature_scaled)
|
#!/usr/bin/python
import boto
import os
import json
import time
from lib.con import Con
from lib.ec2 import Ec2
from lib.config_writer import ConfigWriter
from boto.ec2.connection import EC2Connection
from boto.ec2.regioninfo import *
def pp(_json):
    """Pretty-print a JSON-serialisable object (2-space indent, sorted keys)."""
    print json.dumps(_json, indent=2, sort_keys=True)
class InstanceLauncer:
    """Launches the EC2 instances the cluster config expects but that are not
    running, then regenerates the local hosts/ssh/deploy config files."""
    def __init__(self):
        # Cluster config is read from the file named by the
        # <PROJECT_PREFIX>_BOTO_CONFIG_FILE environment variable.
        proj_prefix = os.environ['PROJECT_PREFIX']
        f = open(os.environ["{}_BOTO_CONFIG_FILE".format(proj_prefix)], 'r')
        self.config = json.loads(f.read())
        self.conn = Con().conn
        self.ec2 = Ec2(self.conn)
    def _launch_instances(self):
        # Start every node the config expects but EC2 does not yet run.
        missing_nodes = self.ec2.missing_nodes()
        self.created_nodes = self.ec2.start_instances(missing_nodes)
        print '\nAll instances are running\n'
    def _write_config(self):
        # Regenerate hosts file, ssh config and deployed-cluster config.
        config_writer = ConfigWriter(self.ec2.get_nodes_hydrated())
        config_writer.write_hosts_file()
        config_writer.write_ssh_config()
        config_writer.write_deployed_config()
    def run(self):
        """Launch missing instances, print instance info, write configs."""
        self._launch_instances()
        self.ec2.print_non_term_tagged_inst_info()
        self._write_config()
InstanceLauncer().run()
|
# vim: ai ts=4 sts=4 et sw=4 encoding=utf-8
from registration.signals import user_registered, user_activated
from datawinners.accountmanagement.post_registration_events import ngo_user_created
from datawinners.accountmanagement.post_activation_events import initialize_organization
from datawinners.accountmanagement.user import is_project_manager, is_extended_user, is_ngo_admin, has_higher_privileges_than
# Wire django-registration lifecycle signals to account-management handlers:
# create the NGO user record at registration time, and bootstrap the
# organization once the account is activated.
user_registered.connect(ngo_user_created)
user_activated.connect(initialize_organization)
|
# Simple tetris program! v0.2
# D. Crandall, Sept 2016
from AnimatedTetris import *
from SimpleTetris import *
# from kbinput import *
import time, sys
import copy
class HumanPlayer:
    # Interactive player: move sequences are read from stdin (Python 2).

    def get_moves(self, tetris):
        # Ask the user for a complete move string for the current piece.
        print "Type a sequence of moves using: \n b for move left \n m for move right \n n for rotation\nThen press enter. E.g.: bbbnn\n"
        moves = raw_input()
        return moves

    def control_game(self, tetris):
        # Real-time mode: map single keypresses to game commands forever.
        while 1:
            c = get_char_keyboard()
            commands = { "b": tetris.left, "n": tetris.rotate, "m": tetris.right, " ": tetris.down }
            # KeyError for any key not in the map - presumably acceptable here.
            commands[c]()
#####
# This is the part you'll want to modify!
# Replace our super simple algorithm with something better
#
class ComputerPlayer:
    # AI player: scores every (rotation, column) drop of the current piece
    # with a weighted board heuristic and returns the move string reaching
    # the best-scoring placement.
    #
    # This function should generate a series of commands to move the piece into the "optimal"
    # position. The commands are a string of letters, where b and m represent left and right, respectively,
    # and n rotates. tetris is an object that lets you inspect the board, e.g.:
    #   - tetris.col, tetris.row have the current column and row of the upper-left corner of the
    #     falling piece
    #   - tetris.get_piece() is the current piece, tetris.get_next_piece() is the next piece after that
    #   - tetris.left(), tetris.right(), tetris.down(), and tetris.rotate() can be called to actually
    #     issue game commands
    #   - tetris.get_board() returns the current state of the board, as a list of strings.
    #
    def get_moves(self, tetris):
        # Collect the distinct rotations of the current piece (rotations are
        # distinguished by their first row string).
        tetris_temp = copy.deepcopy(tetris)
        all_comb = []
        curr_piece = tetris_temp.get_piece()
        all_comb.append(curr_piece[0])
        for g in range(3):
            tetris_temp.rotate()
            if tetris_temp.get_piece()[0] not in all_comb:
                all_comb.append(tetris_temp.get_piece()[0])
        piece = tetris_temp.get_piece()
        eval_dict = {}
        # Simulate dropping each rotation r in every feasible column c
        # (board is 10 cells wide).
        for r in range(len(all_comb)):
            for c in range(10 - len(all_comb[r][0]) + 1):
                tetris_temp = copy.deepcopy(tetris)
                tetris_temp.piece = all_comb[r]
                tetris_temp.col = c
                tetris_temp.row = 0
                try:
                    tetris_temp.down()
                except:
                    # Drop failed for this placement (e.g. collision); skip it.
                    print "Next value"
                    print tetris_temp.get_board()
                    continue
                board = tetris_temp.get_board()
                # board1 = tetris.place_piece((board,0), piece, 0, 0)

                # Feature 1/2: covered holes and completed lines.
                num_holes = 0
                lines_cleared = 0
                holes = []
                for i in range(1, len(board)):
                    line = 0
                    for j in range(len(board[i])):
                        if board[i][j] == ' ':
                            # A hole: empty cell with any filled cell above it.
                            for k in range(1, i):
                                if board[i-k][j] == 'x':
                                    num_holes += 1
                                    holes.append([i, j])
                                    break
                        if board[i][j] == 'x':
                            line += 1
                    if line == 10:
                        lines_cleared += 1
                # Height of column c on the ORIGINAL board (before the drop).
                max_h = 0
                for t in range(20):
                    if tetris.get_board()[t][c] == 'x':
                        max_h = 20 - t
                        break
                    else:
                        max_h = 0
                # Landing-height estimate: column height plus half the piece
                # height (Python 2 integer division).
                l_height = max_h + len(all_comb[r]) / 2
                # m: right-most occupied column index (last column excluded).
                m = -200
                for i in range(len(board)):
                    for j in range(len(board[i]) - 1):
                        if board[i][j] == 'x':
                            if m < j:
                                m = j
                # n: top-most occupied row index.
                n = 200
                for i in range(len(board[0])):
                    for j in range(len(board) - 1):
                        if board[j][i] == 'x':
                            if n > j:
                                n = j
                # Per-column heights, total height and bumpiness
                # (computed but unused by the current evaluation below).
                h = []
                sum_heights = 0
                bumpiness = 0
                q = -10
                for i in range(len(board[0])):
                    for j in range(len(board)):
                        if board[j][i] == 'x':
                            h.append(20 - j)
                            break
                        if j == 19 and board[19][i] == ' ':
                            h.append(0)
                sum_heights = sum(h)
                for b in range(len(h) - 1):
                    bumpiness += abs(h[b] - h[b+1])
                # Row transitions: filled<->empty flips along each row.
                row_trans = 0
                if m > 0:
                    for i in range(n, 20):
                        for j in range(m):
                            if board[i][j] == 'x' and board[i][j+1] == ' ':
                                row_trans += 1
                            elif board[i][j] == ' ' and board[i][j+1] == 'x':
                                row_trans += 1
                            # if (j == 0 or j == 9) and board[i][j] == ' ':
                            #     row_trans += 1
                # Column transitions: filled<->empty flips down each column.
                col_trans = 0
                if n < 19:
                    for i in range(m + 1):
                        for j in range(n - 1, 19):
                            if board[j][i] == 'x' and board[j+1][i] == ' ':
                                col_trans += 1
                            elif board[j][i] == ' ' and board[j+1][i] == 'x':
                                col_trans += 1
                            # if j == 0 and board[j][i] == ' ':
                            #     col_trans += 1
                # Well sums: depth of empty runs walled in on both sides
                # (board edges count as walls on the outer columns).
                well_sums = 0
                for i in range(len(board[0])):
                    count2 = 0
                    for j in range((len(board))):
                        if board[j][i] == ' ':
                            for u in range(1, 20 - j):
                                if i == 9:
                                    if board[j+u][i] == ' ' and board[j+u][i-1] == 'x':
                                        count2 += 1
                                    elif board[j+u][i] == 'x':
                                        break
                                if i == 0:
                                    if board[j+u][i] == ' ' and board[j+u][i+1] == 'x':
                                        count2 += 1
                                    elif board[j+u][i] == 'x':
                                        break
                                if 0 < i < 9:
                                    if board[j+u][i] == ' ' and board[j+u][i-1] == 'x' and board[j+u][i+1] == 'x':
                                        count2 += 1
                                    elif board[j+u][i] == 'x':
                                        break
                    well_sums += count2
                # Weighted linear evaluation of the resulting board.
                eval = -4.500158825082766*l_height + 3.4181268101392694*lines_cleared - 3.2178882868487753*row_trans - 9.348695305445199*col_trans - 7.899265427351652*num_holes - 3.3855972247263626*well_sums
                # eval = -.510066*max(h) + .760666*lines_cleared - .35663*num_holes - .184483*bumpiness
                # eval = -max(h) + 4*lines_cleared - 3*num_holes - bumpiness
                # eval = .760666*lines_cleared - .35663*num_holes - .184483*bumpiness
                # eval = -(max(h) - lines_cleared + num_holes + well_sums)
                # Keyed by score. NOTE(review): on ties the extra placement is
                # appended to the first entry's list but never read back below.
                if eval not in eval_dict:
                    eval_dict[eval] = [all_comb[r], tetris.col, c, num_holes, lines_cleared, row_trans, col_trans, l_height, well_sums]
                else:
                    eval_dict[eval].append([all_comb[r], tetris.col, c, num_holes, lines_cleared, row_trans, col_trans, l_height, well_sums])
        # Pick the highest-scoring placement.
        max1 = -1000000
        for key in eval_dict:
            if max1 < key:
                max1 = key
        temp_tetris1 = copy.deepcopy(tetris)
        # NOTE(review): `str` shadows the builtin inside this method.
        str = ""
        if len(eval_dict) > 0:
            # Horizontal moves: 'm' moves right, 'b' moves left, one letter
            # per column of offset between current and target column.
            if (eval_dict[max1][1] - eval_dict[max1][2]) < 0:
                for v in range(abs((eval_dict[max1][1] - eval_dict[max1][2]))):
                    str += 'm'
            elif (eval_dict[max1][1] - eval_dict[max1][2]) > 0:
                for v in range(abs((eval_dict[max1][1] - eval_dict[max1][2]))):
                    str += 'b'
            # Rotations: append 'n' until the piece matches the chosen one.
            if temp_tetris1.piece != eval_dict[max1][0]:
                for z in range(3):
                    temp_tetris1.rotate()
                    if temp_tetris1.get_piece()[0] != eval_dict[max1][0]:
                        str += 'n'
                    if temp_tetris1.get_piece()[0] == eval_dict[max1][0]:
                        str += 'n'
                        break
        # NOTE(review): `test` is unused and `random` is not imported here -
        # presumably it leaks in through the star imports above; confirm,
        # otherwise this line raises NameError.
        test = random.choice("mnb") * random.randint(1, 10)
        # super simple current algorithm: just randomly move left, right, and rotate a few times
        return str

    # This is the version that's used by the animted version. This is really similar to get_moves,
    # except that it runs as a separate thread and you should access various methods and data in
    # the "tetris" object to control the movement. In particular:
    #   - tetris.col, tetris.row have the current column and row of the upper-left corner of the
    #     falling piece
    #   - tetris.get_piece() is the current piece, tetris.get_next_piece() is the next piece after that
    #   - tetris.left(), tetris.right(), tetris.down(), and tetris.rotate() can be called to actually
    #     issue game commands
    #   - tetris.get_board() returns the current state of the board, as a list of strings.
    #
    def control_game(self, tetris):
        # another super simple algorithm: just move piece to the least-full column
        while 1:
            time.sleep(0.1)
            board = tetris.get_board()
            # For each column: row index of its highest filled cell
            # (100 when the column is empty, i.e. "tallest" below).
            column_heights = [ min([ r for r in range(len(board)-1, 0, -1) if board[r][c] == "x" ] + [100,] ) for c in range(0, len(board[0]) ) ]
            index = column_heights.index(max(column_heights))
            if(index < tetris.col):
                tetris.left()
            elif(index > tetris.col):
                tetris.right()
            else:
                tetris.down()
###################
#### main program
# (player_opt, interface_opt) = sys.argv[1:3]
# Hard-coded configuration (command-line parsing is commented out above).
player_opt = 'computer'
interface_opt = 'simple'

try:
    if player_opt == "human":
        player = HumanPlayer()
    elif player_opt == "computer":
        player = ComputerPlayer()
    else:
        # NOTE(review): execution continues with `player` undefined, so
        # start_game below would raise NameError for an unknown option.
        print "unknown player!"

    if interface_opt == "simple":
        tetris = SimpleTetris()
    elif interface_opt == "animated":
        tetris = AnimatedTetris()
    else:
        print "unknown interface!"

    tetris.start_game(player)
except EndOfGame as s:
    # Normal termination: the game signals its end via this exception.
    print "\n\n\n", s
# Generated by Django 2.0 on 2018-06-19 10:42
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.fields
class Migration(migrations.Migration):
    """Blog categories: add a description field and ordering/verbose-name
    options to BlogCategory, and introduce BlogCategoryBlogPage as an
    explicit through-table for a page<->category many-to-many relation."""

    dependencies = [
        ('blog', '0004_blogpage_image'),
    ]

    operations = [
        # Join table; its two FK columns are added separately below.
        migrations.CreateModel(
            name='BlogCategoryBlogPage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
            ],
        ),
        migrations.AlterModelOptions(
            name='blogcategory',
            options={'ordering': ['name'], 'verbose_name': 'Category', 'verbose_name_plural': 'Categories'},
        ),
        migrations.AddField(
            model_name='blogcategory',
            name='description',
            field=models.CharField(blank=True, max_length=500),
        ),
        # related_name='+' disables the reverse accessor from BlogCategory.
        migrations.AddField(
            model_name='blogcategoryblogpage',
            name='category',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='blog.BlogCategory', verbose_name='Category'),
        ),
        # ParentalKey: modelcluster's FK used by Wagtail inline panels.
        migrations.AddField(
            model_name='blogcategoryblogpage',
            name='page',
            field=modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='categories', to='blog.BlogPage'),
        ),
        migrations.AddField(
            model_name='blogpage',
            name='blog_categories',
            field=models.ManyToManyField(blank=True, through='blog.BlogCategoryBlogPage', to='blog.BlogCategory'),
        ),
    ]
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 14 21:18:39 2020
@author: Lenovo
"""
from yahoo_fin import stock_info as si
from pygame import mixer
import tkinter as tk
import pandas as pd
# Silence pandas' SettingWithCopyWarning globally.
pd.options.mode.chained_assignment = None

# Minimal Tk window with two entry boxes (ticker symbol and stock level).
root = tk.Tk()
canvas1 = tk.Canvas(root, width = 300, height = 300)  # create the canvas
canvas1.pack()

entry1 = tk.Entry (root)  # ticker-symbol entry box
entry2 = tk.Entry (root)  # stock-level entry box
canvas1.create_window(150, 50, window=entry1)
canvas1.create_window(150, 210, window=entry2)
def insert_number():
    """Button callback: snapshot both entry-box values into globals x1/x2."""
    global x1  # ticker symbol entered by the user
    global x2  # NOTE(review): labelled "stock level" but never used below
    x1 = str(entry1.get())
    x2 = str(entry2.get())
# Both buttons invoke the same callback that snapshots the entry values.
button1 = tk.Button (root, text='Input stock Ticker (XXXX) ',command=insert_number, bg='blue', fg='white')
button2 = tk.Button (root, text='Stock level', command=insert_number, bg='green', fg='white')
canvas1.create_window(150, 80, window=button1)
canvas1.create_window(150, 240, window=button2)

# Blocks until the window is closed; x1/x2 must have been set via a button
# press by then, otherwise the loop below raises NameError.
root.mainloop()

mixer.init()
# NOTE(review): doubled ".mp3" extension looks accidental - confirm filename.
mixer.music.load("alert.mp3.mp3")

# Poll the live price while it stays inside the (650, 1600] band, playing the
# audio alert on every poll.
while 650 < si.get_live_price(f'{x1}') <= 1600:
    # NOTE(review): a price of exactly 2 can never occur inside the
    # 650-1600 band, so this guard is dead code.
    if si.get_live_price(f'{x1}') == 2:
        continue
    print(si.get_live_price(f'{x1}'))
    print('Stock Alert, Level Hit.')
    mixer.music.play()
# Created by: Jenny Trac
# Created on: Dec 2017
# Created for: ICS3U
# This scene shows the gave over screen.
from scene import *
import ui
import config
from game_scene import *
from main_menu_scene import *
class GameOverScene(Scene):
    """Game-over screen: shows the final score, promotes it to the session
    high score when beaten, and returns to the main menu on tap."""

    def setup(self):
        # this method is called, when user moves to this scene
        self.CENTRE_OF_SCREEN = self.size / 2
        # add space background
        self.background = SpriteNode('./assets/sprites/dark_space_background.PNG',
                                     position = self.CENTRE_OF_SCREEN,
                                     parent = self,
                                     size = self.size)
        # game-over title
        # NOTE(review): if self.size / 2 yields a mutable vector, this
        # aliases CENTRE_OF_SCREEN and the .y assignment below mutates it
        # too - confirm Vector semantics of the `scene` module.
        game_over_label_position = self.CENTRE_OF_SCREEN
        game_over_label_position.y = self.size.y - 100
        self.game_over_label = LabelNode(text = "Game Over",
                                         font = ('ChalkboardSE-Light', 120),
                                         parent = self,
                                         position = game_over_label_position)
        # main menu label
        main_menu_label_position = self.size / 2
        self.main_menu_label = LabelNode(text = "Main menu",
                                         position = main_menu_label_position,
                                         parent = self,
                                         font = ('ChalkboardSE-Light', 80))
        # main menu game button
        main_menu_button_position = Vector2()
        main_menu_button_position.x = 220
        main_menu_button_position.y = self.size.y / 2
        self.main_menu_button = SpriteNode('./assets/sprites/arrow_button.PNG',
                                           position = main_menu_button_position,
                                           parent = self,
                                           scale = 0.15)
        # score
        score_label_position = Vector2()
        score_label_position.x = self.size.x / 4
        score_label_position.y = self.size.y - 260
        self.score_label = LabelNode("Score: " + str(config.score),
                                     position = score_label_position,
                                     parent = self,
                                     font = ('ChalkboardSE-Light', 60))
        # promote the score to the session high score when beaten
        if config.score > config.high_score:
            config.high_score = config.score
        # highscore label
        highscore_label_position = Vector2()
        highscore_label_position.x = (self.size.x * 3 / 4) - 50
        highscore_label_position.y = self.size.y - 260
        self.highscore_label = LabelNode("Highscore: " + str(config.high_score),
                                         position = highscore_label_position,
                                         parent = self,
                                         font = ('ChalkboardSE-Light', 60))

    def update(self):
        # this method is called, hopefully, 60 times a second
        pass

    def touch_began(self, touch):
        # this method is called, when user touches the screen
        pass

    def touch_moved(self, touch):
        # this method is called, when user moves a finger around on the screen
        pass

    def touch_ended(self, touch):
        # this method is called, when user releases a finger from the screen
        #pass
        # main menu button: tapping either the arrow sprite or the label
        # flags the game as over and closes this modal scene
        if self.main_menu_button.frame.contains_point(touch.location) or self.main_menu_label.frame.contains_point(touch.location):
            # change game status to over then close scene
            config.game_over = True
            self.dismiss_modal_scene()

    def did_change_size(self):
        # this method is called, when user changes the orientation of the screen
        # thus changing the size of each dimension
        pass

    def pause(self):
        # this method is called, when user touches the home button
        # save anything before app is put to background
        pass

    def resume(self):
        # this method is called, when user place app from background
        # back into use. Reload anything you might need.
        pass
|
import os
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'tango_with_django_project.settings')
import django
django.setup()
from rango.models import Category, Page
def populate():
    """Create three movie categories and their pages, then print everything
    that now exists in the database."""
    python_pages = [{'title': 'Monster House', 'url': 'http://127.0.0.1:8000/rango/about/', 'views': 3}, {'title': 'ELI', 'url': 'http://127.0.0.1:8000/rango/about1/', 'views': 4}]
    django_pages = [{'title': 'READY OR NOT', 'url': 'http://127.0.0.1:8000/rango/about2/', 'views': 6}, {'title': 'Spiral', 'url': 'http://127.0.0.1:8000/rango/about3/', 'views': 7}]
    other_pages = [{'title': 'THE CONJURING', 'url': 'http://127.0.0.1:8000/rango/about4/', 'views': 9}, {'title': 'Corpse Bride', 'url': 'http://127.0.0.1:8000/rango/about5/', 'views': 10}]
    # Category name -> its page list and category-level counters.
    cats = {'PGlevel 13': {'pages': python_pages, 'views': 128, 'likes': 64}, 'PGlevel 15': {'pages': django_pages, 'views': 64, 'likes': 32}, 'PGlevel 17': {'pages': other_pages, 'views': 32, 'likes': 16}}
    for cat, cat_data in cats.items():
        c = add_cat(cat, cat_data['views'], cat_data['likes'])
        for p in cat_data['pages']:
            p = add_page(c, p['title'], p['url'], p['views'])
    # Print every page grouped by category as a summary of what was added.
    for c in Category.objects.all():
        for p in Page.objects.filter(category=c):
            print(f'- {c}: {p}')
def add_page(cat, title, url, views):
    """Get or create a Page in category *cat* and (re)set its URL.

    NOTE(review): `views` is part of the get_or_create lookup, so the same
    title with a different view count creates a duplicate row - confirm
    this is intended (the commented line suggests it once was an update).
    """
    p = Page.objects.get_or_create(category=cat, title=title, views=views)[0]
    p.url = url
    # p.views = views
    p.save()
    return p
def add_cat(name, views, likes):
    """Get or create a Category with the given counters and return it.

    NOTE(review): views/likes are part of the lookup, so changed counters
    create a new row rather than updating the existing category.
    """
    c = Category.objects.get_or_create(name=name, views=views, likes=likes)[0]
    # c.views = views
    # c.likes = likes
    c.save()
    return c
# Run the population script only when executed directly (not on import).
if __name__ == '__main__':
    print('Starting Rango population script...')
    populate()
from random import randint
from parent import Parent
class Calculate_route(Parent):
    """Builds a random route: an ordered mapping from "x, y" coordinate
    strings to point labels ("point_0", "point_1", ...)."""

    def __init__(self):
        # Placeholder route until calc_route() is invoked.
        self.route = {"11, 22": "point1", "33, 44": "point2"}

    def get(self):
        """Return the current route mapping."""
        return self.route

    def __call__(self):
        # Calling the instance recomputes the route.
        self.calc_route()

    def calc_route(self):
        """Generate a fresh route starting at the origin with up to two
        additional randomly-offset waypoints, and store it on self.route."""
        x = 0
        y = 0
        route = {"{}, {}".format(x, y): "point_0"}
        waypoint_count = randint(1, 3)
        step = 1
        while step < waypoint_count:
            # Each waypoint moves 1-32 units further along both axes.
            x += randint(1, 32)
            y += randint(1, 32)
            route["{}, {}".format(x, y)] = "point_" + str(step)
            step += 1
        self.route = route
# Smoke test: build a random route and print it.
if __name__ == '__main__':
    a = Calculate_route()
    a.calc_route()
    # BUG FIX: the class exposes get(), not get_route(); the previous call
    # `a.get_route()` raised AttributeError at runtime.
    print(a.get())
from abc import ABC
import torch
import torch.nn as nn
import random
from module.facial_inpaint.base_inpaint import BaseNetwork
from module import networks as networks
from module.loss.loss import RelativisticAverageLoss, PerceptualLoss, StyleLoss
# test
import numpy as np
import os
import cv2
class FacialInpaint(BaseNetwork, ABC):
    """GAN-based facial inpainting network.

    Bundles the generator (G), a global discriminator (D, optionally with
    per-facial-feature attribute-classifier heads) and an optional local
    patch discriminator (L), together with their losses, optimizers and
    LR schedulers.

    NOTE(review): several loss attributes (loss_D_Local, loss_G_Local,
    loss_D_CLA, loss_G_CLA, ...) are only assigned when opt.local_dis is
    set / region_encoder is unset, yet are combined unconditionally below.
    Training presumably assumes both options are in their "enabled"
    configuration - confirm before running other combinations.
    """

    def __init__(self, opt, facial_fea_names, facial_fea_attr_names, facial_fea_attr_len):
        super(FacialInpaint, self).__init__(opt=opt)
        self.facial_fea_attr_len = facial_fea_attr_len
        self.region_encoder = opt.region_encoder
        # Generator (DMFB architecture).
        self.G = networks.define_G_DMFB(facial_fea_names, facial_fea_attr_names, facial_fea_attr_len, opt.add_noise,
                                        opt.spade_segmap, opt.latent_vector_size, opt.skip_type, opt.region_encoder,
                                        opt.is_spectral_norm, opt.gpu_ids, opt.norm_type, opt.init_type, opt.init_gain)
        self.model_names.append('G')
        if self.is_train:
            # Optional local (patch) discriminator.
            if opt.local_dis:
                self.L = networks.define_D_DMFB(input_nc=3, gpu_ids=opt.gpu_ids, norm_type=opt.norm_type)
                self.model_names.append('L')
            # Global discriminator: plain when the region encoder is used,
            # otherwise one with attribute-classifier heads.
            if self.region_encoder:
                self.D = networks.define_D_DMFB(input_nc=3, gpu_ids=opt.gpu_ids, norm_type=opt.norm_type)
            else:
                self.D = networks.define_D_Classifier_DMFB(facial_fea_names, facial_fea_attr_len,
                                                           gpu_ids=opt.gpu_ids)
            self.model_names.append('D')
        if self.is_train:
            # define loss functions
            self.criterionGAN = RelativisticAverageLoss()
            self.criterionL1 = torch.nn.L1Loss()
            self.criterionL2 = torch.nn.MSELoss()
            self.PerceptualLoss = PerceptualLoss()
            self.StyleLoss = StyleLoss()
            # optional field
            if not self.region_encoder:
                self.criterionClassifier = nn.BCEWithLogitsLoss()
            # initialize optimizers
            self.optimizer_G = torch.optim.Adam(self.G.parameters(),
                                                lr=opt.lr, betas=(opt.adam_beta, 0.999))
            self.optimizer_D = torch.optim.Adam(self.D.parameters(),
                                                lr=opt.lr, betas=(opt.adam_beta, 0.999))
            self.optimizers.append(self.optimizer_G)
            self.optimizers.append(self.optimizer_D)
            # optional field
            if opt.local_dis:
                self.optimizer_L = torch.optim.Adam(self.L.parameters(),
                                                    lr=opt.lr, betas=(opt.adam_beta, 0.999))
                self.optimizers.append(self.optimizer_L)
            # One LR scheduler per optimizer.
            for optimizer in self.optimizers:
                self.schedulers.append(networks.get_scheduler(optimizer, opt))
            print('---------- Networks initialized -------------')
            networks.print_network(self.G)
            networks.print_network(self.D)
            # optional field
            if opt.local_dis:
                networks.print_network(self.L)
            print('-----------------------------------------------')
            if opt.continue_train:
                print('Loading pre-trained network for train!')
                self.load_networks(opt.which_epoch)
        if not self.is_train:
            print('Loading pre-trained network for test!')
            self.load_networks(opt.which_epoch)
            # Freeze every sub-network for inference.
            for model in self.model_names:
                getattr(self, model).eval()
                for para in getattr(self, model).parameters():
                    para.requires_grad = False

    def set_input(self, inputs):
        """Unpack one data batch and build the masked generator input.

        `inputs` is (file_name, image_gt, mask, segmap, attr_matrix).
        Image tensors appear to be in [-1, 1] (see the hole-fill values
        below) - TODO confirm against the data loader.
        """
        file_name, image_gt, mask, segmap, attr_matrix = inputs
        self.file_name = file_name
        self.image_gt = image_gt.to(self.device)
        self.segmap = segmap.to(self.device)
        self.attr_matrix = attr_matrix.to(self.device)
        # define local area which send to the local discriminator
        if self.opt.local_dis:
            self.local_gt = image_gt.to(self.device)
            # Random origin of a 64x64 crop; the 191 upper bound assumes
            # 256-px inputs (191 + 64 = 255) - TODO confirm image size.
            self.crop_x = random.randint(0, 191)
            self.crop_y = random.randint(0, 191)
            self.local_gt = self.local_gt[:, :, self.crop_x:self.crop_x + 64, self.crop_y:self.crop_y + 64]
        # mask
        self.mask = mask.to(self.device)
        # mask 0 is hole
        self.inv_ex_mask = torch.add(torch.neg(self.mask.float()), 1).float()
        # Do not set the mask regions as 0: fill the hole with per-channel
        # constants (123/104/117 in 0-255 space) rescaled to [-1, 1].
        self.input = image_gt.to(self.device)
        self.input.narrow(1, 0, 1).masked_fill_(self.mask.narrow(1, 0, 1).bool(), 2 * 123.0 / 255.0 - 1.0)
        self.input.narrow(1, 1, 1).masked_fill_(self.mask.narrow(1, 0, 1).bool(), 2 * 104.0 / 255.0 - 1.0)
        self.input.narrow(1, 2, 1).masked_fill_(self.mask.narrow(1, 0, 1).bool(), 2 * 117.0 / 255.0 - 1.0)

    def forward(self):
        """Run the generator on the masked input to produce self.fake_out."""
        self.fake_out = self.G(self.input, self.segmap, self.mask, self.attr_matrix)

    def backward_d(self):
        """Discriminator step: relativistic GAN loss (global + optional local)
        plus the attribute-classification loss when the classifier exists."""
        real = self.image_gt
        fake = self.fake_out
        # Global Discriminator
        pred_fake, cla_fake = self.D(fake.detach(), self.segmap)
        pred_real, cla_real = self.D(real, self.segmap)
        self.loss_D_Global = self.criterionGAN(pred_real, pred_fake, True)
        # optional field
        # Local Discriminator
        if self.opt.local_dis:
            real_local = self.local_gt
            fake_local = self.fake_out[:, :, self.crop_x:self.crop_x + 64, self.crop_y:self.crop_y + 64]
            pred_fake_l, _ = self.L(fake_local.detach())
            pred_real_l, _ = self.L(real_local)
            # NOTE(review): argument order (fake, real) is swapped relative
            # to the global call above - confirm criterionGAN's signature.
            self.loss_D_Local = self.criterionGAN(pred_fake_l, pred_real_l, True)
        # Total adversarial discriminator loss (see class NOTE about the
        # attributes referenced here).
        self.loss_D_GAN = self.loss_D_Global + self.loss_D_Local
        # optional field
        # Discriminator classified loss
        if not self.region_encoder:
            self.loss_D_fake_CLA = self.get_cla_loss(cla_fake)
            self.loss_D_real_CLA = self.get_cla_loss(cla_real)
            self.loss_D_CLA = self.loss_D_real_CLA + self.loss_D_fake_CLA
        self.loss_D = self.loss_D_GAN + self.loss_D_CLA
        self.loss_D.backward()

    def backward_g(self):
        """Generator step: L1 + perceptual + style reconstruction terms,
        adversarial terms, and (optionally) the classification term."""
        real = self.image_gt
        fake = self.fake_out
        # First, Reconstruction loss, style loss, L1 loss
        self.loss_L1 = self.criterionL1(fake, real)
        self.Perceptual_loss = self.PerceptualLoss(fake, real)
        self.Style_Loss = self.StyleLoss(fake, real)
        # Second, The generator should fake the discriminator
        # Global discriminator
        pred_real, cla_real = self.D(real, self.segmap)
        pred_fake, cla_fake = self.D(fake, self.segmap)
        self.loss_G_Global = self.criterionGAN(pred_real, pred_fake, False)
        # optional field
        # Local discriminator
        if self.opt.local_dis:
            real_local = self.local_gt
            fake_local = self.fake_out[:, :, self.crop_x:self.crop_x + 64, self.crop_y:self.crop_y + 64]
            pred_real_l, _ = self.L(real_local)
            pred_fake_l, _ = self.L(fake_local)
            self.loss_G_Local = self.criterionGAN(pred_real_l, pred_fake_l, False)
        self.loss_G_GAN = self.loss_G_Global + self.loss_G_Local
        # Third, Generator classified loss
        if not self.region_encoder:
            self.loss_G_fake_CLA = self.get_cla_loss(cla_fake)
            self.loss_G_real_CLA = self.get_cla_loss(cla_real)
            self.loss_G_CLA = self.loss_G_real_CLA + self.loss_G_fake_CLA
        # finally Generator loss (fixed weights: 1 / 0.2 / 250 / 0.2 / 1)
        self.loss_G = self.loss_L1 * 1 + self.Perceptual_loss * 0.2 + self.Style_Loss * 250 \
                      + self.loss_G_GAN * 0.2 + self.loss_G_CLA
        self.loss_G.backward()

    def optimize_parameters(self):
        """One full training iteration: forward pass, then D (and L) update,
        then G update, toggling requires_grad so only one side trains."""
        self.forward()
        # Optimize the D and L first
        self.set_requires_grad(self.G, False)
        self.set_requires_grad(self.D, True)
        self.optimizer_D.zero_grad()
        if self.opt.local_dis:
            self.set_requires_grad(self.L, True)
            self.optimizer_L.zero_grad()
        self.backward_d()
        self.optimizer_D.step()
        if self.opt.local_dis:
            self.optimizer_L.step()
        # Optimize G
        self.set_requires_grad(self.G, True)
        self.set_requires_grad(self.D, False)
        if self.opt.local_dis:
            self.set_requires_grad(self.L, False)
        self.optimizer_G.zero_grad()
        self.backward_g()
        self.optimizer_G.step()

    def get_current_errors(self):
        """Return the latest loss values keyed by name, for logging.

        NOTE(review): loss_G_GAN / loss_D_Local / loss_G_Local are stored
        without .data unlike the other entries - confirm intent.
        """
        loss_dict = {
            # discriminator
            'loss_D_Global': self.loss_D_Global.data,
            'loss_D_GAN': self.loss_D_GAN.data,
            'loss_D': self.loss_D.data,
            # Generator
            'loss_G_Global': self.loss_G_Global.data,
            'loss_G_GAN': self.loss_G_GAN,
            'loss_L1': self.loss_L1.data,
            'Perceptual_loss': self.Perceptual_loss.data,
            'Style_Loss': self.Style_Loss.data,
            'loss_G': self.loss_G.data
        }
        if self.opt.local_dis:
            loss_dict.update({"loss_D_Local": self.loss_D_Local})
            loss_dict.update({"loss_G_Local": self.loss_G_Local})
        if not self.region_encoder:
            loss_dict.update({"loss_G_fake_CLA": self.loss_G_fake_CLA.data})
            loss_dict.update({"loss_G_real_CLA": self.loss_G_real_CLA.data})
            loss_dict.update({"loss_D_fake_CLA": self.loss_D_fake_CLA.data})
            loss_dict.update({"loss_D_real_CLA": self.loss_D_real_CLA.data})
        return loss_dict

    def get_current_visuals(self):
        """Return (input, fake, ground-truth) images rescaled from [-1, 1]
        to [0, 1] on the CPU."""
        input_image = (self.input.data.cpu() + 1) / 2.0
        fake_image = (self.fake_out.data.cpu() + 1) / 2.0
        real_gt = (self.image_gt.data.cpu() + 1) / 2.0
        return input_image, fake_image, real_gt

    def get_cla_loss(self, net_out):
        """Sum the BCE-with-logits loss of every facial-feature classifier
        head against its slice of the attribute matrix."""
        cla_loss = 0
        for idx, facial_fea_cls_pred in enumerate(net_out):
            cla_loss = cla_loss + self.criterionClassifier(facial_fea_cls_pred,
                                                          self.attr_matrix[:, idx, 0:self.facial_fea_attr_len[idx]])
        return cla_loss

    def test(self):
        """Inference pass: run the generator and write the input, output,
        ground-truth and mask images of the batch to the result folders."""
        self.forward()
        input_image, fake_image, real_gt = self.get_current_visuals()
        # chw -> hwc
        gt = real_gt.numpy().transpose((0, 2, 3, 1)) * 255
        output = fake_image.numpy().transpose((0, 2, 3, 1)) * 255
        inputs = input_image.numpy().transpose((0, 2, 3, 1)) * 255
        mask = self.mask.cpu().numpy().transpose((0, 2, 3, 1)) * 255
        for idx in range(output.shape[0]):
            # rgb -> bgr (OpenCV channel order)
            save_gt = gt[idx][..., ::-1].astype(np.uint8)
            save_output = output[idx][..., ::-1].astype(np.uint8)
            save_inputs = inputs[idx][..., ::-1].astype(np.uint8)
            mask_inputs = mask[idx][..., ::-1].astype(np.uint8)
            # save path
            fake_save_path = os.path.join(self.opt.results_fake_dir, self.file_name[idx])
            gt_save_path = os.path.join(self.opt.results_gt_dir, self.file_name[idx])
            inputs_save_path = os.path.join(self.opt.results_input_dir, self.file_name[idx])
            mask_save_path = os.path.join(self.opt.results_mask_dir, self.file_name[idx])
            # save
            cv2.imwrite(fake_save_path, save_output)
            cv2.imwrite(gt_save_path, save_gt)
            cv2.imwrite(inputs_save_path, save_inputs)
            cv2.imwrite(mask_save_path, mask_inputs)
|
def red_stripes(image_matrix):
    """Return a copy of *image_matrix* with the red channel forced to 255 in
    stripes; a single pixel counter runs continuously across rows, leaving
    pixels untouched while it is between 50 and 100 (resetting at 100)."""
    striped = []
    counter = 0
    for source_row in image_matrix:
        new_row = []
        for pixel in source_row:
            channels = list(pixel)
            if 50 <= counter <= 100:
                # Inside the untouched band; wrap the counter at 100.
                if counter == 100:
                    counter = 0
            else:
                channels[0] = 255  # paint the stripe red
            new_row.append(tuple(channels))
            counter += 1
        striped.append(new_row)
    return striped
def grayscale(image_matrix):
    """Return a grayscale copy: each pixel's first three channels are
    replaced by their integer average; extra channels (alpha) are kept."""
    gray = []
    for source_row in image_matrix:
        new_row = []
        for pixel in source_row:
            channels = list(pixel)
            level = (channels[0] + channels[1] + channels[2]) // 3
            channels[0] = level
            channels[1] = level
            channels[2] = level
            new_row.append(tuple(channels))
        gray.append(new_row)
    return gray
def invert_colors(image_matrix):
    """Return a copy with R, G and B inverted (255 - value); any extra
    channels beyond the first three are carried through unchanged."""
    inverted = []
    for source_row in image_matrix:
        new_row = []
        for pixel in source_row:
            channels = list(pixel)
            channels[0] = 255 - channels[0]
            channels[1] = 255 - channels[1]
            channels[2] = 255 - channels[2]
            new_row.append(tuple(channels))
        inverted.append(new_row)
    return inverted
def flip(image_matrix):
    """Mirror the image vertically by reversing its row order IN PLACE,
    then return the same (mutated) matrix object."""
    image_matrix[:] = image_matrix[::-1]
    return image_matrix
def blur(image_matrix):
    """Return a box-blurred copy of *image_matrix*.

    Each output pixel is the integer mean (floor division) of itself and
    every in-bounds neighbour in its 3x3 window: 4 cells in a corner,
    6 on an edge, 9 in the interior - exactly the divisors the original
    nine hand-written branches used.

    Improvements over the previous version:
    - the nine copy-pasted corner/edge/interior branches collapse into one
      generic in-bounds neighbour scan with identical results;
    - matrices narrower or shorter than 2 pixels no longer raise
      IndexError (the old corner branches always indexed row+1/column+1).

    Assumes a rectangular matrix of (r, g, b[, ...]) tuples; only the
    first three channels are averaged and emitted.
    """
    height = len(image_matrix)
    blurred = []
    for row in range(height):
        width = len(image_matrix[row])
        new_row = []
        for column in range(width):
            reds = greens = blues = count = 0
            # Scan the 3x3 window centered on (row, column), skipping
            # positions that fall outside the image.
            for dr in (-1, 0, 1):
                for dc in (-1, 0, 1):
                    r, c = row + dr, column + dc
                    if 0 <= r < height and 0 <= c < width:
                        pixel = image_matrix[r][c]
                        reds += pixel[0]
                        greens += pixel[1]
                        blues += pixel[2]
                        count += 1
            new_row.append((reds // count, greens // count, blues // count))
        blurred.append(new_row)
    return blurred
def sepia(image_matrix):
    """Return a sepia-toned copy using the standard sepia weight matrix;
    each channel is truncated to int and capped at 255. Channels beyond
    the first three (alpha) pass through unchanged."""
    toned = []
    for source_row in image_matrix:
        new_row = []
        for pixel in source_row:
            channels = list(pixel)
            r, g, b = channels[0], channels[1], channels[2]
            channels[0] = min(255, int((r * .393) + (g * .769) + (b * .189)))
            channels[1] = min(255, int((r * .349) + (g * .686) + (b * .168)))
            channels[2] = min(255, int((r * .272) + (g * .534) + (b * .131)))
            new_row.append(tuple(channels))
        toned.append(new_row)
    return toned
def threshold(image_matrix,
              red_threshold=(0, 255),
              green_threshold=(0, 255),
              blue_threshold=(0, 255)):
    """Return a copy where any pixel whose R, G or B channel falls outside
    its (low, high) band is blacked out entirely.

    Note: the checks run sequentially on the possibly-already-zeroed pixel,
    matching the original behaviour (re-zeroing a black pixel is a no-op).
    """
    bands = (red_threshold, green_threshold, blue_threshold)
    filtered = []
    for source_row in image_matrix:
        new_row = []
        for pixel in source_row:
            channels = list(pixel)
            for idx, (low, high) in enumerate(bands):
                if channels[idx] > high or channels[idx] < low:
                    channels[0] = 0
                    channels[1] = 0
                    channels[2] = 0
            new_row.append(tuple(channels))
        filtered.append(new_row)
    return filtered
|
import time
from selenium import webdriver
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
# Launch Chrome and exercise the python.org login form end-to-end.
driver = webdriver.Chrome()
driver.maximize_window()
driver.get("https://www.python.org/accounts/login/")

wait = WebDriverWait(driver, 60)  # explicit wait, up to 60s

# Fill in (dummy) credentials.
# NOTE(review): find_element_by_xpath was removed in Selenium 4 - switch to
# driver.find_element(By.XPATH, ...) when upgrading.
driver.find_element_by_xpath("//input[@id='id_login']").send_keys("admin")
time.sleep(2)
driver.find_element_by_xpath("//input[@id='id_password']").send_keys("admin123")
time.sleep(2)

# Tick the "remember me" checkbox once it becomes clickable.
element = wait.until(EC.element_to_be_clickable((By.XPATH, "//input[@id='id_remember']")))
element.click()

# Read back the page heading, then shut the browser down.
header = driver.find_element_by_xpath("//h1[contains(text(),'Sign In')]").text
print("Title of the page: ", header)
time.sleep(2)
driver.quit()
#!/usr/bin/env python
from Turbine import Turbine
from cone_height import cone_height
from math import tan, radians
__author__ = 'dmytro'
def cone_radius(t1=None, t2=None):
    """
    Radius of the wind-flow (wake) cone of turbine *t1* measured at the
    position of turbine *t2*, assuming the wake widens at 4.5 degrees
    from the rotor edge.

    :param t1: first (upwind) turbine; defaults to a zero-sized Turbine
    :param t2: second (downwind) turbine; defaults to a zero-sized Turbine
    :return: radius in [meters], rounded to one decimal

    Note: the previous docstring said "Diameter", but t1.d / 2 plus the
    widening term is a radius.
    """
    # BUG FIX: the defaults were `Turbine(0, 0, 0)` expressions evaluated
    # once at import time, so every default call shared the same two
    # (mutable) instances. Create fresh ones per call instead; callers that
    # pass explicit turbines are unaffected.
    if t1 is None:
        t1 = Turbine(0, 0, 0)
    if t2 is None:
        t2 = Turbine(0, 0, 0)
    return round(t1.d / 2 + cone_height(t1, t2) * tan(radians(4.5)), 1)
# Import-only module: no CLI/demo behaviour when executed directly.
if __name__ == "__main__":
    pass
import threading
import time
now = lambda :time.time()
def work(internal):
    """Sleep for `internal` seconds, announcing thread start and end."""
    thread_name = threading.current_thread().name
    print(f"{thread_name} start")
    time.sleep(internal)
    print(f"{thread_name} end")
t1 = now()
print("Main: ", threading.current_thread().name)
# Launch five workers; worker i sleeps i seconds.
for i in range(5):
    thread_instance = threading.Thread(target=work, args=(i, ))
    thread_instance.start()
    # Join only the last-started (longest-sleeping) thread so the main
    # thread exits after its children finish.
    if i == 4:
        thread_instance.join()
print(f"Main: end, Time: {now() - t1}")
|
#!/usr/bin/python
import sys
import re
import email
count = 0; flist = []
msg = email.message_from_file(sys.stdin)
if msg.is_multipart():
for part in msg.walk():
ctype = part.get_content_type()
if re.match("application/.*excel", ctype):
fname = part.get_filename(); flist.append(fname); count += 1
fd = open(fname, "w")
fd.write(part.get_payload(decode=True))
fd.close()
print msg["From"]
print '\n'.join(flist)
|
import re
import copy
class RobotStateMachine():
    """A state machine driven by regex rules keyed on neighbour states.

    machine_code maps a state label to a dict of
    {regex pattern: next state}; patterns are matched against the
    string "<left>-<right>" built from the neighbours' states.
    """
    _current_state = None
    _machine_code = None

    def __init__(self, machine_code):
        self._machine_code = machine_code
        self._current_state = "0"

    def current_state(self):
        """Return the current state label."""
        return self._current_state

    def tick(self, left_state, right_state):
        """Advance one step given the left/right neighbour states.

        If the current state has no rules, or no rule pattern matches,
        the state is left unchanged.
        """
        if self._current_state not in self._machine_code:
            # No rules for this state: the robot stays put for good.
            return
        rules = self._machine_code[self._current_state]
        target = "%s-%s" % (left_state, right_state)
        for pattern, next_state in rules.items():
            try:
                matched = re.search(pattern, target)
            except Exception as e:
                raise Exception("Failed on %s" % pattern, e)
            if matched:
                self._current_state = next_state
                return
        # No pattern matched: keep the current state.

    def __str__(self):
        return str(self._current_state)
def construct_army(ar):
    """Concatenate the first character of each machine's string form."""
    return "".join(str(machine)[0] for machine in ar)
def tick_army(ar):
    """Tick every machine once against a snapshot of neighbour states.

    States are snapshotted before any machine moves so that every
    machine sees its neighbours as they were at the start of the tick.
    Edge machines get "-2" on their missing side.
    """
    snapshot = copy.deepcopy([machine.current_state() for machine in ar])
    last = len(ar) - 1
    for idx, machine in enumerate(ar):
        if idx == 0:
            left = "-2"
            right = snapshot[1]
        elif idx == last:
            left = snapshot[idx - 1]
            right = "-2"
        else:
            left = snapshot[idx - 1]
            right = snapshot[idx + 1]
        machine.tick(left, right)
def end_game_check(army):
    """Return True if any machine in the army has reached the BANG state.

    Uses any() so scanning stops at the first firing machine instead of
    walking the whole army as the original loop did.
    """
    return any(machine._current_state == "BANG" for machine in army)
def find_card(deck, card):
    """Binary search for `card` in the sorted sequence `deck`.

    :param deck: sorted sequence (e.g. a string of ordered characters)
    :param card: item to locate
    :return: the index of `card`, or None if it is not present
    """
    top = len(deck)   # exclusive upper bound
    bottom = 0        # inclusive lower bound
    while bottom < top:
        print('bottom =', bottom, 'top =', top,
              '- remaining cards', deck[bottom:top])
        middle = (top + bottom) // 2
        if deck[middle] == card:
            return middle
        elif card < deck[middle]:
            # BUG FIX: `top` is an exclusive bound, so it must become
            # `middle`, not `middle - 1`, which could skip the target
            # (e.g. searching 'a' in "abcdefg" used to fail).
            top = middle
        else:
            # card > deck[middle]
            bottom = middle + 1
    return None

whole_deck = "abcdefg"
my_card = "h"
print('Looking for card', my_card, 'among', whole_deck)
position = find_card(whole_deck, my_card)
if position is None:
    # BUG FIX: the original printed a bogus position for missing cards.
    print('Card', my_card, 'is not in the deck')
else:
    print('Card', my_card, 'is at position', position)
|
from flask import g
from timeless.access_control import (
administrator_privileges, manager_privileges, other_privileges,
owner_privileges, director_privileges, unknown_privileges)
def is_allowed(method=None, resource=None, *args, **kwargs) -> bool:
    """ Check if user can access particular resource for a given method.
    Additional information needed for authorization can be passed through
    args or kwargs. This method is meant to work in conjunction with
    SecuredView.dispatch_request so that all available information about
    a user view can be accessible in the authorization process.

    :param method: access method being checked (e.g. an HTTP verb)
    :param resource: the resource being accessed
    :return: True if the current user's role grants the privilege

    @todo #358:30min Change checking of this statement below
    `name = g.user.role.name` to `name = g.user.role.role_type` and fix all
    tests, in all tests should be provided role_type instead of name of Role
    model. Example below:
    manager_role = factories.RoleFactory(
    name="manager", role_type=RoleType.Manager)
    me = factories.EmployeeFactory(company=my_company, role=manager_role)
    """
    # Fall back to the "unknown" role when no user is logged in or the
    # user has no role assigned.
    if not g.user or not g.user.role:
        name = "unknown"
    else:
        name = g.user.role.name
    # Dispatch to the privilege checker registered for this role name.
    return __roles[name].has_privilege(
        method=method, resource=resource, *args, **kwargs
    )
# Registry mapping a role name to its privilege-checking object.
# "unknown" handles unauthenticated users or users without a role.
__roles = {
    "owner": owner_privileges,
    "manager": manager_privileges,
    "director": director_privileges,
    "administrator": administrator_privileges,
    "other": other_privileges,
    "unknown": unknown_privileges
}
|
# -*- coding: utf-8 -*-
"""
The Omniforms app
"""
from __future__ import unicode_literals
# Package version as [major, minor, patch] string components.
VERSION = ['0', '4', '0']


def get_version():
    """
    Returns the version string for the omni forms package
    :return: Version string
    """
    parts = list(VERSION)
    return ".".join(parts)
|
import random
from tiles import grass, water, sand, empty
# TODO: make this more interesting and faster
def generate_map(width: int, height: int):
    """Build a width x height grid of tiles, indexed as grid[x][y]."""
    grid = []
    for x_ in range(width):
        column = [rand_tile(x_, y_, width, height) for y_ in range(height)]
        grid.append(column)
    return grid
def rand_tile(x, y, max_x, max_y):
    """Pick the tile for (x, y): border cells are empty, interior is random."""
    on_border = x in (0, max_x - 1) or y in (0, max_y - 1)
    if on_border:
        return empty
    roll = random.randint(0, 100)
    # ~5% water, ~20% sand, rest grass.
    if roll > 95:
        return water
    if roll > 75:
        return sand
    return grass
|
import sys
import os
__author__ = 'mameri'
'''
Report the max score for each sample in the score file
'''
def make_dir_out(p_dir_out):
    """Create the output directory if it does not already exist."""
    if os.path.exists(p_dir_out):
        return
    os.mkdir(p_dir_out)
def get_score_file_list(p_dir_in):
    """Return the filenames in p_dir_in that end with 'gmm.result.top'.

    Uses str.endswith instead of the original magic slice
    `file_item[-14:] == 'gmm.result.top'` (same result, clearer intent).
    """
    return [file_item for file_item in os.listdir(p_dir_in)
            if file_item.endswith('gmm.result.top')]
def merge_max_score(p_dir_in, p_file_in_list, p_dir_out, p_file_out, p_file_sorted_out):
    """Keep the best-scoring class entry for every sample across score files.

    Each input file holds one line per sample; column 2 is the label
    (word id before the '_') and column 4 the score. Writes one
    "<word> <score>" line per sample to p_file_out, plus a copy sorted
    by score (descending) to p_file_sorted_out.

    :param p_dir_in: directory containing the per-class score files
    :param p_file_in_list: filenames of the per-class score files
    :param p_dir_out: directory the two output files are written to
    :param p_file_out: output filename, sample order preserved
    :param p_file_sorted_out: output filename, sorted by score descending
    """
    file_in_full = [os.path.join(p_dir_in, file_in) for file_in in p_file_in_list]
    # BUG FIX: the original joined against the global `dir_out` instead of
    # the `p_dir_out` parameter (here and for the sorted file below).
    file_out_full = os.path.join(p_dir_out, p_file_out)
    # Read all files up front and close each handle explicitly. The
    # original's lazy `map(close)` never ran under Python 3.
    lines_list = []
    for file_i in file_in_full:
        fh = open(file_i)
        try:
            lines_list.append(fh.readlines())
        finally:
            fh.close()
    word_count = len(lines_list[0])
    class_count = len(lines_list)
    score_max_list = []
    with open(file_out_full, 'w') as f_out:
        for word_j in range(0, word_count):
            # Gather sample word_j's entry from every class file.
            score_of_word_j = [lines_list[class_i][word_j].split()
                               for class_i in range(0, class_count)]
            sorted_word_list = sorted(score_of_word_j, key=lambda x: float(x[4]), reverse=True)
            word_j_max = sorted_word_list[0]
            # Label is the part of column 2 before the first underscore.
            max_word_str = word_j_max[2][: word_j_max[2].find('_')]
            word_j_max_str = '{0} {1}\n'.format(max_word_str, word_j_max[4])
            f_out.write(word_j_max_str)
            score_max_list.append(word_j_max_str)
    sorted_score_max_list = sorted(score_max_list, key=lambda x: float(x.split()[1]), reverse=True)
    file_out_full_sorted = os.path.join(p_dir_out, p_file_sorted_out)
    with open(file_out_full_sorted, 'w') as f_sorted:
        f_sorted.writelines(sorted_score_max_list)
if __name__ == '__main__':
    # Usage: script.py <in_score_dir> <out_score_dir>
    if len(sys.argv) > 1:
        argv = sys.argv[1:]
        dir_in = argv[0]
        dir_out = argv[1]
        make_dir_out(dir_out)
        score_file_list = get_score_file_list(dir_in)
        print len(score_file_list), score_file_list[:]
        merge_max_score(dir_in, score_file_list, dir_out, 'max_score.txt', 'max_score_sorted.txt')
        # lines = read_scores_to_list(dir_in)
        # print len(lines), len(lines)/16
        # sorted_lines = lines
        # sorted_lines = sorted(lines, key=lambda x:float(x.split()[4]), reverse= True )
        # print sorted_lines
        # write_scores_to_file(sorted_lines, dir_out)
    else:
        print 'call in_score_dir out_score_dir '
|
import argparse
import process_games
import process_players
import process_game_stats
import data_utils
from datetime import datetime
def main(args):
    """Run the game/stats/player ETL for the given CLI arguments.

    :param args: argparse namespace with date, game_meta_only, player
        and all flags (see the parser at the bottom of the file)
    """
    # Default to "now" when no explicit date was supplied.
    if not args.date:
        date = datetime.now()
    else:
        date = datetime.strptime(args.date, '%Y-%m-%d')
    # if there are games unfinished that are final but have not been processed
    # fetch them to get their data (eg. game ended after 12pm EST)
    unfinished_games = process_games.get_unfinished_games()
    # get all games and their status for a given date and insert to db
    process_games.process(date)
    if args.game_meta_only:
        # Metadata-only run: skip stats and player processing entirely.
        return
    # get games that have not been processed for stats
    games_stats = data_utils.get_unprocessed_games_stats()
    # process stats
    if games_stats:
        run_stats(games_stats)
    else:
        print('No new games to process')
    # Re-run stats for games that were unfinished on a previous run.
    if unfinished_games:
        run_stats(unfinished_games)
        print('Updated {count} games'.format(count=len(unfinished_games)))
    else:
        print('No games to update')
    if args.player or args.all:
        # get games that have not been processed for players
        games_players = data_utils.get_unprocessed_games_player()
        # process players
        if games_players:
            run_players(games_players)
        else:
            print('No unprocessed games for players')
def run_stats(games):
    """Parse, merge and upsert stats for the given games, then mark them done."""
    parsed = []
    for game in games:
        parsed.append(process_game_stats.parse_stats(game))
    stats = data_utils.join_units(parsed)
    process_game_stats.upsert(stats)
    data_utils.mark_games_processed(games, 'stats')
def run_players(games):
    """Parse, merge and persist player records for the given games."""
    parsed = []
    for game in games:
        parsed.append(process_players.parse_players(game))
    players = data_utils.join_units(parsed)
    process_players.player_insert_and_update(players)
    data_utils.mark_games_processed(games, 'players')
if __name__ == '__main__':
    # CLI entry point: build the argument parser and hand off to main().
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', '--date', help='date to run', type=str)
    parser.add_argument('-g', '--game_meta_only',
                        help='only process game metadata',
                        action="store_true")
    parser.add_argument('-a', '--all', help='run all etl', action='store_true')
    # NOTE(review): --batting is parsed but never read by main() — confirm
    # whether it is still needed.
    parser.add_argument('-b', '--batting',
                        help='run etl for player_batting',
                        action='store_true')
    parser.add_argument('-p', '--player',
                        help='run etl for player dimension',
                        action='store_true')
    main(parser.parse_args())
|
#!/usr/bin/env python
"""
Resize a folder of images (including subfolders) to be no
larger than a specified size.
"""
from PIL import Image, ImageFilter
import os, sys
import json
import functools
# need to fix colorspace issues
import io
from PIL import Image
from PIL import ImageCms
supported_ext = ['.jpeg','.jpg','.png']
max_width = 800
max_height = 600
WRITE_IMAGES = True
def convert_to_srgb(img):
    '''Convert PIL image to sRGB color space (if possible)'''
    icc = img.info.get('icc_profile', '')
    if not icc:
        # No embedded profile: return the image unchanged.
        return img
    src_profile = ImageCms.ImageCmsProfile(io.BytesIO(icc))  # profile as virtual file
    dst_profile = ImageCms.createProfile('sRGB')
    return ImageCms.profileToProfile(img, src_profile, dst_profile)
# From https://stackoverflow.com/questions/4228530/pil-thumbnail-is-rotating-my-image
def image_transpose_exif(im):
    """
    Apply Image.transpose to ensure 0th row of pixels is at the visual
    top of the image, and 0th column is the visual left-hand side.
    Return the original image if unable to determine the orientation.
    As per CIPA DC-008-2012, the orientation field contains an integer,
    1 through 8. Other values are reserved.
    Parameters
    ----------
    im: PIL.Image
        The image to be rotated.
    """
    # EXIF tag 0x0112 holds the orientation value (1-8).
    exif_orientation_tag = 0x0112
    # For each orientation value, the sequence of transposes that maps
    # the stored pixels back to the upright visual orientation.
    exif_transpose_sequences = [                   # Val  0th row  0th col
        [],                                        #  0    (reserved)
        [],                                        #  1   top      left
        [Image.FLIP_LEFT_RIGHT],                   #  2   top      right
        [Image.ROTATE_180],                        #  3   bottom   right
        [Image.FLIP_TOP_BOTTOM],                   #  4   bottom   left
        [Image.FLIP_LEFT_RIGHT, Image.ROTATE_90],  #  5   left     top
        [Image.ROTATE_270],                        #  6   right    top
        [Image.FLIP_TOP_BOTTOM, Image.ROTATE_90],  #  7   right    bottom
        [Image.ROTATE_90],                         #  8   left     bottom
    ]
    try:
        # Missing EXIF data or an out-of-range value simply means
        # "leave the image as-is".
        seq = exif_transpose_sequences[im._getexif()[exif_orientation_tag]]
    except Exception:
        return im
    else:
        # Apply the transposes in order.
        return functools.reduce(type(im).transpose, seq, im)
def resizeImage(path, out_dir, max_width=max_width, max_height=max_height):
    """Resize the image at `path` to fit within max_width x max_height,
    preserving aspect ratio, and save it under the same basename in
    out_dir. Returns the shrink ratio that was applied. (Python 2.)
    """
    im = Image.open(path)
    im = image_transpose_exif(im)  # deal with EXIF rotation metadata
    bn = os.path.basename(path)
    ext = os.path.splitext(bn)[1]
    size = im.size
    # find shrink ratio: the tighter of the two axis constraints.
    ratio = min(float(max_width) / size[0], float(max_height) / size[1])
    new_image_size = tuple([int(x*ratio) for x in size])
    if WRITE_IMAGES:
        im = convert_to_srgb(im)
        # jpeg's cant have alpha but somehow it is happening... drop alpha and warn
        if ext.lower() in ['.jpeg', '.jpg'] and im.mode == "RGBA":
            print "WARNING: found JPG with alpha channel??? dropping channel %s" % path
            im = im.convert("RGB")
        new_im = im.resize(new_image_size, Image.LANCZOS)
        new_im.save(os.path.join(out_dir, bn), optimize=True)
    else:
        print 'WARNING: RESIZE DISABLED!'
    return ratio
# CLI: walk inDir recursively and resize every supported image into a
# mirrored directory tree under outDir. (Python 2.)
if len(sys.argv) < 3:
    print "usage: %s inDir outDir [max_width max_height]" % sys.argv[0]
    print "  default max_width=%d max_height=%d" % (max_width, max_height)
    sys.exit(1)
inDir = sys.argv[1]
outBase = sys.argv[2]
# Optional overrides for the size limits.
if len(sys.argv) > 3:
    max_width = int(sys.argv[3])
if len(sys.argv) > 4:
    max_height = int(sys.argv[4])
for dirpath, dirnames, filenames in os.walk(inDir):
    for filename in [f for f in filenames if os.path.splitext(f)[1].lower() in supported_ext]:
        infn = os.path.join(dirpath, filename)
        # Mirror the input subdirectory structure under outBase.
        outdir = os.path.join(outBase, dirpath)
        print 'Found:', infn
        try:
            os.makedirs(outdir)
        except OSError:
            # Directory already exists.
            pass
        resizeImage(infn, outdir, max_width, max_height)
|
# Read a string and report whether it is a palindrome (messages in Russian).
a=input("введите число")
# First half of the string (excludes the middle character for odd lengths).
first_h=a[0:len(a)//2]
second_h=""
if len(a)%2==0:
    second_h=a[len(a)//2:]
else:
    # Odd length: skip the middle character.
    second_h=a[len(a)//2+1:]
# A palindrome's first half equals the reversed second half.
if first_h==second_h[::-1]:
    print("это палиндром")
else:
    print("это не палиндром")
|
import logging
import json
import copy
import pickle
import os
import sys
import torch
import numpy as np
import argparse
from utils import load_dataSets
from args import init_arg_parser, init_config
from transformers import *
import random
from interval import Interval
from tqdm import tqdm
import pandas as pd
from nltk.grammar import Nonterminal
from nltk.grammar import Production
from translate import Translator
from RuleGenerator import RuleGenerator
from shared_variables import COL_TYPE, TAB_TYPE, RULE_TYPE, NONE_TYPE, PAD_id, EOS_id, SOS_id, VAL_TYPE
from logger import get_logger
from parser import FromSQLParser, Rule
from utils import *
from nltk.parse.stanford import StanfordParser
from tree import TreeWithPara
logger = get_logger('create_dataset.log')
class Batch(object):
    """A batch of Feature examples plus padded schema masks.

    Flattens the per-example schema/table attributes into parallel lists
    and builds 0/1 padding masks for columns and tables on the selected
    torch device.
    """

    def __init__(self, examples, grammar, args):
        # Choose the device once; generate_mask/to_tensor reuse it.
        if args.cuda:
            self.device = torch.device('cuda', args.cuda_device_num)
        else:
            self.device = torch.device('cpu')
        self.examples = examples
        self.interactions = [e.interactions for e in self.examples]
        self.table_sents_word = [[" ".join(x) for x in e.tab_cols] for e in self.examples]
        self.schema_sents_word = [[" ".join(x) for x in e.table_names] for e in self.examples]
        self.table_sents = [e.tab_cols for e in self.examples]
        self.col_num = [e.col_num for e in self.examples]
        self.tab_ids = [e.tab_ids for e in self.examples]
        self.table_names = [e.table_names for e in self.examples]
        self.table_len = [e.table_len for e in examples]
        self.col_table_dict = [e.col_table_dict for e in examples]
        self.table_col_name = [e.table_col_name for e in examples]
        self.table_col_len = [e.table_col_len for e in examples]
        self.col_pred = [e.col_pred for e in examples]
        self.col_set = [e.col_set for e in examples]
        self.db_ids = [e.db_id for e in examples]
        self.origin_table_names = [e.origin_table_name for e in examples]
        # BUG FIX: origin_column_names was assigned twice; once is enough.
        self.origin_column_names = [e.origin_column_name for e in examples]
        self.column_names = [e.column_name for e in examples]
        self.grammar = grammar
        self.col_set_mask = self.generate_mask(self.col_num, args.max_col_length)
        self.table_mask = self.generate_mask(self.table_len, args.max_table_length)

    def generate_mask(self, lengthes, max_length):
        """Build a (len(lengthes), max_length) 0/1 LongTensor padding mask.

        Raises if any length exceeds max_length.
        """
        b_mask = []
        for l in lengthes:
            if l <= max_length:
                mask = [1] * l
                padding = [0] * (max_length - l)
                mask.extend(padding)
                b_mask.append(mask)
            else:
                raise Exception('token length exceed max length')
        mask = torch.tensor(b_mask, dtype=torch.long, device=self.device)
        return mask

    def to_tensor(self, obj):
        """Convert a nested sequence to a squeezed LongTensor on self.device."""
        tensor_obj = torch.tensor(np.asarray(obj).squeeze(),
                                  dtype=torch.long,
                                  device=self.device)
        return tensor_obj
class BatchExample(object):
    """Splits a list of Feature examples into Batch objects.

    :param examples: list of Feature examples
    :param batch_size: maximum number of examples per batch
    :param grammar: grammar passed through to each Batch
    :param args: runtime configuration passed through to each Batch
    :param drop_last: accepted for interface compatibility; currently unused
    :param shuffle: randomize example order before batching
    """

    def __init__(self, examples, batch_size, grammar, args, drop_last=True, shuffle=True):
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.drop_last = drop_last  # NOTE(review): stored but never applied
        if len(examples) > 1:
            # BUG FIX: removed leftover debug print(examples) that dumped
            # the entire dataset to stdout.
            self.batched_examples = self.to_batch(examples)
        else:
            self.batched_examples = [examples]
        self.batches = []
        for e in self.batched_examples:
            batch = Batch(e, grammar, args)
            self.batches.append(batch)

    def to_batch(self, examples):
        """Partition examples into consecutive lists of at most batch_size,
        optionally in a shuffled order."""
        if self.shuffle:
            perm = np.random.permutation(len(examples))
        else:
            perm = np.arange(len(examples))
        batched_example = []
        st = 0
        while st < len(examples):
            ed = min(st + self.batch_size, len(perm))
            batched_example.append([examples[perm[i]] for i in range(st, ed)])
            st = ed
        return batched_example
class UtteranceFeature:
    """
    Features for a single utterance (one dialogue turn): the tokenized
    question, its SQL query, target/previous/parent action tensors,
    masked variants for pretraining, and auxiliary artifacts (processed
    AST, NER tags, BERT features, translations).
    """
    def __init__(self, src_sent, tgt_actions=None, sql=None, one_hot_type=None, col_hot_type=None,
                 tokenized_src_sent=None, pre_actions=None, parent_actions=None,
                 question_input_feature=None, masked_target_action=None, masked_pre_action=None,
                 masked_parent_action=None, masked_index=None, zh_src=None, fr_src=None, processed_ast=None,
                 src_sent_ner=None, copy_ast_arg=None, src_sent_origin = None, bert_features=None, group_by=None, max_seq_length=None):
        self.src_sent = src_sent
        self.src_sent_len = len(src_sent)
        self.src_sent_origin = src_sent_origin
        self.tokenized_src_sent = tokenized_src_sent
        self.sql = sql
        self.one_hot_type = one_hot_type
        self.col_hot_type = col_hot_type
        # Action sequences are converted to LongTensors; None stays None.
        self.tgt_actions = self.to_tensor(tgt_actions) if tgt_actions is not None else None
        self.pre_actions = self.to_tensor(pre_actions) if pre_actions is not None else None
        self.parent_actions = self.to_tensor(parent_actions) if parent_actions is not None else None
        self.question_input_feature = question_input_feature
        self.masked_target_actions = self.to_tensor(masked_target_action) if masked_target_action is not None else None
        self.masked_pre_actions = self.to_tensor(masked_pre_action) if masked_pre_action is not None else None
        self.masked_parent_actions = self.to_tensor(masked_parent_action) if masked_parent_action is not None else None
        self.masked_index = masked_index if masked_index is not None else None
        self.src_sent_mask = self.generate_mask([self.src_sent_len], max_seq_length)
        self.zh_src = zh_src
        self.fr_src = fr_src
        self.processed_ast = processed_ast
        self.src_sent_ner = src_sent_ner
        self.copy_ast_arg = copy_ast_arg
        self.bert_features = bert_features
        self.group_by = group_by

    def to_tensor(self, obj):
        """Convert a nested sequence to a squeezed CPU LongTensor."""
        tensor_obj = torch.tensor(np.asarray(obj).squeeze(),
                                  dtype=torch.long)
        return tensor_obj

    def generate_mask(self, lengthes, max_length):
        """Build a (len(lengthes), max_length) 0/1 LongTensor padding mask.
        Raises if any length exceeds max_length."""
        b_mask = []
        for l in lengthes:
            if l <= max_length:
                mask = [1]*l
                padding = [0]*(max_length-l)
                mask.extend(padding)
                b_mask.append(mask)
            else:
                raise Exception('token length exceed max length')
        mask = torch.tensor(b_mask, dtype=torch.long)
        return mask
class Feature:
    """Container for one database interaction example: schema-derived
    attributes (columns, tables, their name variants and lengths) plus
    the list of per-turn UtteranceFeature objects in `interactions`.
    All parameters are stored as-is; no processing happens here.
    """
    def __init__(self, vis_seq=None, tab_cols=None, col_num=None, schema_len=None, tab_ids=None,
                 table_names=None, table_len=None, col_table_dict=None, cols=None,
                 table_col_name=None, table_col_len=None, col_set=None,
                 col_pred=None, interactions=None, db_id=None, origin_table_name=None,
                 origin_column_name=None, column_name=None, group_by=None):
        self.vis_seq = vis_seq
        self.tab_cols = tab_cols
        self.col_num = col_num
        self.schema_len = schema_len
        self.tab_ids = tab_ids
        self.table_names = table_names
        self.table_len = table_len
        self.col_table_dict = col_table_dict
        self.cols = cols
        self.table_col_name = table_col_name
        self.table_col_len = table_col_len
        self.col_pred = col_pred
        self.col_set = col_set
        # Per-turn UtteranceFeature objects for this dialogue.
        self.interactions = interactions
        self.db_id = db_id
        self.origin_table_name = origin_table_name
        self.origin_column_name = origin_column_name
        self.column_name = column_name
        self.group_by = group_by
class Action(object):
    """A single decoding action: its type together with the rule, table,
    column and value-span ids it refers to, plus a sequence mask.
    """
    def __init__(self, act_type, rule_id, table_id, column_id, action_seq_mask, val_id_start=0, val_id_end=0):
        self.act_type = act_type
        self.rule_id = rule_id
        self.table_id = table_id
        self.column_id = column_id
        self.action_seq_mask = action_seq_mask
        # Value span boundaries; default (0, 0) when no value is involved.
        self.val_id_start = val_id_start
        self.val_id_end = val_id_end
class QuestionInputFeature(object):
    """BERT-style question input (token ids, attention mask, type ids)
    plus optional target/previous/parent action sequences."""
    def __init__(self, question_input_ids, question_input_mask, question_type_ids, target_actions=None,
                 pre_actions=None, parent_actions=None):
        """
        :param question_input_ids: token ids for the question input
        :param question_input_mask: attention mask for the question input
        :param question_type_ids: segment/type ids for the question input
        :param target_actions: optional target action sequence
        :param pre_actions: optional previous action sequence
        :param parent_actions: optional parent action sequence
        """
        self.question_input_ids = question_input_ids
        self.question_input_mask = question_input_mask
        self.question_type_ids = question_type_ids
        self.target_actions = target_actions
        self.pre_actions = pre_actions
        self.parent_actions = parent_actions
class ColumnInputFeature(object):
    """Encoded column inputs for one schema: ids, mask, type ids, the raw
    column input, and the mapping from columns to their tables."""
    def __init__(self, db_ids, col_ids, col_mask, col_token_type_ids, column_input, col_table_ids):
        self.db_ids = db_ids
        self.col_ids = col_ids
        self.col_mask = col_mask
        self.col_token_type_ids = col_token_type_ids
        self.column_input = column_input
        self.col_table_ids = col_table_ids
class TableInputFeature(object):
    """Encoded table inputs for one schema: ids, mask, type ids and the
    raw table input."""
    def __init__(self, tab_ids, table_mask, table_token_type_ids, table_input):
        self.tab_ids = tab_ids
        self.table_mask = table_mask
        self.table_token_type_ids = table_token_type_ids
        self.table_input = table_input
class SparcProcessor(object):
    """Loads dataset/table JSON and converts it into model input features."""

    def get_examples(self, args):
        """See base class.
        :type args: object
        """
        data, table_data = load_dataSets(args)
        # data = sparc_processor._read_json(args.train_path)
        # table_data = sparc_processor._read_json(args.table_path)
        tokenizer = BertTokenizer.from_pretrained(args.bert_tokenizer, do_lower_case=False)
        examples = convert_examples_to_features(data, table_data, tokenizer, args.bert_max_src_seq_length,
                                                args.max_action_seq_length, args)
        return examples

    def get_batch(self, args):
        """See base class.
        :type args: object
        """
        # NOTE(review): body is identical to get_examples — possibly meant
        # to call convert_batch_to_features instead; confirm before use.
        data, table_data = load_dataSets(args)
        # data = sparc_processor._read_json(args.train_path)
        # table_data = sparc_processor._read_json(args.table_path)
        tokenizer = BertTokenizer.from_pretrained(args.bert_tokenizer, do_lower_case=False)
        examples = convert_examples_to_features(data, table_data, tokenizer, args.bert_max_src_seq_length,
                                                args.max_action_seq_length, args)
        return examples

    @classmethod
    def _read_tsv(cls, input_file, quote_char=None):
        """Reads a tab separated value file."""
        # NOTE(review): quote_char is unused, and quoting=False relies on
        # False == 0 (csv.QUOTE_MINIMAL) — confirm intent.
        with open(input_file, "r", encoding="utf-8") as f:
            reader = pd.read_csv(f, sep="\t", index_col=0, quoting=False)
            # lines = []
            # for line in reader:
            #     if sys.version_info[0] == 2:
            #         line = list(unicode(cell, 'utf-8') for cell in line)
            #     lines.append(line)
        return reader

    @classmethod
    def _read_json(cls, file_path):
        """Load and return the parsed JSON content of file_path."""
        with open(file_path, 'r', encoding='utf-8') as f:
            reader = json.load(f)
        return reader
def convert_examples_to_features(data, table_data, tokenizer, max_seq_length, max_action_seq_length, args):
    """Convert raw dataset entries into Feature objects for training.

    For every database entry, processes the schema, then for each dialogue
    turn builds action sequences, masked (pretraining) variants, a pruned
    AST, and wraps everything in an UtteranceFeature; the turns plus the
    schema attributes form one Feature per entry.

    :param data: list of dataset entries, each with a 'database_id' and
        an 'interaction' list of utterances
    :param table_data: mapping from database id to its schema description
    :param tokenizer: BERT tokenizer used when building question features
    :param max_seq_length: maximum question token length (mask padding)
    :param max_action_seq_length: maximum action sequence length
    :param args: runtime configuration (uses args.turn_num)
    :return: list of Feature objects, one per dataset entry
    """
    logger.info('Processing data:')
    rules = RuleGenerator()
    new_examples = []
    for ex_index in tqdm(range(len(data))):
        entry = data[ex_index]
        db_id = entry['database_id']
        # Schema-level processing shared by all turns of this entry.
        process_tab_dict = process_table(table_data[db_id], entry)
        col_table_dict = get_col_table_dict(process_tab_dict['tab_cols'], process_tab_dict['tab_ids'], entry)
        table_col_name = get_table_colNames(process_tab_dict['tab_ids'], process_tab_dict['col_iter'])
        # convert * to [count, number, many]
        process_tab_dict['col_set_iter'][0] = ['count', 'number', 'many']
        interactions = []
        for turn_level, utterance in enumerate(entry['interaction']):
            utterance_arg_origin = copy.deepcopy(utterance['origin_utterance_arg'])
            actions = create_sql_input(rules, utterance, entry, max_action_seq_length, turn_level)
            # print(target_actions)
            process_dict = process(utterance, entry, turn_level)
            # translate question to fr and zh
            # zh_src = translate(process_dict['utterance_arg'], from_lang='english', to_lang='chinese')
            # fr_src = translate(process_dict['utterance_arg'], from_lang='english', to_lang='french')
            # Collect up to args.turn_num preceding turns as BERT context.
            bert_process_dict = []
            if turn_level + 1 >= args.turn_num:
                for u in entry['interaction'][turn_level + 1-args.turn_num:turn_level + 1]:
                    tmp_process_dict = process(u, entry, turn_level)
                    bert_process_dict.append(tmp_process_dict)
                    # utterance_args.extend(u.src_sent)
                    # utterance_args.append([';'])
            else:
                for u in entry['interaction'][:turn_level + 1]:
                    tmp_process_dict = process(u, entry, turn_level)
                    bert_process_dict.append(tmp_process_dict)
            np_features = create_question_np_feature(process_dict, process_tab_dict, max_seq_length, tokenizer, actions)
            bert_features = create_question_np_feature(process_dict, process_tab_dict, max_seq_length, tokenizer, actions)
            # Masked action sequences for the pretraining objective.
            pretrained_tar_action = generate_pretrained_target_action(np_features['target_actions'].squeeze(0),
                                                                     np_features['pre_actions'].squeeze(0),
                                                                     np_features['parent_actions'].squeeze(0))
            masked_target_actions = pretrained_tar_action[0]
            masked_pre_actions = pretrained_tar_action[1]
            masked_parent_actions = pretrained_tar_action[2]
            masked_index = pretrained_tar_action[3]
            # print(np_features['target_actions'])
            # print(utterance['query'])
            copy_ast_arg, processed_tree = prune_tree(np_features, interactions, turn_level, max_action_seq_length, utterance, process_dict)
            # print(copy_ast_arg)
            # ner = [stanford_ner(q) for q in process_dict['utterance_arg']]
            ner = None
            # Flag queries that contain a GROUP BY clause (either case).
            if 'group by' in utterance['query'] or 'GROUP BY' in utterance['query']:
                is_group_by = True
            else:
                is_group_by = False
            utterance_feature = UtteranceFeature(
                src_sent=process_dict['utterance_arg'],
                sql=utterance['query'],
                src_sent_origin = utterance_arg_origin,
                one_hot_type=process_dict['one_hot_type'],
                col_hot_type=process_dict['col_set_type'],
                tokenized_src_sent=process_dict['col_set_type'],
                tgt_actions=np_features['target_actions'],
                pre_actions=np_features['pre_actions'],
                parent_actions=np_features['parent_actions'],
                masked_parent_action=masked_parent_actions,
                masked_pre_action=masked_pre_actions,
                masked_target_action=masked_target_actions,
                masked_index=masked_index,
                processed_ast=processed_tree,
                src_sent_ner=ner,
                copy_ast_arg=copy_ast_arg,
                bert_features=bert_features,
                group_by=is_group_by,
                max_seq_length=max_seq_length
                # zh_src=zh_src,
                # fr_src=fr_src
            )
            interactions.append(utterance_feature)
        # new_feature.question_input_feature = np_features
        new_feature = Feature(
            col_num=len(process_tab_dict['col_set_iter']),
            # deduplicate col name iter
            tab_cols=process_tab_dict['col_set_iter'],
            col_set=entry['col_set'],
            # table_name iter
            table_names=process_tab_dict['table_names'],
            origin_table_name=table_data[db_id]['table_names_original'],
            origin_column_name=[e[1] for e in table_data[db_id]['column_names_original']],
            column_name=[e[1] for e in table_data[db_id]['column_names']],
            table_len=len(process_tab_dict['table_names']),
            col_table_dict=col_table_dict,
            # origin cols
            cols=process_tab_dict['tab_cols'],
            # origin tab id
            tab_ids=process_tab_dict['tab_ids'],
            table_col_name=table_col_name,
            table_col_len=len(table_col_name),
            interactions=interactions,
            db_id=db_id)
        new_examples.append(new_feature)
    return new_examples
def convert_batch_to_features(data, table_data, tokenizer, max_seq_length, max_action_seq_length, args):
    """Convert a single dataset entry into a one-element BatchExample.

    Inference-time counterpart of convert_examples_to_features: `data` is
    one entry (not a list), gold-SQL/action processing is skipped (the
    relevant calls are commented out), and the resulting Feature is
    wrapped in a BatchExample with batch_size=1 and shuffling disabled.

    :param data: a single dataset entry with 'database_id'/'interaction'
    :param table_data: mapping from database id to its schema description
    :param tokenizer: BERT tokenizer used when building question features
    :param max_seq_length: maximum question token length (mask padding)
    :param max_action_seq_length: maximum action sequence length (unused
        here since action creation is disabled)
    :param args: runtime configuration (uses args.turn_num)
    :return: a BatchExample containing one batch with one Feature
    """
    logger.info('Processing data:')
    rules = RuleGenerator()
    new_examples = []
    entry = data
    db_id = entry['database_id']
    # Schema-level processing shared by all turns of this entry.
    process_tab_dict = process_table(table_data[db_id], entry)
    col_table_dict = get_col_table_dict(process_tab_dict['tab_cols'], process_tab_dict['tab_ids'], entry)
    table_col_name = get_table_colNames(process_tab_dict['tab_ids'], process_tab_dict['col_iter'])
    # convert * to [count, number, many]
    process_tab_dict['col_set_iter'][0] = ['count', 'number', 'many']
    interactions = []
    for turn_level, utterance in enumerate(entry['interaction']):
        utterance_arg_origin = copy.deepcopy(utterance['origin_utterance_arg'])
        # actions = create_sql_input(rules, utterance, entry, max_action_seq_length, turn_level)
        # print(target_actions)
        process_dict = process(utterance, entry, turn_level)
        # translate question to fr and zh
        # zh_src = translate(process_dict['utterance_arg'], from_lang='english', to_lang='chinese')
        # fr_src = translate(process_dict['utterance_arg'], from_lang='english', to_lang='french')
        # Collect up to args.turn_num preceding turns as BERT context.
        bert_process_dict = []
        if turn_level + 1 >= args.turn_num:
            for u in entry['interaction'][turn_level + 1-args.turn_num:turn_level + 1]:
                tmp_process_dict = process(u, entry, turn_level)
                bert_process_dict.append(tmp_process_dict)
                # utterance_args.extend(u.src_sent)
                # utterance_args.append([';'])
        else:
            for u in entry['interaction'][:turn_level + 1]:
                tmp_process_dict = process(u, entry, turn_level)
                bert_process_dict.append(tmp_process_dict)
        np_features = create_question_np_feature(process_dict, process_tab_dict, max_seq_length, tokenizer)
        bert_features = create_question_np_feature(process_dict, process_tab_dict, max_seq_length, tokenizer)
        # pretrained_tar_action = generate_pretrained_target_action(np_features['target_actions'].squeeze(0),
        #                                                           np_features['pre_actions'].squeeze(0),
        #                                                           np_features['parent_actions'].squeeze(0))
        # masked_target_actions = pretrained_tar_action[0]
        # masked_pre_actions = pretrained_tar_action[1]
        # masked_parent_actions = pretrained_tar_action[2]
        # masked_index = pretrained_tar_action[3]
        # print(np_features['target_actions'])
        # print(utterance['query'])
        # copy_ast_arg, processed_tree = prune_tree(np_features, interactions, turn_level, max_action_seq_length, utterance, process_dict)
        # print(copy_ast_arg)
        # ner = [stanford_ner(q) for q in process_dict['utterance_arg']]
        # if 'group by' in utterance['query'] or 'GROUP BY' in utterance['query']:
        #     is_group_by = True
        # else:
        #     is_group_by = False
        utterance_feature = UtteranceFeature(
            src_sent=process_dict['utterance_arg'],
            sql=None,
            src_sent_origin=utterance_arg_origin,
            one_hot_type=process_dict['one_hot_type'],
            col_hot_type=process_dict['col_set_type'],
            tokenized_src_sent=process_dict['col_set_type'],
            tgt_actions=None,
            pre_actions=None,
            parent_actions=None,
            masked_parent_action=None,
            masked_pre_action=None,
            masked_target_action=None,
            masked_index=None,
            processed_ast=None,
            src_sent_ner=None,
            copy_ast_arg=None,
            bert_features=bert_features,
            group_by=None,
            max_seq_length=max_seq_length
            # zh_src=zh_src,
            # fr_src=fr_src
        )
        interactions.append(utterance_feature)
    # new_feature.question_input_feature = np_features
    new_feature = Feature(
        col_num=len(process_tab_dict['col_set_iter']),
        # deduplicate col name iter
        tab_cols=process_tab_dict['col_set_iter'],
        col_set=entry['col_set'],
        # table_name iter
        table_names=process_tab_dict['table_names'],
        origin_table_name=table_data[db_id]['table_names_original'],
        origin_column_name=[e[1] for e in table_data[db_id]['column_names_original']],
        column_name=[e[1] for e in table_data[db_id]['column_names']],
        table_len=len(process_tab_dict['table_names']),
        col_table_dict=col_table_dict,
        # origin cols
        cols=process_tab_dict['tab_cols'],
        # origin tab id
        tab_ids=process_tab_dict['tab_ids'],
        table_col_name=table_col_name,
        table_col_len=len(table_col_name),
        interactions=interactions,
        db_id=db_id)
    new_examples.append(new_feature)
    rule = RuleGenerator()
    batch = BatchExample(new_examples, batch_size=1, grammar=rule.rule_dict, args=args, shuffle=False)
    return batch
def to_batch(examples, batch_size, shuffle=True, drop_last=True):
    """Partition examples into consecutive lists of at most batch_size.

    :param examples: list of examples to batch
    :param batch_size: maximum number of examples per batch
    :param shuffle: randomize example order before batching
    :param drop_last: accepted for interface compatibility; not applied
        (matching BatchExample.to_batch, which also ignores it)
    :return: list of batches (lists of examples)
    """
    if shuffle:
        perm = np.random.permutation(len(examples))
    else:
        perm = np.arange(len(examples))
    batched_example = []
    st = 0
    while st < len(examples):
        # BUG FIX: the original never advanced `st` (infinite loop) and
        # compared batch_size against len(perm) instead of st + batch_size.
        ed = min(st + batch_size, len(perm))
        batched_example.append([examples[perm[i]] for i in range(st, ed)])
        st = ed
    return batched_example
def convert_feature_to_dict(*features):
    """Merge lists of feature objects into a dict of attribute -> values.

    :param features: one or more iterables of feature objects
    :return: dict mapping each attribute name to the list of that
        attribute's values, accumulated across all feature iterables
    """
    dict_feature = {}
    for feature in features:
        for f in feature:
            for k, v in f.__dict__.items():
                # BUG FIX: the original re-initialised dict_feature[k] on
                # the first item of every feature list, discarding values
                # accumulated from earlier lists; setdefault accumulates.
                dict_feature.setdefault(k, []).append(v)
    return dict_feature
def padding_schema_input(input, max_length):
    """Pad `input` with 'PAD' entries up to max_length, or truncate it.

    Over-long inputs are returned as a plain slice; shorter inputs are
    deep-copied before padding so the caller's list is never mutated.
    """
    if len(input) > max_length:
        return input[0:max_length]
    padded = copy.deepcopy(input)
    padded += ['PAD'] * (max_length - len(padded))
    return padded
def process_question_input(question_tokens, question_one_hot_type, column_names, table_names, max_seq_length, tokenizer):
    """ ************************ create question input ***********************
    The convention in BERT is:
    (a) For sequence pairs:
    tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
    type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
    (b) For single sequences:
    tokens: [CLS] the dog is hairy . [SEP]
    type_ids: 0 0 0 0 0 0 0
    Where "type_ids" are used to indicate whether this is the first
    sequence or the second sequence. The rule_embedding vectors for `type=0` and
    `type=1` were learned during pre-training and are added to the wordpiece
    rule_embedding vector (and position vector). This is not *strictly* necessary
    since the [SEP] token unambiguously separates the sequences, but it makes
    it easier for the model to learn the concept of sequences.
    For classification tasks, the first vector (corresponding to [CLS]) is
    used as as the "sentence vector". Note that this only makes sense because
    the entire model is fine-tuned.
    Question_tokens:['Give', 'the', 'average', 'number', 'of', ['working', 'horses'], 'on', 'farms']
    Insert ['CLS'] at the beginning of question tokens and '[SEP]' in the end
    After insertion: ['[CLS]' 'Give', 'the', 'average', 'number', 'of', ['working', 'horses'], 'on', 'farms','[SEP]']

    NOTE(review): question_tokens/column_names/table_names are mutated in
    place by the insertions below — callers should pass copies.
    """
    # Truncate to budget first; returns the (possibly shortened) one-hot matrix.
    question_one_hot_type = _truncate_seq_pair(question_tokens, column_names, table_names, question_one_hot_type, max_seq_length)
    # All-zero one-hot row used for the special tokens ([CLS]/[SEP]) and padding.
    padding_one_hot = np.zeros([1, question_one_hot_type.shape[1]])
    question_tokens.insert(0, ['[CLS]'])
    question_tokens.append(['[SEP]'])
    question_one_hot_type = np.insert(question_one_hot_type, 0, padding_one_hot, 0)
    question_one_hot_type = np.append(question_one_hot_type, padding_one_hot, 0)
    # Column names: ['*', ['city', 'id'], ['official', 'name'], 'status']
    # Insert [SEP] tokens between every two column names
    # After insertion:
    # ['*', '[SEP]', ['city', 'id'], '[SEP]', ['official', 'name'], '[SEP]', 'status', '[SEP]']
    for idx, t in enumerate((table_names, column_names)):
        # One fixed one-hot row per schema kind (idx 0 = table, 1 = column).
        type_one_hot = np.eye(question_one_hot_type.shape[1])[idx]
        type = [type_one_hot]*len(t)  # NOTE: shadows the builtin `type`
        type = np.stack(type)
        for i in range(len(t)):
            # Interleave a '[SEP]' after each schema entry, with a zero type row.
            t.insert((2 * (i + 1) - 1), ['[SEP]'])
            type = np.insert(type, (2 * (i + 1) - 1), padding_one_hot, 0)
            # column_id.insert((2 * (i + 1) - 1), 0)
        question_one_hot_type = np.append(question_one_hot_type, type, 0)
    # Concatenate question_tokens with column names
    inputs = copy.deepcopy(question_tokens)
    inputs.extend(column_names)
    inputs.extend(table_names)
    # Convert input tokens to ids
    input_ids = convert_input_to_ids(inputs, tokenizer)
    # create input_mask and padding
    input_mask = [1] * len(input_ids)
    if len(input_ids) < max_seq_length:
        padding = [0] * (max_seq_length - len(input_ids))
        input_ids += padding
        input_mask += padding
    # The one-hot matrix is padded per *entry* (len(inputs)), while ids are
    # padded per wordpiece — presumably the truncation budget keeps both under
    # max_seq_length; the asserts below enforce it.
    if len(inputs) < max_seq_length:
        type_padding = [padding_one_hot.squeeze(0)] * (max_seq_length - len(inputs))
        # question_one_hot_type += padding
        question_one_hot_type = np.append(question_one_hot_type, type_padding, 0)
    assert len(input_ids) == max_seq_length
    assert len(input_mask) == max_seq_length
    assert len(question_one_hot_type) == max_seq_length
    return input_ids, input_mask, question_one_hot_type
def create_schema_input(inputs, input_token_type_ids, input_max_seq_length, tokenizer, col_tab_ids=None):
    """Build BERT-style ids/mask/type-ids for a schema (table/column) sequence.

    NOTE(review): this calls ``convert_input_to_ids`` with four arguments, but
    the active definition of that function takes only ``(inputs, tokenizer)``
    and returns a flat list — the 4-arg variant is commented out elsewhere in
    this file. As written this call would raise a TypeError; confirm which
    version of ``convert_input_to_ids`` is intended.
    """
    # Drop tail entries until the wordpiece budget fits.
    _truncate_tab_col_seq_pair(inputs, input_token_type_ids, input_max_seq_length, col_tab_ids)
    # see notes in method declaration
    expand_tokens_with_sep(inputs, input_token_type_ids, col_tab_ids)
    # convert table input to ids (expects the 4-arg variant returning a tuple)
    ids = convert_input_to_ids(inputs, input_token_type_ids, tokenizer, col_tab_ids)
    input_ids = ids[0]
    input_token_type_ids = ids[1]
    # create table input mask and padding
    input_mask = [1] * len(input_token_type_ids)
    table_padding = [0] * (input_max_seq_length - len(input_token_type_ids))
    if col_tab_ids is not None:
        col_tab_ids = ids[2]
        col_tab_ids_padding = [0] * (input_max_seq_length - len(col_tab_ids))
        col_tab_ids += col_tab_ids_padding
        assert len(col_tab_ids) == input_max_seq_length
    # expand the input with padding to max length
    input_ids += table_padding
    input_mask += table_padding
    input_token_type_ids += table_padding
    # assertion
    assert len(input_ids) == input_max_seq_length
    assert len(input_mask) == input_max_seq_length
    assert len(input_token_type_ids) == input_max_seq_length
    if col_tab_ids is None:
        return input_ids, input_mask, input_token_type_ids
    else:
        return input_ids, input_mask, input_token_type_ids, col_tab_ids
def create_sql_input(rules, utterance, entry, max_action_seq_length, turn_level):
    """Parse one utterance's SQL into fixed-length target/previous/parent
    action sequences.

    :return: (target_actions, pre_actions, parent_actions), each padded to
        max_action_seq_length by convert_rules_to_action_seq.
    """
    parser = FromSQLParser(utterance['sql'], utterance['origin_utterance_arg'], entry['interaction'], turn_level)
    rule_seq, rule_stack = parser.parse_sql(root=None,
                                            rules=rules,
                                            column_names=entry['names'],
                                            table_names=entry['table_names'],
                                            col_set=entry['col_set'],
                                            rule_seq=[],
                                            rule_stack=[],
                                            table_ids=entry['col_table'],
                                            is_root=True)
    # For every produced rule, record its parent/predecessor rule (None at roots).
    parent_rules = [None if node.parent is None else node.parent.rule for node in rule_seq]
    pre_rules = [None if node.pre is None else node.pre.rule for node in rule_seq]
    target_rules = [node.rule for node in rule_seq]
    parent_actions = convert_rules_to_action_seq(parent_rules, rules, max_action_seq_length)
    pre_actions = convert_rules_to_action_seq(pre_rules, rules, max_action_seq_length)
    target_actions = convert_rules_to_action_seq(target_rules, rules, max_action_seq_length)
    return target_actions, pre_actions, parent_actions
def create_question_np_feature(process_dict, process_tab_dict, max_seq_length, tokenizer, actions=None):
    # Tokenize
    # Treat a multi_tokens words as a single word
    # Before: ['Give', 'the', 'average', 'number', 'of', 'working horses', 'on', 'farms']
    # After: ['Give', 'the', 'average', 'number', 'of', ['working', 'horses'], 'on', 'farms']
    # create question input
    if isinstance(process_dict, list):
        # Multi-turn case: concatenate all turns, separated by a 'SEP' token
        # with an all-zero one-hot row.
        utterance_token_copy = []
        one_hot_type_copy = []
        for p in process_dict:
            utterance_token_copy.extend(p['utterance_arg'])
            one_hot_type_copy.append(p['one_hot_type'])
            utterance_token_copy.append(['SEP'])
            one_hot_type_copy.append(np.zeros([1, 6]))
        one_hot_type_copy = np.concatenate(one_hot_type_copy, 0)
    else:
        # Single-turn case: work on deep copies because
        # process_question_input mutates its arguments in place.
        # NOTE(review): column_name_copy / table_name_copy appear to be
        # assigned only in this branch, leaving them undefined when
        # process_dict is a list — TODO confirm the intended indentation.
        utterance_token_copy = copy.deepcopy(process_dict['utterance_arg'])
        column_name_copy = copy.deepcopy(process_dict['col_set_iter'])
        table_name_copy = copy.deepcopy(process_tab_dict['table_names'])
        one_hot_type_copy = copy.deepcopy(process_dict['one_hot_type'])
    # *******question input for bert*******
    utterance_input_ids, \
    utterance_input_mask, utterance_type_ids = process_question_input(question_tokens=utterance_token_copy,
                                                                      question_one_hot_type=one_hot_type_copy,
                                                                      column_names=column_name_copy,
                                                                      table_names=table_name_copy,
                                                                      max_seq_length=max_seq_length,
                                                                      tokenizer=tokenizer)
    if actions:
        # Supervised case: pack the action sequences in and convert to numpy.
        utterance_input_feature = QuestionInputFeature(utterance_input_ids, utterance_input_mask, utterance_type_ids, *actions)
        np_features = convert_to_question_np_feature(utterance_input_feature)
    else:
        # Inference case: return the raw feature object unconverted.
        utterance_input_feature = QuestionInputFeature(utterance_input_ids, utterance_input_mask, utterance_type_ids)
        np_features = utterance_input_feature
    return np_features
def convert_to_question_np_feature(utterance_input_feature):
    """Convert a QuestionInputFeature object to a dict of numpy arrays.

    Action-sequence attributes (``target_actions``/``pre_actions``/
    ``parent_actions``) are expanded into matrices of shape
    [seq_len, num_action_fields]; every other attribute is wrapped with
    ``np.asarray`` as-is.

    Cleanup: removed the unused ``target_action_tmp`` local and the redundant
    ``dict_features[name]`` re-lookup (the loop variable already holds it).
    """
    dict_features = convert_feature_to_dict([utterance_input_feature])
    np_features = {}
    for name, obj in dict_features.items():
        if name in ('target_actions', 'pre_actions', 'parent_actions'):
            target_seq = []
            for action_seq in obj:
                if action_seq:
                    # One column per Action attribute, one row per step.
                    keys = action_seq[0].__dict__.keys()
                    action_matrixs = [[action.__dict__[key] for action in action_seq] for key in keys]
                    matrix_trans = [np.asarray(matrix).reshape((-1, 1)) for matrix in action_matrixs]
                    target_seq.append(np.concatenate(matrix_trans, axis=1))
            # shape: [max_seq_action_len, action_type]
            np_features[name] = np.asarray(target_seq)
        else:
            np_features[name] = np.asarray(obj)
    return np_features
def flatten_ids_list(ls):
    """Flatten exactly one level of list nesting.

    Example: [1, 1, [1, 1], 1] -> [1, 1, 1, 1, 1]

    :param ls: list whose elements may themselves be lists
    :return: new list with the nested lists expanded (non-list elements,
        including tuples, are kept as-is)
    """
    flat = []
    for element in ls:
        flat.extend(element if isinstance(element, list) else [element])
    return flat
def expand_tokens_with_sep(tokens, token_types_ids, col_tab_id=None):
    """Interleave BERT special tokens into *tokens* (in place).

    Produces ['[CLS]', t0, '[SEP]', t1, '[SEP]', ...] and inserts a 0 into
    *token_types_ids* (and -1 into *col_tab_id*, when given) at every special
    token position.

    :param tokens: token list, mutated in place
    :param token_types_ids: parallel type-id list, mutated in place
    :param col_tab_id: optional parallel table-id-per-column list
    :return: None
    """
    interleaved = ['[CLS]']
    for tok in list(tokens):
        interleaved.append(tok)
        interleaved.append('[SEP]')
    tokens[:] = interleaved  # keep the same list object for the caller
    n_specials = (len(tokens) + 1) // 2
    for pos in range(n_specials):
        token_types_ids.insert(2 * pos, 0)
    if col_tab_id is not None:
        # -1 marks positions that do not correspond to a real column
        for pos in range(n_specials):
            col_tab_id.insert(2 * pos, -1)
# def convert_input_to_ids(inputs, input_type_ids, tokenizer, col_tab_ids=None):
# input_ids = []
# for index, token in enumerate(inputs):
# if isinstance(token, list):
# ids = tokenizer.convert_tokens_to_ids(token)
# input_ids.extend(ids)
# input_type_ids[index] = [input_type_ids[index]] * len(ids)
# if col_tab_ids is not None:
# col_tab_ids[index] = [col_tab_ids[index]] * len(ids)
#
# else:
# ids = tokenizer.convert_tokens_to_ids(token.split(' '))
# input_ids.extend(ids)
# input_type_ids = flatten_ids_list(input_type_ids)
# if col_tab_ids is not None:
# col_tab_id = flatten_ids_list(col_tab_ids)
# return input_ids, input_type_ids, col_tab_id
# else:
# return input_ids, input_type_ids
def convert_input_to_ids(inputs, tokenizer):
    """Map every token (or token group) in *inputs* through the tokenizer and
    return a single flat id list.

    :param inputs: sequence of tokens; each element is passed directly to
        ``tokenizer.convert_tokens_to_ids``
    :param tokenizer: object exposing ``convert_tokens_to_ids``
    :return: flat list of ids
    """
    return [wp_id
            for token in inputs
            for wp_id in tokenizer.convert_tokens_to_ids(token)]
def _truncate_seq_pair(tokens_a, tokens_b, tokens_c, token_a_type, max_length):
    """Truncates a sequence pair in place to the maximum length.
    This is a simple heuristic which will always truncate the longer sequence
    one token at a time. This makes more sense than truncating an equal percent
    of tokens from each, since if one sequence is very short then each token
    that's truncated likely contains more information than a longer sequence.

    Budget: wordpieces of a + (wordpieces of b + one [SEP] per b entry) +
    (wordpieces of c + one [SEP] per c entry) + 2 specials.
    Returns the (possibly shortened) one-hot matrix for tokens_a.

    NOTE(review): tokens_c is counted but never popped; if tokens_a and
    tokens_b are both exhausted while tokens_c alone exceeds the budget,
    this loop would not terminate — TODO confirm callers guarantee this
    cannot happen.
    """
    while True:
        len_tok_a = len(flatten_ids_list(tokens_a))
        len_tok_b = len(flatten_ids_list(tokens_b))
        len_tok_c = len(flatten_ids_list(tokens_c))
        total_length = len_tok_a + (len_tok_b+len(tokens_b))+(len_tok_c+len(tokens_c)) + 2
        if total_length <= max_length:
            break
        if len(tokens_a) > len(tokens_b):
            # keep the question's one-hot rows aligned with its tokens
            tokens_a.pop()
            token_a_type = np.delete(token_a_type, -1, 0)
        else:
            tokens_b.pop()
    return token_a_type
def _truncate_tab_col_seq_pair(tokens_a, token_type, token_max_length, col_tab_ids=None):
    """Truncate *tokens_a* (and its parallel lists) in place to fit the budget.

    The budget counts one id per flattened wordpiece plus one '[SEP]' per
    entry plus a single '[CLS]'; entries are dropped from the tail, one at a
    time, keeping *token_type* (and *col_tab_ids*, when given) aligned.
    """
    while len(flatten_ids_list(tokens_a)) + len(tokens_a) + 1 > token_max_length:
        tokens_a.pop()
        token_type.pop()
        if col_tab_ids is not None:
            col_tab_ids.pop()
def convert_rules_to_action_seq(rules: list, rules_gen: RuleGenerator, max_action_seq_length):
    """Convert grammar productions into a fixed-length Action sequence.

    The output starts with an SOS action, has one action per input rule
    (column / table / value rules are specialized, everything else is a
    plain grammar rule), ends with EOS, and is padded (or truncated) to
    ``max_action_seq_length`` with NONE_TYPE/PAD actions.
    """
    productions = rules_gen.grammar.productions()  # NOTE(review): unused
    rule_dict = rules_gen.get_rule_ids()
    rule_dict = {v:k for k, v in rule_dict.items()}  # invert to rule -> id
    # NOTE(review): c/t/y below are built but never used; the branches match
    # on rule.lhs() instead.
    c = Production(lhs=Nonterminal('C'), rhs=[Nonterminal('column')])
    t = Production(lhs=Nonterminal('T'), rhs=[Nonterminal('t')])
    y = Production(lhs=Nonterminal('Y'), rhs=[Nonterminal('val')])
    c_id = rules_gen.get_column_rule_id()
    t_id = rules_gen.get_table_rule_id()
    y_id = rules_gen.get_value_rule_id()
    actions = []
    # Sequence always begins with a start-of-sequence marker.
    sos = Action(act_type=RULE_TYPE, rule_id=SOS_id, table_id=PAD_id, column_id=PAD_id, action_seq_mask=1)
    actions.append(sos)
    for rule in rules:
        if rule is None:
            # None marks "no parent/predecessor" — encoded like SOS.
            act = Action(act_type=RULE_TYPE, rule_id=SOS_id, table_id=PAD_id, column_id=PAD_id, action_seq_mask=1)
        elif rule.lhs() == Nonterminal('C'):
            # Column selection: the rhs carries the concrete column id.
            col_id = rule.rhs()[0]
            act = Action(act_type=COL_TYPE, rule_id=c_id, table_id=PAD_id, column_id=col_id, action_seq_mask=1)
        elif rule.lhs() == Nonterminal('T'):
            # Table selection: the rhs carries the concrete table id.
            tab_id = rule.rhs()[0]
            act = Action(act_type=TAB_TYPE, rule_id=t_id, table_id=tab_id, column_id=PAD_id, action_seq_mask=1)
        elif rule.lhs() == Nonterminal('Y'):
            # Value span: the rhs string encodes a (start, end) pair.
            # SECURITY NOTE(review): eval on rule text — safe only if the
            # grammar is generated internally, never from user input.
            val_id = eval(rule.rhs()[0])
            act = Action(act_type=VAL_TYPE, rule_id=y_id, table_id=PAD_id, column_id=PAD_id, action_seq_mask=1,
                         val_id_start=val_id[0], val_id_end=val_id[1])
            # TODO copy type
        else:
            # Ordinary grammar rule: look up its id and tag it in place.
            rule_id = rule_dict[rule]
            rule.rule_type = RULE_TYPE
            act = Action(act_type=RULE_TYPE, rule_id=rule_id, table_id=PAD_id, column_id=PAD_id, action_seq_mask=1)
        actions.append(act)
    act = Action(act_type=RULE_TYPE, rule_id=EOS_id, table_id=PAD_id, column_id=PAD_id, action_seq_mask=1)
    actions.append(act)
    # expand action sequence to max action sequence length
    if len(actions) > max_action_seq_length:
        # NOTE(review): truncation can cut off the EOS action.
        actions = actions[0:max_action_seq_length]
    else:
        while len(actions) < max_action_seq_length:
            act = Action(act_type=NONE_TYPE, rule_id=PAD_id, table_id=PAD_id, column_id=PAD_id, action_seq_mask=PAD_id)
            actions.append(act)
    return actions
def convert_action_seq_to_str(actions):
    """Join the second field of every action row into a comma-separated string.

    :param actions: iterable of indexable action rows
    :return: e.g. '5,7,12'
    """
    return ','.join(str(action[1]) for action in actions)
def read_train_json(input_file):
    """Load and return the JSON content of *input_file* (UTF-8)."""
    with open(input_file, encoding="utf-8") as f:
        return json.load(f)
# def convert_db_name_to_id(db_name):
# db_id_dict = read_db_id_from_file("../data/db_ids.json")
# dict = {v:k for k, v in db_id_dict.items()}
# id = dict[db_name]
# return int(id)
# def convert_id_to_db_name(db_id):
# db_id_dict = read_db_id_from_file("../data/db_ids.json")
# db_name = db_id_dict[str(db_id)]
# return db_name
def generate_pretrained_target_action(target_actions, pre_actions, parent_actions):
    """Mask one random action position for masked-action pretraining.

    The sequence length is read from column 4 (action_seq_mask) of
    *target_actions*; an index in [1, seq_len] is drawn and that row is
    zeroed in deep copies of all three sequences.

    :return: (masked_target, masked_pre, masked_parent, masked_index)
    """
    masked_targets = copy.deepcopy(target_actions)
    masked_pres = copy.deepcopy(pre_actions)
    masked_parents = copy.deepcopy(parent_actions)
    seq_len = int(np.sum(target_actions[:, 4]))
    masked_index = random.randint(1, seq_len)
    blank_row = np.zeros(7)
    for seq in (masked_targets, masked_pres, masked_parents):
        seq[masked_index, :] = blank_row
    return masked_targets, masked_pres, masked_parents, masked_index
def translate(sents, from_lang, to_lang):
    """Translate every word of every argument list in *sents*.

    :param sents: iterable of word lists
    :return: list of lists of translated words, same shape as *sents*
    """
    translator = Translator(from_lang=from_lang, to_lang=to_lang)
    translated = []
    for arg in sents:
        translated.append([translator.translate(word) for word in arg])
    return translated
# def stanford_ner(sents):
# parser = CoreNLPParser(url='http://localhost:9000', tagtype='ner')
# ner = parser.tag(sents)
# return ner
def prune_tree(np_features, interactions, turn_level, max_action_seq_length, utterance, process_dict):
    """Rebuild the AST of the current turn, find subtrees copied from earlier
    turns, and locate their spans in the current action sequence.

    :return: (copy_args, processed_tree) where copy_args is the filtered list
        of non-overlapping copy spans and processed_tree the deduplicated
        subtree list from process_ast.
    """
    # NOTE(review): pre_actions/parent_actions are passed here in the opposite
    # order to build_tree_from_action_seq's (parent_actions, pre_actions)
    # signature — confirm which side is wrong.
    ast = build_tree_from_action_seq(np_features['target_actions'].squeeze(0), np_features['pre_actions'].squeeze(0),
                                     np_features['parent_actions'].squeeze(0), max_action_seq_length)
    processed_tree = process_ast(ast)
    copy_dict = {}  # maps earlier turn index -> indices of its subtrees reused now
    if turn_level > 0:
        for t, u in enumerate(interactions[:turn_level]):
            query_tree = [pt[1][0] for pt in processed_tree]
            value_tree = [pt[1][0] for pt in u.processed_ast]
            copy_list = is_copy_from_pre(query_tree, value_tree)
            if len(copy_list) > 0:
                copy_list = list(copy_list)
                # # only keep longest matched tree
                # max_index = max((len(value_tree[pt]), i) for i, pt in enumerate(copy_list))
                # copy_dict[t] = copy_list[max_index[1]]
                copy_dict[t] = copy_list
    copy_ast_arg = []
    for k, c_l in copy_dict.items():
        for v in c_l:
            copy_pos = {}
            origin_actions = copy.deepcopy(np_features['target_actions'].squeeze(0))
            origin_actions_str = convert_action_seq_to_str(origin_actions.tolist())
            ps = copy.deepcopy(interactions[k].processed_ast)
            query_actions = [a[1][0] for a in ps]
            query_actions_str = convert_action_seq_to_str(query_actions[v])
            # Locate the copied subsequence textually in the comma-joined ids.
            start = origin_actions_str.find(query_actions_str)
            if start != -1:
                # Convert the character offset back to an action index by
                # counting the commas before it.
                start = len(origin_actions_str[:start-1].split(','))
                end = start + len(query_actions_str.split(','))
                copy_pos['start'] = start
                # same to python boundary rules, except right
                copy_pos['end'] = end
                # the position of ast be copied [turn_level, id]
                copy_pos['arg'] = (k, v)
                copy_pos['seq'] = query_actions[v]
                copy_ast_arg.append(copy_pos)
    # Drop overlapping spans, preferring shorter ones first.
    copy_args = filter_copy_args_with_seq(copy_ast_arg)
    # only keep most recently turn that copied from
    # if len(copy_ast_arg) > 0:
    #     copy_ast_arg = max(((i['arg'][0]), i) for i in copy_ast_arg)[1]
    # else:
    #     copy_ast_arg = None
    return copy_args, processed_tree
def filter_copy_args_with_seq(copy_args):
    """Keep only copy candidates whose [start, end) spans do not overlap.

    Candidates are considered shortest-sequence first; a candidate is kept
    only when its half-open interval does not overlap any already kept one.
    """
    kept = []
    taken_spans = []
    for candidate in sorted(copy_args, key=lambda a: len(a['seq'])):
        span = Interval(candidate['start'], candidate['end'],
                        lower_closed=True, upper_closed=False)
        if not is_overlaps(taken_spans, span):
            taken_spans.append(span)
            kept.append(candidate)
    return kept
def is_overlaps(intervals, interval):
    """Return True if *interval* overlaps any interval in *intervals*."""
    return any(existing.overlaps(interval) for existing in intervals)
def build_tree_from_action_seq(target_actions, parent_actions, pre_actions, max_output_length):
    """Reconstruct the grammar AST from flat action matrices.

    Walks the action rows (starting at row 1, row 0 being SOS) and grows a
    TreeWithPara rooted at 'Z', expanding grammar rules and filling in
    table/column ids at 'T'/'C' nodes.

    NOTE(review): the caller prune_tree passes (target, pre, parent) while
    this signature is (target, parent, pre) — the two middle arguments appear
    swapped on one side; confirm which order is intended.

    :return: the root tree, or None if the budget ran out mid-expansion.
    """
    r = RuleGenerator()
    asts = TreeWithPara('Z', [])
    cur_node = asts
    for i in range(max_output_length):
        i_cur_node = cur_node
        # Row 0 is SOS, so action i lives at row i+1; column 1 is the rule id.
        if target_actions[i+1, 1] == EOS_id:
            break
        else:
            if i_cur_node.label() == 'T':
                # Table node: column 2 carries the concrete table id.
                tab_id = target_actions[i+1, 2]
                i_cur_node.append(tab_id)
                i_cur_node.visited = True
                i_cur_node.set_rule_id(r.get_table_rule_id())
                i_cur_node.target_action = target_actions[i+1, :]
                i_cur_node.pre_action = pre_actions[i+1, :]
                i_cur_node.parent_action = parent_actions[i+1, :]
                i_cur_node = TreeWithPara.next_unvisited_b(i_cur_node)
            elif i_cur_node.label() == 'C':
                # Column node: column 3 carries the concrete column id.
                col_id = target_actions[i+1, 3]
                i_cur_node.append(col_id)
                i_cur_node.visited = True
                i_cur_node.set_rule_id(r.get_column_rule_id())
                i_cur_node.target_action = target_actions[i+1, :]
                i_cur_node.pre_action = pre_actions[i+1, :]
                i_cur_node.parent_action = parent_actions[i+1, :]
                i_cur_node = TreeWithPara.next_unvisited_b(i_cur_node)
            else:
                # Ordinary grammar node: expand by the rule's RHS.
                rule_id = target_actions[i+1, 1]
                rule = r.get_rule_by_index(rule_id)
                i_cur_node.set_rule_id(rule_id)
                i_cur_node.target_action = target_actions[i+1, :]
                i_cur_node.pre_action = pre_actions[i + 1, :]
                i_cur_node.parent_action = parent_actions[i + 1, :]
                if i == max_output_length - 1:
                    # Out of budget before the tree is complete — signal failure.
                    asts = None
                else:
                    expand_tree(i_cur_node, rule, r.non_terminals)
                    if i_cur_node.is_all_visited():
                        i_cur_node = TreeWithPara.next_unvisited_b(i_cur_node)
                    else:
                        left_most_node = i_cur_node.left_most_child_unvisited()
                        i_cur_node = left_most_node
        cur_node = i_cur_node
    return asts
def expand_tree(node, rule, non_terminals):
    """Attach one child per RHS symbol of *rule* to *node* and mark it visited.

    Non-terminal symbols become empty TreeWithPara subtrees; terminals are
    appended as plain strings.
    """
    for symbol in rule.rhs():
        if symbol in non_terminals:
            node.append(TreeWithPara(str(symbol), []))
        else:
            node.append(str(symbol))
    node.visited = True
def process_ast(ast):
    """Extract the interesting subtrees of *ast* and deduplicate them.

    For every Select/Filter/R/A/V/X subtree, compute its action sequences and
    schema alignment; drop entries whose target-action matrix duplicates an
    earlier one.

    :return: list of (subtree, actions, column_alignment, table_alignment)
    """
    wanted_labels = ('Select', 'Filter', 'R', 'A', 'V', 'X')
    subtrees = list(ast.subtrees(lambda t: t.label() in wanted_labels))
    prune_result = []
    for subtree in subtrees:
        actions = trans_ast_to_action_seq(subtree)
        column_alignment, table_alignment = schema_alignment(actions[0])
        prune_result.append((subtree, actions, column_alignment, table_alignment))
    deduplicated_result = []
    for candidate in prune_result:
        if not contains_nparray(deduplicated_result, candidate):
            deduplicated_result.append(candidate)
    return deduplicated_result
def contains_nparray(array, item):
    """Return True if *array* already holds an entry whose action matrix
    (``entry[1][0]``) equals *item*'s, element-wise."""
    target = item[1][0]
    for entry in array:
        candidate = entry[1][0]
        if candidate.shape == target.shape and (candidate == target).all():
            return True
    return False
def trans_ast_to_action_seq(ast):
    """Linearize an (already action-annotated) AST back into action matrices.

    Performs a depth-first, leftmost-unvisited traversal, collecting the
    target/pre/parent action rows stored on each node by
    build_tree_from_action_seq. 'T' and 'C' nodes' actions are recorded when
    the node is first reached; plain rule nodes when they are descended into.

    :return: (target_actions, pre_actions, parent_actions) as numpy arrays
    """
    target_actions = []
    pre_actions = []
    parent_actions = []
    cur_node = ast
    ast.set_tree_to_default()  # clear visited flags before traversal
    while True:
        if ast.is_all_visited_b():
            break
        if cur_node.label() == 'T':
            cur_node.visited = True
            target_actions.append(cur_node.target_action)
            pre_actions.append(cur_node.pre_action)
            parent_actions.append(cur_node.parent_action)
            cur_node = TreeWithPara.next_unvisited_b(cur_node)
        elif cur_node.label() == 'C':
            cur_node.visited = True
            target_actions.append(cur_node.target_action)
            pre_actions.append(cur_node.pre_action)
            parent_actions.append(cur_node.parent_action)
            cur_node = TreeWithPara.next_unvisited_b(cur_node)
        else:
            if cur_node.is_all_visited_b():
                # Subtree done: hop to the next unvisited node and record it.
                cur_node = TreeWithPara.next_unvisited_b(cur_node)
                cur_node.visited = True
                target_actions.append(cur_node.target_action)
                pre_actions.append(cur_node.pre_action)
                parent_actions.append(cur_node.parent_action)
            else:
                # Descend to the leftmost unvisited child.
                left_most_node = cur_node.left_most_child_unvisited_b()
                cur_node = left_most_node
                cur_node.visited = True
                if cur_node.label() == 'T' or cur_node.label() == 'C':
                    # T/C actions are appended by the branches above on the
                    # next iteration — skip here to avoid double-recording.
                    pass
                else:
                    target_actions.append(cur_node.target_action)
                    pre_actions.append(cur_node.pre_action)
                    parent_actions.append(cur_node.parent_action)
    target_actions = np.asarray(target_actions)
    pre_actions = np.asarray(pre_actions)
    parent_actions = np.asarray(parent_actions)
    return target_actions, pre_actions, parent_actions
def schema_alignment(actions):
    """Collect the column ids and table ids referenced by an action matrix.

    :param actions: 2D action matrix (rule id in column 1, table id in
        column 2, column id in column 3)
    :return: (column_ids, table_ids)
    """
    gen = RuleGenerator()
    column_rule = gen.get_column_rule_id()
    table_rule = gen.get_table_rule_id()
    alignment_column = []
    alignment_table = []
    for row in actions:
        if row[1] == column_rule:
            alignment_column.append(row[3])
        elif row[1] == table_rule:
            alignment_table.append(row[2])
    return alignment_column, alignment_table
# def stanford_ner(sents):
# parser = CoreNLPParser(url='http://10.249.149.2:9000', tagtype='ner')
# ner = parser.tag(sents)
# ner_tag = [n[1] for n in ner]
# return ner_tag
# def coref_resolution(sents):
#
# nlp = spacy.load('en')
# neuralcoref.add_to_pipe(nlp, greedyness=0.55)
# doc = nlp(sents)
# if(doc._.has_coref):
# print(sents)
# print(doc._.coref_clusters)
# print(doc._.coref_resolved)
def is_copy_from_pre(query, value):
    """Indices of entries in *value* whose action matrix exactly matches some
    entry of *query*.

    :param query: list of numpy action matrices from the current turn
    :param value: list of numpy action matrices from an earlier turn
    :return: set of indices into *value*
    """
    matched = set()
    for idx, candidate in enumerate(value):
        for target in query:
            if candidate.shape == target.shape and (candidate == target).all():
                matched.add(idx)
                break
    return matched
if __name__ == '__main__':
    # Build the CLI on top of the shared project argument parser.
    arg_parser = init_arg_parser()
    arg_parser.add_argument('--data_path', default='../data/dev.json', type=str, help='dataset')
    # arg_parser.add_argument('--train_path', default='../data/train.json', type=str, help='dataset')
    arg_parser.add_argument('--table_path', default='../data/tables.json', type=str, help='table dataset')
    # arg_parser.add_argument('--dev_output', default='../data/dev.pkl', type=str, help='output data')
    arg_parser.add_argument('--output', default='../data/dev.pkl', type=str, help='output data')
    args = arg_parser.parse_args()
    print('loading')
    # Preprocess the raw examples (tokenization, schema linking, ...).
    data = SparcProcessor().get_examples(args)
    print('loaded')
    rule = RuleGenerator()
    # Batch the preprocessed examples without shuffling and pickle them for
    # later training/evaluation runs.
    dev_data_batch = BatchExample(data, batch_size=args.batch_size, grammar=rule.rule_dict, args=args, shuffle=False)
    # print(b.batches[0].col_set)
    # print(b.batches[0].tgt_actions.cpu().tolist())
    # print(b.batches[0].col_table_dict)
    with open(args.output, 'wb') as f:
        pickle.dump(dev_data_batch, f)
    # coref_resolution(['What are all the airlines .', 'Of it , which is Jetblue Airways ?'])
|
#!/usr/bin/env python
#
# Convert an image file to hexadecimal words of pre-multiplied RGBA data.
#
# -- Micah Dowty <micah@vmware.com>
#
import sys

# Modernized: classic PIL 1.x exposed a top-level "Image" module; Pillow (the
# maintained fork) uses the PIL namespace. The old `print` statement was
# Python-2-only syntax.
from PIL import Image

# How many pixel words to emit per output line.
WORDS_PER_LINE = 6

# Force RGBA so getdata() always yields 4-tuples, even for RGB/P/L inputs
# (identity for images that are already RGBA).
im = Image.open(sys.argv[1]).convert("RGBA")
sys.stderr.write("width=%d height=%d\n" % im.size)

words = []

def flush():
    """Print the buffered words as one line and clear the buffer."""
    print(" ".join(words))
    del words[:]

for r, g, b, a in im.getdata():
    # Pre-multiply each color channel by alpha.
    r = r * a // 255
    g = g * a // 255
    b = b * a // 255
    words.append("0x%02x%02x%02x%02x," % (a, r, g, b))
    if len(words) >= WORDS_PER_LINE:
        flush()
flush()
|
import logging
import pytest
import os
import shutil
import pandas as pd
import numpy as np
from numpy import dot
from numpy.linalg import norm
# import models is necessary to initalize the model steps with orca
from activitysim.abm import models
from activitysim.core import pipeline, config
from activitysim.core import tracing
logger = logging.getLogger(__name__)
# Used by conftest.py initialize_pipeline method
@pytest.fixture(scope="module")
def module() -> str:
    """
    Pytest fixture naming the data folder used to initialize these tests.

    :return: folder name for the test data
    """
    data_folder_name = "non_mandatory_tour_frequency"
    return data_folder_name
# Used by conftest.py initialize_pipeline method
@pytest.fixture(scope="module")
def tables(prepare_module_inputs) -> dict[str, str]:
    """
    Pytest fixture of "mock" tables used to build the pipeline dataframes.

    :return: dict mapping table name -> its index column
    """
    index_column_by_table = {
        "land_use": "MAZ_ORIGINAL",
        "persons": "person_id",
        "households": "household_id",
        "accessibility": "MAZ_ORIGINAL",
        "tours": "tour_id",
    }
    return index_column_by_table
# Used by conftest.py initialize_pipeline method
# Set to true if you need to read skims into the pipeline
@pytest.fixture(scope="module")
def initialize_network_los() -> bool:
    """
    Pytest fixture: whether network skims should be read from the fixtures
    test data folder. This suite does not need skims.

    :return: bool
    """
    read_skims = False
    return read_skims
@pytest.fixture(scope="module")
def load_checkpoint() -> str:
    """
    Name of the pipeline checkpoint to load when reconnecting.

    Fix: the return annotation previously said ``bool`` although the fixture
    returns a checkpoint name string.
    """
    return "initialize_households"
# make a reconnect_pipeline internal to test module
@pytest.mark.skipif(
    os.path.isfile("test/non_mandatory_tour_frequency/output/pipeline.h5"),
    # Fix: corrected the typo "alreayd exist" in the user-visible skip reason.
    reason="no need to recreate pipeline store if already exists",
)
def test_prepare_input_pipeline(initialize_pipeline: pipeline.Pipeline, caplog):
    """Run landuse/household initialization and dump the resulting person and
    household tables to CSV for inspection."""
    caplog.set_level(logging.INFO)
    # run model step
    pipeline.run(models=["initialize_landuse", "initialize_households"])
    # get the updated pipeline data
    person_df = pipeline.get_table("persons")
    person_df.to_csv("test/non_mandatory_tour_frequency/output/person.csv", index=False)
    # get the updated pipeline data
    household_df = pipeline.get_table("households")
    household_df.to_csv(
        "test/non_mandatory_tour_frequency/output/household.csv", index=False
    )
    pipeline.close_pipeline()
def test_nmtf_from_pipeline(reconnect_pipeline: pipeline.Pipeline, caplog):
    """Run the non_mandatory_tour_frequency step from the saved checkpoint and
    validate its person-level choices against the TM2 target shares."""
    caplog.set_level(logging.INFO)
    # run model step, resuming after household initialization
    pipeline.run(
        models=["non_mandatory_tour_frequency"], resume_after="initialize_households"
    )
    # get the updated pipeline data
    person_df = pipeline.get_table("persons")
    # get the updated pipeline data
    # NOTE(review): household_df is fetched but not used below.
    household_df = pipeline.get_table("households")
    ############################
    # person nmtf validation
    ############################
    # nmtf person result from the model
    logger.info("person nmtf pattern validation")
    # Target comes from the TM2 run's inmf_choice column already merged onto
    # the person table; simulated comes from the activitysim model output.
    target_key = "inmf_choice"
    simulated_key = "non_mandatory_tour_frequency"
    similarity_threshold = 0.99
    simulated_df = create_summary(
        person_df, key=simulated_key, out_col="Simulated_Share"
    )
    # result from the TM2 run
    target_df = create_summary(person_df, key=target_key, out_col="Target_Share")
    # compare simulated and target results
    similarity_value = compare_simulated_against_target(
        target_df, simulated_df, target_key, simulated_key
    )
    # if the cosine_similarity >= threshold then the simulated and target results are "similar"
    assert similarity_value >= similarity_threshold
# fetch/prepare existing files for model inputs
# e.g. read accessibilities.csv from ctramp result, rename columns, write out to accessibility.csv which is the input to activitysim
@pytest.fixture(scope="module")
def prepare_module_inputs() -> None:
    """
    copy input files from sharepoint into test folder

    create unique person id in person file

    Also merges TM2 simulated results (autos, cdap, work/school locations,
    choices) onto the popsyn household/person tables so downstream steps can
    compare against them, and concatenates the TM2 individual and joint tour
    files into a single tours.csv.

    :return: None
    """
    # https://wsponlinenam.sharepoint.com/sites/US-TM2ConversionProject/Shared%20Documents/Forms/
    # AllItems.aspx?id=%2Fsites%2FUS%2DTM2ConversionProject%2FShared%20Documents%2FTask%203%20ActivitySim&viewid=7a1eaca7%2D3999%2D4d45%2D9701%2D9943cc3d6ab1
    test_dir = os.path.join("test", "non_mandatory_tour_frequency", "data")
    accessibility_file = os.path.join(test_dir, "tm2_outputs", "accessibilities.csv")
    household_file = os.path.join(test_dir, "popsyn", "households.csv")
    person_file = os.path.join(test_dir, "popsyn", "persons.csv")
    landuse_file = os.path.join(test_dir, "landuse", "maz_data_withDensity.csv")
    # Copy raw inputs into the filenames activitysim expects.
    shutil.copy(accessibility_file, os.path.join(test_dir, "accessibility.csv"))
    shutil.copy(household_file, os.path.join(test_dir, "households.csv"))
    shutil.copy(person_file, os.path.join(test_dir, "persons.csv"))
    shutil.copy(landuse_file, os.path.join(test_dir, "land_use.csv"))
    # add original maz id to accessibility table
    land_use_df = pd.read_csv(os.path.join(test_dir, "land_use.csv"))
    accessibility_df = pd.read_csv(os.path.join(test_dir, "accessibility.csv"))
    accessibility_df = pd.merge(
        accessibility_df,
        land_use_df[["MAZ", "MAZ_ORIGINAL"]].rename(columns={"MAZ": "mgra"}),
        how="left",
        on="mgra",
    )
    accessibility_df.to_csv(os.path.join(test_dir, "accessibility.csv"), index=False)
    # currently household file has to have these two columns, even before annotation
    # because annotate person happens before household and uses these two columns
    # TODO find a way to get around this
    ####
    household_df = pd.read_csv(os.path.join(test_dir, "households.csv"))
    household_columns_dict = {"HHID": "household_id", "MAZ": "home_zone_id"}
    household_df.rename(columns=household_columns_dict, inplace=True)
    # Merge TM2 simulated household attributes onto the popsyn households.
    tm2_simulated_household_df = pd.read_csv(
        os.path.join(test_dir, "tm2_outputs", "householdData_1.csv")
    )
    tm2_simulated_household_df.rename(columns={"hh_id": "household_id"}, inplace=True)
    household_df = pd.merge(
        household_df,
        tm2_simulated_household_df[
            [
                "household_id",
                "autos",
                "automated_vehicles",
                "transponder",
                "cdap_pattern",
                "jtf_choice",
            ]
        ],
        how="inner",  # tm2 is not 100% sample run
        on="household_id",
    )
    household_df.to_csv(os.path.join(test_dir, "households.csv"), index=False)
    person_df = pd.read_csv(os.path.join(test_dir, "persons.csv"))
    person_columns_dict = {"HHID": "household_id", "PERID": "person_id"}
    person_df.rename(columns=person_columns_dict, inplace=True)
    # Merge TM2 simulated person-level results (choices, logsums, ...).
    tm2_simulated_person_df = pd.read_csv(
        os.path.join(test_dir, "tm2_outputs", "personData_1.csv")
    )
    tm2_simulated_person_df.rename(columns={"hh_id": "household_id"}, inplace=True)
    person_df = pd.merge(
        person_df,
        tm2_simulated_person_df[
            [
                "household_id",
                "person_id",
                "person_num",
                "type",
                "value_of_time",
                "activity_pattern",
                "imf_choice",
                "inmf_choice",
                "fp_choice",
                "reimb_pct",
                "workDCLogsum",
                "schoolDCLogsum",
            ]
        ],
        how="inner",  # tm2 is not 100% sample run
        on=["household_id", "person_id"],
    )
    # get tm2 simulated workplace and school location results
    tm2_simulated_wsloc_df = pd.read_csv(
        os.path.join(test_dir, "tm2_outputs", "wsLocResults_1.csv")
    )
    tm2_simulated_wsloc_df.rename(
        columns={"HHID": "household_id", "PersonID": "person_id"}, inplace=True
    )
    person_df = pd.merge(
        person_df,
        tm2_simulated_wsloc_df[
            [
                "household_id",
                "person_id",
                "WorkLocation",
                "WorkLocationLogsum",  # this is the same as `workDCLogsum` in tm2 person output
                "SchoolLocation",
                "SchoolLocationLogsum",  # this is the same as `schoolDCLogsum` in tm2 person output
            ]
        ],
        how="inner",  # ctramp might not be 100% sample run
        on=["household_id", "person_id"],
    )
    person_df.to_csv(os.path.join(test_dir, "persons.csv"), index=False)
    ## get tour data from tm2 output (individual + joint tours combined)
    tm2_simulated_indiv_tour_df = pd.read_csv(
        os.path.join(test_dir, "tm2_outputs", "indivTourData_1.csv")
    )
    tm2_simulated_joint_tour_df = pd.read_csv(
        os.path.join(test_dir, "tm2_outputs", "jointTourData_1.csv")
    )
    tm2_simulated_tour_df = pd.concat(
        [tm2_simulated_indiv_tour_df, tm2_simulated_joint_tour_df],
        sort=False,
        ignore_index=True,
    )
    tm2_simulated_tour_df.rename(columns={"hh_id": "household_id"}).to_csv(
        os.path.join(test_dir, "tours.csv"), index=False
    )
    ####
def create_summary(input_df, key, out_col="Share") -> pd.DataFrame:
    """
    Create a share summary of the input data.
    1. group the input data by the "key" column
    2. compute the fraction of records falling in each "key" category,
       rounded to 4 decimal places.
    :return: pd.DataFrame with the key column and the share column
    """
    counts = input_df.groupby(key).size().reset_index(name="Count")
    total = counts["Count"].sum()
    counts[out_col] = round(counts["Count"] / total, 4)
    return counts[[key, out_col]]
def cosine_similarity(a, b):
    """
    Compute the cosine similarity between two vectors.

    The two number sequences are treated as vectors in a multi-dimensional
    space; the similarity is the cosine of the angle between them, i.e.
    their dot product divided by the product of their lengths.
    :return: cosine similarity as a float
    """
    numerator = dot(a, b)
    denominator = norm(a) * norm(b)
    return numerator / denominator
def compare_simulated_against_target(
    target_df: pd.DataFrame,
    simulated_df: pd.DataFrame,
    target_key: str,
    simulated_key: str,
) -> float:
    """
    Compare the simulated results against the target results by computing
    the cosine similarity between their share vectors.

    The two share tables are outer-joined on their key columns; categories
    present in only one table count as a 0% share.

    :param target_df: target shares, containing a ``Target_Share`` column.
    :param simulated_df: simulated shares, containing a ``Simulated_Share`` column.
    :param target_key: join column in ``target_df``.
    :param simulated_key: join column in ``simulated_df``.
    :return: cosine similarity between the two share vectors.
        FIX: the annotation previously said ``bool`` but the function has
        always returned the float similarity value.
    """
    merged_df = pd.merge(
        target_df, simulated_df, left_on=target_key, right_on=simulated_key, how="outer"
    )
    # Outer join leaves NaN for categories missing on one side; treat
    # those as zero share so the vectors stay comparable.
    merged_df = merged_df.fillna(0)
    logger.info("simulated vs target share:\n%s" % merged_df)
    similarity_value = cosine_similarity(
        merged_df["Target_Share"].tolist(), merged_df["Simulated_Share"].tolist()
    )
    logger.info("cosine similarity:\n%s" % similarity_value)
    return similarity_value
|
# Generated by Django 2.1.4 on 2020-07-02 15:13
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: drop the unused ``email`` field from Article."""

    dependencies = [
        ('api_basic', '0002_auto_20200701_1245'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='article',
            name='email',
        ),
    ]
|
#coding=utf-8
from base import basepage
from selenium.webdriver.common.by import By
class ChargePage(basepage.BasePage):
    '''Page object for the stored-value (top-up) module.'''
    # <<<<<<<<<<<<<<<<<<<<<<<<<<<<< Locators >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
    # Phone number or card number input
    charge_number_loc = (By.ID,'charge_number')
    # Confirm button
    charge_confirmBtn_loc = (By.XPATH,'//input[@id="charge_number"]/../div[1]/button[1]')
    # Top-up bonus rule ("give 1 yuan")
    charge_GZ_loc = (By.XPATH,'//input[@name="tcAwards"]/../div[1]/p[2]')
    # Custom top-up rule link
    charge_customGZ_loc = (By.LINK_TEXT, "自定义规则")
    # Custom amount input
    charge_present_loc = (By.ID,'present')
    # Custom-rule confirm button
    charge_customBtn_loc = (By.XPATH,'//input[@id="present"]/../div[1]/button[1]')
    # Payment type selector
    charge_payType_loc =(By.XPATH,'//input[@name="payType"]/..')
    # Remark field
    charge_Remark_loc = (By.ID,'note')
    # Top-up submit confirm button
    charge_submit_loc = (By.ID,'chargeSubmit')
    # Stored-value balance display
    charge_RMB_loc = (By.XPATH,'/html/body/div[2]/div/div/div/div[2]/div/div[1]/div')
    # Final top-up confirmation button
    charge_LastBtn_loc = (By.ID,'chargeCheckBtn')
    # Top-up success prompt
    assertChargeSuccess_loc = (By.XPATH,'//*[@id="chargeSuccessModal"]/div/div/div[1]/h3')
    # "Consume now" button
    charge_consumeBtn_loc = (By.ID,'consumeBtn')
    # Stored-value balance (shown after top-up)
    usSaving_loc = (By.ID,'usSaving')
    # Loyalty points
    ownCredit_loc = (By.ID, "ownCredit")
    # Error prompt shown when the custom amount equals a stored-value rule amount
    assert_error_loc = (By.XPATH, "//div[@id='sGift']/../label")
    # ------------------------------- Supplementary invoices --------------------------------
    # "Issue invoice" button
    fill_toReceipt_loc = (By.ID,"toReceipt")
    # Invoice amount on the first row
    fill_RMB_loc = (By.XPATH,'//*[@id="receipt"]/div[2]/div[2]/div[2]/div[1]/div[5]/input')
    # Un-invoiced amount
    # //*[@id="receipt"]/div[2]/div[1]/div[2]/div[1]/div[4]  @change log: a year dropdown was added to the invoice view
    fill_not_RMB_loc = (By.XPATH,'//*[@id="receipt"]/div[2]/div[2]/div[2]/div[1]/div[4]')
    # Confirm button
    #//*[@id="receipt"]/div[2]/div[3]/div'
    fill_Btn_loc = (By.XPATH,'//*[@id="receipt"]/div[2]/div[4]/div')
    # <<<<<<<<<<<<<<<<<<<<<<<<<<<<<< End of locators >>>>>>>>>>>>>>>>>>>>>>>>>>>>>

    def inputPhoneOrCardNo(self,text):
        """Type the phone or card number into the charge-number field."""
        self.inputText(text,'储值卡号',*(self.charge_number_loc))

    @property
    def clickConfirmBtn(self):
        """Click Confirm to display the stored-value info."""
        self.click_button('确定',*(self.charge_confirmBtn_loc))

    @property
    def clickChargeGZ(self):
        """Click the top-up bonus rule."""
        self.click_button('奖励规则',*(self.charge_GZ_loc))

    @property
    def clickCustomGZ(self):
        """Click the custom-rule link."""
        self.click_button('自定义规则',*(self.charge_customGZ_loc))

    def inputCustomPresent(self,text):
        """Type the custom top-up amount."""
        self.inputText(text,'自定义金额',*(self.charge_present_loc))

    @property
    def clickCustomConfirmBtn(self):
        """Click the custom-rule confirm button."""
        self.click_button('自定义规则确定',*(self.charge_customBtn_loc))

    def clickPayType(self, index):
        """Click the payment type at `index` (e.g. cash)."""
        self.click_btn_index(
            '现金支付类型',
            index,
            *(self.charge_payType_loc)
        )

    def inputRemark(self,text):
        """Type the remark text."""
        self.inputText(text,'备注',*(self.charge_Remark_loc))

    @property
    def clickSubmitBtn(self):
        """Click Confirm to submit the top-up."""
        self.click_button('确定',*(self.charge_submit_loc))

    @property
    def clickLastConfirmBtn(self):
        """Click the final top-up confirmation button."""
        self.click_button('确定',*(self.charge_LastBtn_loc))

    @property
    def clickConsumeBtn(self):
        """Click the consume-now button."""
        self.click_button('立即消费',*(self.charge_consumeBtn_loc))

    @property
    def clickFillReceipt(self):
        """Click the issue-invoice button."""
        self.click_button('补开发票',*(self.fill_toReceipt_loc))

    @property
    def clickFillConfirmBtn(self):
        """Click the invoice confirm button."""
        self.click_button('确定',*(self.fill_Btn_loc))

    def inputFillPresent(self,text):
        """Type the supplementary invoice amount."""
        self.inputText(text,'补开发票金额',*(self.fill_RMB_loc))

    @property
    def assertfindRMB(self):
        """Assert helper: whether the stored-value balance element exists."""
        bool_Var = self.isOrNoExist(*(self.charge_RMB_loc))
        return bool_Var

    @property
    def getAfterRMB(self):
        """Return the balance shown before topping up (last char, the currency suffix, stripped)."""
        rmb = self.find_element(
            *(self.charge_RMB_loc)).text[:-1]
        return rmb

    @property
    def getLastRMB(self):
        """Return the balance shown after topping up."""
        rmb = self.find_element(
            *(self.usSaving_loc)).text
        return rmb

    def getNotFillPresent(self,txtName):
        """Return the un-invoiced amount text (utf-8 encoded) and capture a screenshot."""
        # Un-invoiced amount
        notFill = self.getTagText(
            txtName,*(self.fill_not_RMB_loc)).encode('utf-8')
        self.getImage()
        return notFill

    @property
    def assertCustom(self):
        '''Assert helper: the consume page is shown.'''
        # NOTE(review): `self.assertPhone` is not defined in this class --
        # presumably inherited from BasePage or another page object; verify.
        bool_var = self.isExist(*(self.assertPhone))
        self.getImage()
        return bool_var

    @property
    def assertChargeSuccess(self):
        '''Assert helper: the top-up success prompt is shown.'''
        bool_var = self.isExist(*(self.assertChargeSuccess_loc))
        self.getImage()
        return bool_var

    def assert_custom_error(self):
        """Assert helper: error prompt shown when the custom amount equals a rule amount."""
        bool_var = self.isExist(*(self.assert_error_loc))
        self.getImage()
        return bool_var
if __name__ == "__main__":
    # Page objects are imported by the test suite; nothing runs directly.
    pass
# Multiple linear regression on the ISL "Advertising" data set: fit
# sales ~ TV + radio + newspaper via the normal equations and via
# scikit-learn, compare R^2 on an 80/20 split, and plot coefficient
# confidence intervals.
from sklearn.linear_model import LinearRegression
import pandas as pd
import pylab as plt
import numpy.random as nprnd
import random
import matplotlib.pyplot as plt
# load data
df = pd.read_csv('http://www-bcf.usc.edu/~gareth/ISL/Advertising.csv', index_col=0)
# explore
df.head()
df.boxplot()
# FIX: pandas.tools.plotting was removed in pandas 0.20 --
# scatter_matrix now lives in pandas.plotting.
from pandas.plotting import scatter_matrix
scatter_matrix(df, alpha=0.2, figsize=(7,7), diagonal='kde')
# fit linear model
from sklearn.linear_model import LinearRegression
# define dependent and independent variables
Y = df['sales']
X = df[['TV','radio','newspaper']]
X.insert(0, 'intercept', 1.0) # add one more column for intercept value
X.head()
X.shape
# split the data 80% training 20% testing
# FIX: sklearn.cross_validation was removed in scikit-learn 0.20 --
# train_test_split now lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split
X_train,X_test,Y_train,Y_test = train_test_split(X,Y,train_size = 0.8)
X_test.shape
# train the model
import numpy as np
from numpy.linalg import inv
# approach 1: ordinary least squares via the normal equations
XT = X_train.transpose()
Beta= inv(XT.dot(X_train)).dot(XT).dot(Y_train)
Beta
# approach 2: scikit-learn estimator
lm = LinearRegression(fit_intercept=True).fit(X_train, Y_train)
print ( 'lm.intercept_ : ', lm.intercept_)
print ( 'lm.coef_ : ', lm.coef_ )
# find R^2 for train and test and compare.
from sklearn import datasets, linear_model
from sklearn.metrics import mean_squared_error, r2_score
Y_pred_train = lm.predict(X_train)
Y_pred_test = lm.predict(X_test)
print('train_R^2 score : %.2f' % r2_score(Y_train, Y_pred_train))
print('test_R^2 score : %.2f' % r2_score(Y_test, Y_pred_test))
# plot real against prediction color coded by data pocket (train, test)
plt.scatter(Y_pred_train,Y_train, c="#1c9acc", alpha=0.5 , s = 60 , edgecolors='none', label="train")
plt.scatter(Y_pred_test,Y_test, c="r", alpha=0.5 , s = 60 ,edgecolors='none', label="test")
plt.xlabel('Predicted Sales')
plt.ylabel('Real Sales')
plt.legend(loc= 2 ,fontsize = 'small' )
# compare coefficients (coef_[0] belongs to the manually-added intercept column)
print('TV : %.4f' % lm.coef_[1])
print('Radio : %.4f' % lm.coef_[2])
print('newspaper : %.4f' % lm.coef_[3])
#NOTE: Radio seems to have the highest correlation coefficient followed by TV. There is a negligible correlation with between newspaper and Sales in the presence of other channels of advertising.
# coefficients confidence interval
import scipy, scipy.stats
# FIX: the array-based OLS class lives in statsmodels.api;
# statsmodels.formula.api only exposes the formula (lower-case) entry points.
import statsmodels.api as sm
result = sm.OLS(Y_train, X_train).fit()
result.summary()
result.conf_int()
# calculate the error bar (FIX: this line was duplicated in the original)
errbar= result.params - result.conf_int()[0]
# plotting
coef_data = pd.DataFrame({'coef': result.params.values[1:],
                          'err': errbar.values[1:],
                          'varname': errbar.index.values[1:] })
plt.errorbar(range(len(coef_data['varname'])), coef_data['coef'], yerr=coef_data['err'], fmt='o')
plt.xlim(-1,3)
plt.xticks(range(len(coef_data['varname'])), coef_data['varname'])
plt.show()
plt.bar([1,2,3], coef_data['err'])
plt.xticks([1.5,2.5,3.5], ('TV', 'radio', 'newspaper'))
plt.ylim(0,0.025)
coef_data
|
# Generated by Django 2.2.7 on 2019-11-24 15:15
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration: create the ``activity`` model with
    two user foreign keys (friend1/friend2), a group foreign key, an
    ``exp`` flag and an ``expense`` amount."""

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('sgroups', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='activity',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('exp', models.BooleanField(default=False)),
                ('expense', models.FloatField(default=0.0)),
                ('friend1', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='fri1', to=settings.AUTH_USER_MODEL)),
                ('friend2', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='fri2', to=settings.AUTH_USER_MODEL)),
                ('group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='grp', to='sgroups.Groups')),
            ],
        ),
    ]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 22 17:12:08 2017
@author: BallBlueMeercat
"""
import numpy as np
def lnlike(theta, x, y, yerr):
    """Gaussian log-likelihood of the straight-line model y = m*x + b.

    The per-point variance is yerr**2 + model**2 (as in the original)."""
    slope, intercept = theta
    prediction = slope * x + intercept
    variance = yerr**2 + prediction**2
    residual_sq = (y - prediction)**2
    return -0.5 * np.sum(residual_sq / variance + np.log(variance))
def lnprior(theta):
    """Flat (uninformative) prior: 0 inside -5 < m < 0.5 and 0 < b < 10,
    -inf outside that box."""
    m, b = theta
    inside = (-5.0 < m < 0.5) and (0.0 < b < 10.0)
    return 0.0 if inside else -np.inf
def lnprob(theta, x, y, yerr):
    """Log-posterior: prior + likelihood, or -inf when the prior excludes theta."""
    prior = lnprior(theta)
    if np.isfinite(prior):
        return prior + lnlike(theta, x, y, yerr)
    return -np.inf
# True parameters of the synthetic straight line.
m_true = 0.1
b_true = 3
theta = [m_true, b_true]
# Synthetic data set: N uniform x points with Gaussian noise ~ N(mu, sigma).
N = 50
sigma = 0.05
mu = 0
x = np.random.rand(N)
yerr = np.random.normal(mu,sigma,N)
y = m_true*x+b_true
y += yerr  # add the noise realisation to the noiseless line
# Evaluate the likelihood once at the true parameters (smoke check).
lnlike(theta, x, y, yerr)
|
# implement an algo to determine if a string has all unique characters
def all_unique(input_string):
    """Return True iff no character occurs more than once in input_string."""
    # A set drops duplicates, so the lengths match exactly when every
    # character is distinct.
    return len(set(input_string)) == len(input_string)
def all_unique_without_data(input_string):
    """Return True iff no character repeats, without using a set/dict.

    Recursive O(n^2) scan: compare the first character against the rest,
    then recurse on the tail.

    :param input_string: string to check; empty strings are trivially unique.
    """
    # Base case covers 0 AND 1 characters.  FIX: the original only handled
    # length 1 and raised IndexError on the empty string below.
    if len(input_string) <= 1:
        return True
    character = input_string[0]
    for other in input_string[1:]:
        if character == other:
            return False
    return all_unique_without_data(input_string[1:])
|
#!/usr/bin/env
# 0 Provider
# 1 Provider Country
# 2 SKU
# 3 Developer
# 4 Title
# 5 Version
# 6 Product Type Identifier
# 7 Units
# 8 Developer Proceed
# 9 Begin Date
# 10 End Date
# 11 Customer Currency
# 12 Country Code
# 13 Currency of Proceeds
# 14 Apple Identifier
# 15 Customer Price
# 16 Promo Code
# 17 Parent Identifier
# 18 Subscription
# 19 Period
# 20 Category
# 21 CMB
# 22 Device
# 23 Supported Platforms
# 24 Proceeds Reason
# 25 Preserved Pricing
# 26 Client
# 27 Order Type
import collections
import configparser
import datetime
import gzip
import json
import os
import time
import jwt
import requests
# App Store Connect API root.
BASE_URL = "https://api.appstoreconnect.apple.com/v1"
# time that token will be valid for (can't be for more than 20 mins)
VALID_TIME = 20 * 60  # seconds
# Fixed chart colour per app title for the Status Board output.
APP_COLORS = {"1RM": "yellow", "Simple Checklist : Repeat it": "orange", "Quantified Groceries": "green"}
def retrieve_sales(n_weeks=10):
    """Fetch weekly App Store sales summaries for the last *n_weeks*.

    Reads API credentials from the first section of ``~/.appstore``, signs
    a JWT with the ES256 key ``~/data/AuthKey_<key_id>.p8`` and queries the
    salesReports endpoint once per weekly report, newest first.

    :param n_weeks: number of weekly reports to fetch, counting back from
        the most recent Sunday.
    :return: ``(apps, all_counters)`` -- the set of product titles seen and
        a chronologically ordered list of
        ``{"date": <report Sunday>, "units": {title: units}}`` dicts.
    """
    config = configparser.ConfigParser()
    config.read(os.path.expanduser("~/.appstore"))
    user = config.sections()[0]
    key_id = config.get(user, 'key_id')
    header = {"alg": "ES256", "kid": key_id, "typ": "JWT"}
    now = time.time()
    expiration = int(now + VALID_TIME)
    issuer_id = config.get(user, "issuer_id")
    payload = {"iss": issuer_id, "exp": expiration, "aud": "appstoreconnect-v1"}
    private_key_filename = os.path.join(os.path.expanduser("~/data"), f"AuthKey_{key_id}.p8")
    with open(private_key_filename, "r") as f:
        key = f.read()
    signature = jwt.encode(payload, key, algorithm="ES256", headers=header)
    # NOTE(review): assumes jwt.encode returns bytes; PyJWT >= 2.0 already
    # returns str, which makes this str(bytes) call raise -- confirm the
    # pinned PyJWT version.
    signature = str(signature, "utf-8")
    vendor_number = config.get(user, "vendor_number")
    url = f"{BASE_URL}/salesReports"
    header = {"Authorization": f"Bearer {signature}"}
    parameters = {"filter[frequency]": "WEEKLY",
                  "filter[reportType]": "SALES",
                  "filter[reportSubType]": "SUMMARY",
                  "filter[vendorNumber]": f"{vendor_number}",
                  "filter[reportDate]": "2019-06-02"}  # placeholder, overwritten per week
    today = datetime.date.today()
    days_ago = (today.weekday() - 6) % 7
    last_sunday = today - datetime.timedelta(days=days_ago)
    all_counters = list()
    apps = set()
    for _ in range(n_weeks):  # FIX: was `i`, shadowed by the inner loop below
        counter = collections.defaultdict(int)
        report_date = last_sunday
        parameters['filter[reportDate]'] = report_date.strftime('%Y-%m-%d')
        response = requests.get(url, params=parameters, headers=header)
        if response.status_code == 200:
            data = str(gzip.decompress(response.content), "utf-8")
            # Report is TSV; first row is the header.  Column 4 is the
            # product title, 7 the units, 8 the developer proceeds.
            lines = data.split("\n")
            for line in lines[1:]:
                if len(line) == 0: continue
                tokens = line.split("\t")
                # Skip zero-proceed rows (free downloads, refunds netting to 0, ...).
                if float(tokens[8]) == 0.0: continue
                counter[tokens[4]] += int(tokens[7])
                apps.add(tokens[4])
        else:
            print(f"Problem retrieving sales data, response code {response.status_code}")
            print(response.content)
        # BUGFIX: label the counter with the week that was actually
        # requested; the original stored the already-decremented date,
        # shifting every week's units one week into the past.
        all_counters.insert(0, {"date": report_date, "units": counter})
        last_sunday = last_sunday - datetime.timedelta(days=7)
    return apps, all_counters
def translate_to_statusboard(apps, sales_units):
    """Convert weekly unit counters into Status Board "datasequences".

    :param apps: iterable of app titles.
    :param sales_units: ordered list of
        ``{"date": date, "units": {title: units}}`` dicts.
    :return: list of ``{"title", "color", "datapoints"}`` dicts, one per app.
    """
    datasequences = []
    for app_name in apps:
        seq = [{"title": week["date"].strftime("%Y-%V"),
                "value": week["units"][app_name]}
               for week in sales_units]
        # ROBUSTNESS FIX: fall back to a default colour for any title that
        # is missing from the hard-coded APP_COLORS map instead of
        # raising KeyError (titles come from live sales data).
        app = {"title": app_name,
               "color": APP_COLORS.get(app_name, "gray"),
               "datapoints": seq}
        datasequences.append(app)
    return datasequences
def output_json(apps, sales_units, filename):
    """Write the Status Board "App sales" graph JSON for the given sales data."""
    graph = {
        "title": "App sales",
        "total": False,
        "type": "bar",
        "refreshEveryNSeconds": 3600,
        "datasequences": translate_to_statusboard(apps, sales_units),
    }
    with open(filename, 'w') as f:
        f.write(json.dumps({"graph": graph}))
def main():
    """Fetch the last 26 weeks of sales and write the Status Board JSON."""
    apps, sales_units = retrieve_sales(n_weeks=26)
    target = os.path.join(os.path.expanduser("~/data"), "ios-app-sales.json")
    output_json(apps, sales_units, target)


if __name__ == "__main__":
    main()
|
# WizIO 2021 Georgi Angelov
# http://www.wizio.eu/
# https://github.com/Wiz-IO/wizio-cc
from __future__ import print_function
from SCons.Script import DefaultEnvironment
# Build-script bootstrap: import the framework/core-specific module and
# hand it this SCons environment to initialise the board build.
env = DefaultEnvironment()
# First PlatformIO framework configured for this build.
# NOTE(review): assumes PIOFRAMEWORK is non-empty -- an empty list would
# raise IndexError here; confirm platform.json always sets it.
platform = env.get("PIOFRAMEWORK", [])[0]
# Module name convention: "<framework>-<board build.core>".
module = platform + "-" + env.BoardConfig().get("build.core")
m = __import__(module)
globals()[module] = m  # re-export the imported module under its own name
m.dev_init(env, platform)
#print( env.Dump() )
|
from django.utils import timezone
from main.models import MessageInstance, ActionType
from datetime import datetime
def check(worker):
    """Send an inactivity reminder to each bound participant group.

    For every participant group bound to the worker's bot: if the group
    has an active problem and its last message is older than the
    threshold, remove any previous inactivity notifications and send a
    fresh one, recording it as a MessageInstance.

    :param worker: bot worker whose bound groups are checked.
    """
    curr = timezone.now()
    threshold = 3 * 3600  # 3 hours, in seconds
    for pg in (e.participant_group for e in worker.bot.botbinding_set.all()):
        last_message_instance = pg.messageinstance_set.last()
        # ROBUSTNESS FIX: a group with no messages yet has nothing to time out.
        if last_message_instance is None:
            continue
        # BUGFIX: timedelta.seconds is only the sub-day component (0..86399),
        # so 25 hours of silence read as 1 hour and the reminder was never
        # sent; total_seconds() measures the whole gap.
        inactive_for = (curr - last_message_instance.date).total_seconds()
        if pg.activeProblem and inactive_for > threshold:
            # Maybe it would be better to remove the notification when participant
            # is answering or when sending/answering a problem
            for old_notification in pg.messageinstance_set.filter(
                    action_type__value='bot_inactivity_notification', removed=False):
                old_notification.remove_message(worker)
            text = "Hey, don't miss your chance to answer the problem "\
                "and take a higher position in the leaderboard!"
            notification_message = worker.bot.send_message(
                pg, text)[0]
            MessageInstance.objects.create(
                action_type=ActionType.objects.get(
                    value='bot_inactivity_notification'),
                date=datetime.fromtimestamp(
                    notification_message["date"],
                    tz=timezone.get_current_timezone()),
                message_id=notification_message['message_id'],
                participant=None,
                participant_group=pg,
                text=text,
                current_problem=pg.activeProblem
            )
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
import io
import json
import os
import re
import sys
import time
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import tweepy
from IPython.core.display import clear_output
from bs4 import BeautifulSoup
from textblob import TextBlob
from tweepy import API
from tweepy.streaming import StreamListener
# Global variables
# API access configuration -- fill in your Twitter app credentials.
consumer_key = ''
consumer_secret = ''
access_token_key = ''
access_token_secret = ''
# Output file for the captured stream of tweets.
file_name = 'tweets.json'
# Stream filter: accepted languages and tracked terms.
stream_language = ['en']
query_list = ['Curie', 'Planck', 'Einstein', 'Bohr', 'Fleming', 'Higgs']
# Input directory of stored tweet JSON files and output directory for charts.
dir_json = './jsons'
dir_images = './images'
class MyListener(StreamListener):
    """Stream listener that buffers up to `count_max` tweets and then
    writes them to `output_file` as a single ``{"tweets": [...]}`` JSON
    document."""

    def __init__(self, output_file, count_max=50, api=None):
        self.api = api or API()
        self.output_file = output_file
        self.counter = 1             # 1-based count of captured tweets
        self.counter_tweet = 0
        self.count_max = count_max   # stop the stream after this many tweets
        self.start_time = time.time()
        self.tweet_data = []         # raw JSON strings, one per tweet
        self.status_list = []        # tweepy Status objects, same order

    def on_status(self, status):
        # NOTE(review): this `while` behaves as an `if` -- both the try and
        # the except branch return, so at most one iteration runs.  Once
        # counter exceeds count_max the loop is skipped and the buffered
        # tweets are flushed below; returning False disconnects the stream.
        while self.counter <= self.count_max:
            clear_output(False)
            print('Nº Tweets recuperados: ' + str(self.counter)
                  + ' - ' + self.get_time()
                  , end=' ')
            try:
                self.status_list.append(status)
                json_string = json.dumps(status._json, ensure_ascii=False)
                self.tweet_data.append(json_string)
                self.counter += 1
                return True
            except BaseException as ex:
                sys.stderr.write("Error on_data:{}\n".format(ex))
                return True
        # Flush: wrap all buffered tweets in one JSON document.
        with io.open(self.output_file, 'w', encoding='utf-8') as f:
            f.write(u'{"tweets":[')
            if len(self.tweet_data) > 1:
                f.write(','.join(self.tweet_data))
            else:
                f.write(self.tweet_data[0])
            f.write(u']}')
        return False

    def on_error(self, status):
        # 420 = rate-limited by Twitter; disconnect instead of retrying.
        if status == 420:
            print(status)
            return False

    def get_time(self):
        """Return the elapsed time since the listener started, as HH:MM:SS."""
        dif = time.strftime("%H:%M:%S",
                            time.gmtime(time.time() - self.start_time))
        return str(dif)
class MyTokenizerClass:
    """
    Helper class grouping the methods in charge of processing, searching
    and counting words or characters in the text of a tweet.

    A series of regexes is used so that splitting the text is more
    accurate than simply using nltk's word_tokenizer.  These regexes come
    partly from Marco Bonzanini's blog, from regex101.com, and some are
    our own.
    https://marcobonzanini.com/2015/03/09/mining-twitter-data-with-python-part-2/
    """
    # VERBOSE-mode pattern; the in-pattern comments are part of the regex string.
    emoticons_str = r"""
    (?:
    [:=;] # Eyes
    [oO\-]? # Nose (optional)
    [D\)\]\(\]/\\OpP] # Mouth
    )"""
    # Token alternatives, tried in order.
    regex_str = [
        emoticons_str,
        r'<[^>]+>',  # HTML tags
        r'@[\w_]+',  # @-mentions (regex101.com)
        r"(?:\#+[\w_]+[\w\'_\-]*[\w_]+)",  # hash-tags (marcobonzanini.com)
        r'http[s]?://[^\s<>"]+|www\.[^\s<>"]+',  # URLs (regex101.com)
        r'(?:(?:\d+,?)+(?:\.?\d+)?)',  # numbers (regex101.com)
        r"(?:[a-z][a-z'\-_]+[a-z])",  # words with - and ' (marcobonzanini.com)
        r'(?:[\w_]+)',  # other words (regex101.com)
        r'(?:[^[:punct:]])'  # anything else except punctuation marks
    ]
    tokens_re = re.compile(r'(' + '|'.join(regex_str) + ')',
                           re.VERBOSE | re.IGNORECASE)
    emoticon_re = re.compile(r'^' + emoticons_str + '$',
                             re.VERBOSE | re.IGNORECASE)

    def num_palabras_caracteres(self, s):
        """
        Count the words and characters of a text.
        Important: punctuation marks are not considered words; they are
        excluded by the applied regex (?:[^[:punct:]])
        :param s: text string to evaluate
        :return: dictionary with both values: number of words
        ('palabras') and number of characters ('caracteres')
        """
        num = {}
        tokens = self.tokens_re.findall(s)
        num['palabras'] = len(tokens)
        num['caracteres'] = len([char for token in tokens for char in token])
        return num
class MyStatisticsClass:
    """
    Helper class grouping the methods in charge of analysing the tweets.
    """

    def __init__(self, df_statistic):
        # DataFrame of tweets (see the create_dataframe_* builders).
        self.df = df_statistic

    def get_save_picture_path(self, file_name):
        """
        Return the path where chart images are saved.
        :param file_name: string with the image file name
        :return: save path.
        """
        return os.path.join(dir_images, file_name)

    def get_tweets_per_hour(self):
        """
        Print the number of tweets aggregated per hour and draw a barplot.
        :return: list of hours with the tweets created, and a barplot
        """
        # Frequency per hour
        frecuencia_list = self.df.groupby('hora')['id'].count()
        # Build a df from the series and rename the columns
        df_hour = pd.DataFrame([frecuencia_list]).T
        df_hour.rename(columns={'id': 'value'}, inplace=True)
        print('Las distribución horaria de los tweets es:\n')
        for index, row in df_hour.iterrows():
            print('Hora {0} - {1} tweets'.format(index, row['value']))
        # Draw the chart
        sns.set(color_codes=True)
        palette = sns.color_palette('Reds_d', 24)
        fig, ax = plt.subplots(figsize=(14, 6))
        ax = sns.barplot(df_hour.index, df_hour['value'], alpha=.6,
                         palette=palette)
        for p in ax.patches:
            # NOTE(review): `p.get_height > 0` compares the bound method,
            # not its value, so the test is always truthy (Python 2
            # semantics; a TypeError on Python 3) -- presumably
            # `p.get_height() > 0` was intended.  The same pattern repeats
            # in the other chart methods below.
            if p.get_height > 0:
                ax.annotate("%d" % p.get_height(),
                            (p.get_x() + p.get_width() / 2.,
                             p.get_height()),
                            ha='center', va='center', fontsize=10, color='gray',
                            fontweight='bold', xytext=(0, 5),
                            textcoords='offset points')
        ax.set(ylabel='Frecuencia', xlabel='Horas')
        fig.suptitle(u'Distribución Horaria',
                     horizontalalignment='center', y=0.95)
        plt.savefig(self.get_save_picture_path('Hours.png'),
                    bbox_inches="tight")
        plt.show()

    def get_count_word(self, s, word_to_find):
        """
        Count the number of occurrences of a word in a string.
        :param s: string to search in
        :param word_to_find: word to look for
        :return: number of occurrences
        """
        word_to_find = word_to_find.lower()
        s = s.lower()
        # Non-word characters on both sides so only whole words match.
        word_token = re.compile(r'(\W' + word_to_find + '\W)+')
        tokens = word_token.findall(s)
        return len(tokens)

    def get_count_of_query_words(self):
        """
        Compute and total, over the text of the tweets, the occurrences of
        each of the terms used in the filter query.
        :return: chart with the frequencies
        """
        num_cat = len(query_list)
        count_list = [0] * num_cat
        vect_num = np.vectorize(self.get_count_word)
        for idx, val in enumerate(query_list):
            count_list[idx] = vect_num(self.df['text'], val.lower()).sum()
        df_count = pd.DataFrame({'value': count_list}, index=query_list)
        df_count = df_count.sort_values('value', ascending=False)
        # Print the results
        print("Los valores obtenidos son:\n")
        for index, row in df_count.iterrows():
            print('{0} - {1} ocurrencias'.format(index, row['value']))
        # Draw the chart
        sns.set(color_codes=True)
        palette = sns.color_palette('Oranges_d', num_cat)
        fig, ax = plt.subplots(figsize=(14, 6))
        ax = sns.barplot(df_count.index, df_count['value'], alpha=.6,
                         palette=palette)
        ax.set(ylabel='Frecuencia', xlabel=u'Términos de búsqueda')
        for p in ax.patches:
            # NOTE(review): method compared, not called -- see get_tweets_per_hour.
            if p.get_height > 0:
                ax.annotate("%d" % p.get_height(),
                            (p.get_x() + p.get_width() / 2.,
                             p.get_height()),
                            ha='center', va='center', fontsize=10, color='gray',
                            fontweight='bold', xytext=(0, 5),
                            textcoords='offset points')
        fig.suptitle(u'Frecuencia de Aparición',
                     horizontalalignment='center', y=0.95)
        plt.savefig(self.get_save_picture_path('Frequency.png'),
                    bbox_inches="tight")
        plt.show()

    def get_time_zone_distribution(self):
        """
        Show the 10 most frequent time zones among the created tweets.
        :return: barplot
        """
        df_time = self.df[self.df['time_zone'].notnull()]
        grupo = df_time.groupby('time_zone')['id'].count().nlargest(10)
        # Build a df from the series and rename the columns
        df_time = pd.DataFrame([grupo]).T
        df_time.rename(columns={'id': 'value'}, inplace=True)
        num_cat = df_time.shape[0]
        # Print the results
        print("Las 10 Zonas Horarias con mayor número de tweets son:\n")
        for index, row in df_time.iterrows():
            print('{0} - {1} tweets'.format(index, row['value']))
        # Draw the chart
        sns.set(color_codes=True)
        palette = sns.color_palette('Greens_d', num_cat)
        fig, ax = plt.subplots(figsize=(14, 10))
        ax = sns.barplot(df_time.index, df_time['value'], alpha=.6,
                         palette=palette)
        ax.set(ylabel='Frecuencia', xlabel=u'Zonas')
        for p in ax.patches:
            # NOTE(review): method compared, not called -- see get_tweets_per_hour.
            if p.get_height > 0:
                ax.annotate("%d" % p.get_height(),
                            (p.get_x() + p.get_width() / 2.,
                             p.get_height()),
                            ha='center', va='center', fontsize=10, color='gray',
                            fontweight='bold', xytext=(0, 5),
                            textcoords='offset points')
        fig.suptitle(u'Distribución 10 Zonas Horarias más frecuentes',
                     horizontalalignment='center', y=0.95)
        plt.xticks(rotation=90)
        plt.savefig(self.get_save_picture_path('TimeZone.png'),
                    bbox_inches="tight")
        plt.show()

    def get_porcentaje_fuente_tweet(self):
        """
        Show the percentages of the devices/clients the tweets were
        created on.
        :return: percentage of df['source'] and a pie chart
        """
        # Percentage of each source with respect to the total
        grupo = self.df.groupby('source')['id'].count()
        num_total_registros = self.df.shape[0]
        grupo = (grupo * 100) / num_total_registros
        # Keep the indices of the 5 largest percentages;
        # the rest are grouped under 'Otros' (Others).
        top_index = grupo.nlargest(5).index
        others_index = [i for i in grupo.index if i not in top_index]
        # Build a df from the series and rename the columns
        df_percent = pd.DataFrame([grupo]).T.reset_index()
        df_percent.rename(columns={'id': 'value'}, inplace=True)
        # If the aggregation by source yields more than 5 categories,
        # replace all values outside the top 5 with 'Otros'
        if len(others_index) > 0:
            df_percent = df_percent.replace(others_index, 'Otros')
        # Percentages per (possibly regrouped) source
        percent = df_percent.groupby('source').sum().reset_index()
        percent = percent.sort_values('value', ascending=False)
        # Print the obtained percentages
        print("Los porcentajes por origen son:\n")
        for index, row in percent.iterrows():
            print('{} - {:,.2f}% '.format(row['source'], row['value']))
        # Draw the chart
        fig, ax = plt.subplots(figsize=(14, 6))
        palette = sns.color_palette('Pastel1')
        ax.pie(percent['value'], labels=percent['source'],
               autopct='%1.1f%%',
               startangle=90, colors=palette)
        ax.axis('equal')
        fig.suptitle(u'Distribución por Origen',
                     horizontalalignment='center', y=0.95,
                     fontsize=14)
        plt.legend(bbox_to_anchor=(1.1, 1))
        plt.savefig(self.get_save_picture_path('Sources.png'),
                    bbox_inches="tight")
        plt.show()

    def get_polarity_classification(self, s):
        """
        Classify the polarity of a text using textblob.
        :param s: text string
        :return: polarity: 'Positiva', 'Neutra' or 'Negativa'
        """
        # First clean the text, removing special characters, mentions, links...
        s = ' '.join(
            re.sub("(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\w+:\/\/\S+)",
                   " ", s).split())
        analysis = TextBlob(s)
        if analysis.sentiment.polarity > 0:
            return 'Positiva'
        elif analysis.sentiment.polarity == 0:
            return 'Neutra'
        else:
            return 'Negativa'

    def get_sentimental_analysis(self):
        """
        Show the results of applying sentiment analysis to the tweets.
        :param: 'sa' column of the df (precomputed polarity labels)
        :return: polarity percentages and a pie chart
        """
        grupo = self.df.groupby('sa')['id'].count().sort_index(
            ascending=False)
        num_total_registros = self.df.shape[0]
        grupo = (grupo * 100) / num_total_registros
        # Build a df from the series and rename the columns
        df_sent = pd.DataFrame([grupo], ).T.reset_index()
        df_sent.columns = ['sa', 'value']
        df_sent['value'] = pd.to_numeric(df_sent['value'])
        # Print the obtained percentages
        print("Los porcentajes por Polaridad son:\n")
        for index, row in df_sent.iterrows():
            print('{} - {:,.2f}% '.format(row['sa'], row['value']))
        # Draw the chart
        fig, ax = plt.subplots(figsize=(14, 6))
        palette = sns.color_palette('Pastel1')
        ax.pie(df_sent['value'], labels=df_sent['sa'], autopct='%1.1f%%',
               startangle=90, colors=palette)
        ax.axis('equal')
        fig.suptitle(u'Sentimental Analysis',
                     horizontalalignment='center', y=0.95,
                     fontsize=14)
        plt.legend(bbox_to_anchor=(1.1, 1))
        plt.savefig(self.get_save_picture_path('Sentimental.png'),
                    bbox_inches="tight")
        plt.show()

    def get_media_longitud(self):
        """
        Print the mean length of the tweets.
        :return: mean value of df['num_caracteres']
        """
        media = np.mean(self.df['num_caracteres'])
        print('La longitud media de los tweets es: {:.0f} caracteres'
              .format(media))

    def get_custom_max_min(self, name_col, max_min='min'):
        """
        Return the extreme value of a column and the index of the first
        row holding it.
        :param max_min: string indicating whether to look for the maximum
        or the minimum
        :param name_col: column of the df
        :return: dictionary containing the value found ('valor') and the
        corresponding index ('index')
        """
        result = {}
        if max_min == 'max':
            valor = np.max(self.df[name_col])
        else:
            valor = np.min(self.df[name_col])
        result['valor'] = valor
        result['index'] = self.df[self.df[name_col] == valor].index[0]
        return result
def get_connection_api():
    """
    Build an authenticated OAuth handler for the Twitter API using the
    module-level credential globals.
    :return: auth handler
    """
    handler = tweepy.OAuthHandler(consumer_key, consumer_secret)
    handler.set_access_token(access_token_key, access_token_secret)
    return handler
def search_with_stream():
    """
    Invoke the Twitter streaming listener and return a list with the
    captured tweets.  The number of tweets to collect is requested
    interactively at run time.
    :return: status_list
    """
    # NOTE(review): this function is Python-2 only -- it uses raw_input,
    # and `async=` below is a reserved keyword (syntax error) from
    # Python 3.7 on.
    while True:
        # raw_input is used because this runs under 2.7;
        # it does not exist in 3.6.
        user_input = raw_input(
            'Cuantos tweets desea recuperar con el stream?\n')
        try:
            num_tweets = int(user_input)
            break
        except ValueError:
            print("El valor introducido no es un número entero.\n")
    print('Comienza a ejecutarse el stream .....')
    auth = get_connection_api()
    listener = MyListener(file_name, num_tweets)
    api = tweepy.streaming.Stream(auth, listener, tweet_mode='extended')
    api.filter(languages=stream_language,
               track=query_list, async=False)
    return listener.status_list
def create_dataframe_from_list(tweet_list):
    """
    Build a pandas dataframe from a list of tweet/status objects.

    :param tweet_list: list of tweet objects
    :return: dataframe indexed by tweet id, with columns 'created_at',
        'user', 'location', 'text', 'full_text_flag', 'source',
        'time_zone', 'from_file' and 'hora' (hour of creation)
    """
    columns = ['id', 'created_at', 'user', 'location', 'text',
               'full_text_flag', 'source', 'time_zone', 'from_file']

    def row_for(tweet):
        # Truncated tweets carry their full text in `extended_tweet`.
        if tweet.truncated:
            text, flag = tweet.extended_tweet['full_text'], 'S'
        else:
            text, flag = tweet.text, 'N'
        return {'id': tweet.id,
                'created_at': tweet.created_at,
                'user': tweet.user.name,
                'location': tweet.user.location,
                'text': text.encode('ascii', 'ignore').lower(),
                'full_text_flag': flag,
                'source': tweet.source,
                'time_zone': tweet.user.time_zone,
                'from_file': 'direct_from_list'}

    index = pd.Series(t.id for t in tweet_list)
    frame = pd.DataFrame([row_for(t) for t in tweet_list],
                         columns=columns, index=index)
    frame.index.name = 'id'
    # Normalise the timestamp column and derive the hour-of-day column.
    frame['created_at'] = pd.to_datetime(frame['created_at'])
    frame['hora'] = frame['created_at'].dt.hour
    return frame
def create_dataframe_from_json():
    """
    Build a pandas dataframe from the stored tweet JSON files.
    :return: dataframe with the columns 'created_at', 'user', 'location',
    'text', 'full_text_flag', 'hora', 'source', 'time_zone', 'from_file'
    and the tweet id as index
    """
    columns = ['id', 'created_at', 'user', 'location', 'text',
               'full_text_flag', 'source', 'time_zone', 'from_file', 'hora']
    df_json = pd.DataFrame(columns=columns)
    # Concatenate one partial dataframe per file found under dir_json.
    for root, dirs, filenames in os.walk(dir_json):
        for f in filenames:
            print('Cargando archivo ' + f)
            file_path = os.path.join(dir_json, f)
            # NOTE(review): DataFrame.append was removed in pandas 2.0
            # (pd.concat is the replacement), and create_partial_df
            # returns False on parse errors, which append would mishandle.
            df_json = df_json.append(create_partial_df(file_path))
    return df_json
def create_partial_df(file_path):
    """
    Parse a single JSON archive of tweets into a pandas DataFrame.

    :param file_path: path to a JSON file with a top-level 'tweets' list
    :return: DataFrame indexed by tweet id, or False when parsing fails
    """
    try:
        with open(file_path, 'r') as f:
            file_name_aux = os.path.basename(os.path.normpath(file_path))
            tweets = json.loads(f.read())
            index = pd.Series(x['id'] for x in tweets['tweets'])
            columns = ['id', 'created_at', 'user', 'location', 'text',
                       'full_text_flag', 'source', 'time_zone', 'from_file']
            rows_list = []
            for x in tweets['tweets']:
                # The 'source' field is an HTML anchor; keep only its text.
                soup = BeautifulSoup(x['source'], 'html5lib')
                source = soup.a.get_text()
                truncated = x['truncated']
                if truncated:
                    # Truncated tweets carry the full text in 'extended_tweet'.
                    text = x['extended_tweet']['full_text']
                    full_text_flag = 'S'
                else:
                    text = x['text']
                    full_text_flag = 'N'
                data = {'id': x['id'],
                        'created_at': x['created_at'],
                        'user': x['user']['name'],
                        'location': x['user']['location'],
                        'text': text.encode('ascii', 'ignore').lower(),
                        'full_text_flag': full_text_flag,
                        'source': source.encode('ascii', 'ignore'),
                        'time_zone': x['user']['time_zone'],
                        'from_file': file_name_aux}
                rows_list.append(data)
            df_aux = pd.DataFrame(rows_list, columns=columns, index=index)
            df_aux.index.name = 'id'
            # Convert the created_at column to datetime dtype
            df_aux['created_at'] = pd.to_datetime(df_aux['created_at'])
            # Derive a new column with the hour of the day
            df_aux['hora'] = df_aux['created_at'].dt.hour
            return df_aux
    except BaseException as ex:
        # NOTE(review): BaseException also swallows KeyboardInterrupt and
        # SystemExit; consider narrowing to Exception.
        sys.stderr.write("Error on_data:{}\n".format(ex))
        time.sleep(5)
        return False
def create_menu_principal():
    """Print the main menu and return the option number chosen by the user."""
    for linea in ('Escoja entre una de la siguientes opciones',
                  '1- Búsqueda Con Stream',
                  '2- Estadísticas desde los json adjuntos (dir:./json)',
                  '3- Salir'):
        print(linea)
    return int(input('Que opción desea?'))
def main():
    """Entry point: collect tweets (live stream or stored JSON), then print
    a series of statistics about them."""
    global df
    # Start from a clean output file.
    if os.path.isfile(file_name):
        os.remove(file_name)
    option = create_menu_principal()
    if option == 1:
        tweets_list = search_with_stream()
        df = create_dataframe_from_list(tweets_list)
    elif option == 2:
        df = create_dataframe_from_json()
    else:
        exit(0)
    # Instantiate the MyTokenizerClass so we can work with it
    mtk = MyTokenizerClass()
    # Word and character counts per tweet
    vect_num = np.vectorize(mtk.num_palabras_caracteres)
    df['num_palabras'] = [d['palabras'] for d in vect_num(df['text'])]
    df['num_caracteres'] = [d['caracteres'] for d in vect_num(df['text'])]
    print('\n')
    # Instantiate the MyStatisticsClass so we can work with it
    msc = MyStatisticsClass(df)
    # Distribution of tweets over the day
    print('\n')
    msc.get_tweets_per_hour()
    # Distribution of the filter-query terms
    print('\n')
    msc.get_count_of_query_words()
    # Time-zone distribution
    print('\n')
    msc.get_time_zone_distribution()
    # Distribution of tweet sources
    print('\n')
    msc.get_porcentaje_fuente_tweet()
    # Sentiment analysis of the tweets
    vect_pol = np.vectorize(msc.get_polarity_classification)
    df['sa'] = vect_pol(df['text'])
    print('\n')
    msc.get_sentimental_analysis()
    # Average tweet length
    print('\n')
    msc.get_media_longitud()
    # Tweet with the most characters
    max_carac = msc.get_custom_max_min('num_caracteres', 'max')
    print('\n')
    print("El tweet más largo es: \n{}"
          .format((df['text'][max_carac['index']])))
    print("Nº de caracteres: {}".format(max_carac['valor']))
    # Tweet with the fewest characters
    min_carac = msc.get_custom_max_min('num_caracteres', 'min')
    print('\n')
    print("El tweet más corto es: \n{}"
          .format((df['text'][min_carac['index']])))
    print("Nº de caracteres: {}".format(min_carac['valor']))
    # Total number of tweets collected
    print('\n')
    print("Total de tweets recogidos: {}".format(df.shape[0]))
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt as e:
        # BUG FIX: exceptions have no `.message` attribute on Python 3
        # (printing it raised AttributeError on Ctrl-C); print the
        # exception object itself, which works on both 2 and 3.
        print(e)
|
from Classification.SVM import SVM
from Classification.NeuralNetwork import NeuralNetwork
from Classification.LogisticRegression import LogisticRegression
from Classification.GradientBoostingTree import GradientBoostingTree
from Classification.RandomForest import RandomForest
from config import *
import numpy as np
class Classifier:
    """Runs k-fold cross-validated training/evaluation of a chosen classifier
    and writes aggregate timing/accuracy statistics to `outputFile`."""

    def __init__(self, outputFile):
        """Remember the output handle; data and classifier are attached later."""
        self.outputFile = outputFile
        self._dataLoaded = False
        self._createdClassifier = False
        # One entry per cross-validation fold.
        self.xTrainData, self.yTrainData = [], []
        self.xDevData, self.yDevData = [], []

    def loadData(self, xdataDir, ydataDir):
        """Load the per-fold train/dev .npy arrays from the given directories."""
        for fold in range(crossValidationFold):
            suffix = str(fold) + ".npy"
            self.xTrainData.append(np.load(xdataDir + "xTrain" + suffix))
            self.yTrainData.append(np.load(ydataDir + "yTrain" + suffix))
            self.xDevData.append(np.load(xdataDir + "xDev" + suffix))
            self.yDevData.append(np.load(ydataDir + "yDev" + suffix))
        self._dataLoaded = True

    def createClassifier(self, classifier, **kwargs):
        """Instantiate the named classifier wrapper, forwarding `kwargs`."""
        factories = {
            "SVM": SVM,
            "NeuralNetwork": NeuralNetwork,
            "LogisticRegression": LogisticRegression,
            "GradientBoostingTree": GradientBoostingTree,
            "RandomForest": RandomForest,
        }
        factory = factories.get(classifier)
        if factory is None:
            raise RuntimeError("Error: specified wrong classifier")
        self.clf = factory(**kwargs)
        self._createdClassifier = True

    def classify(self):
        """Train/test on every fold, then write mean/std of the timings (ms)
        and accuracies (%) as one formatted line."""
        if not self.metAllRequirement():
            raise RuntimeError("Error: need to createClassifier and loadData first")
        trainingTimes, testingTimes, accuracies = [], [], []
        for fold in range(crossValidationFold):
            trainingTimes.append(self.clf.train(self.xTrainData[fold], self.yTrainData[fold]))
            [testingTime, accuracy] = self.clf.test(self.xDevData[fold], self.yDevData[fold])
            testingTimes.append(testingTime)
            accuracies.append(accuracy)
        self.outputFile.write("%16.2f %16.2f %16.2f %16.2f %15.2f%% %15.2f%%\n" %
                              (np.mean(trainingTimes)*1000, np.std(trainingTimes)*1000,
                               np.mean(testingTimes)*1000, np.std(testingTimes)*1000,
                               np.mean(accuracies)*100, np.std(accuracies)*100))
        #self.outputFile.flush()

    def metAllRequirement(self):
        """True once both createClassifier() and loadData() have been called."""
        return self._createdClassifier and self._dataLoaded
|
from setuptools import setup
# Minimal setuptools manifest for the sample package.
setup(
    name="mysci",                    # distribution name used by pip
    version="1.0.0",
    description="A sample package",
    author="Xdev, DDamico",
    author_email="damico@ucar.edu",
    packages=["mysci"],              # importable packages to ship
    install_requires=[])             # no runtime dependencies
|
import unittest
import os
from APIGetCSVWrite.src import APIHelper
class TestAPI(unittest.TestCase):
    """Integration test for APIHelper against the live SWAPI 'people' endpoint."""

    # CSV artefact produced by the test and removed in tearDown.
    OUTPUT_FILE = 'output_001.csv'

    def _setUp(self, url):
        # Not unittest's setUp(): a helper the test calls with a specific URL.
        self.obj = APIHelper.APIHelper(url)

    def tearDown(self):
        # BUG FIX: previously `pass`, leaving the output CSV behind after
        # every run; remove it so repeated runs start clean.
        if os.path.exists(self.OUTPUT_FILE):
            os.remove(self.OUTPUT_FILE)

    def test_responseNotEmpty(self):
        url = "https://swapi.dev/api/people/"
        self._setUp(url)
        api_o = self.obj.star_wars_characters()
        self.obj.append_to_file(self.OUTPUT_FILE)
        # unittest assertions give informative failure messages and, unlike
        # bare `assert`, are not stripped when running under `python -O`.
        self.assertIsInstance(api_o, list)
        self.assertGreater(len(api_o), 0)
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
|
from django import template
register = template.Library()
# @register.filter
# def foo(self):
# return self + ' active'
@register.filter
def active(value, arg):
    """Template filter: return 'active' when `value` equals `arg`, else ''.

    BUG FIX: the original else branch was a bare `''` expression with no
    `return`, so the filter yielded None (rendered as "None" in templates)
    instead of an empty string.
    """
    if value == arg:
        return 'active'
    return ''
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 3 21:50:31 2017
@author: Marco
"""
from person import API_KEY, API_SECRET, mailPass
import huo_bi_utils, misc
import time
# Mail settings are read from config.ini (the password comes from person.py).
mailHost = misc.getConfigKeyValueByKeyName('config.ini', 'mail', 'mailHost')
mailUser = misc.getConfigKeyValueByKeyName('config.ini', 'mail', 'mailUser')
receivers = misc.getConfigKeyValueByKeyName('config.ini', 'mail', 'receivers').split(',')
# Trading pair
symbolValue='btcusdt'
moneyName='usdt'
coinName='btc'
client = huo_bi_utils.ApiClient(API_KEY, API_SECRET)
accs = client.get('/v1/account/accounts')
accountIdValue = accs[0].id
# Consecutive-signal counters: an order is only placed once the same signal
# has fired *SignalMax times in a row.
global buySignal
buySignal=0
global sellSignal
sellSignal=0
buySignalMax=4
sellSignalMax=4
# Get the slope of the 5-period moving average (hourly candles)
def getMA5SlopeList():
    """Fetch the last nine 60-min candles and return the slope series of
    their 5-period moving average."""
    kLine = client.get('/market/history/kline',symbol=symbolValue,period='60min',size='9')
    '''五日均线'''  # MA5 (5-period moving average)
    ma5 = misc.getMALine(kLine,5)
    '''五日均线斜率'''  # slope of the MA5
    ma5Slope = misc.getSlope(ma5)
    return ma5Slope
def getMa5AndCloseAndFatherMa5Slope():
    """Return (latest MA5 on 15-min candles, latest close price, latest slope
    of the higher-timeframe moving average on 60-min candles)."""
    kLine = client.get('/market/history/kline',symbol=symbolValue,period='15min',size='5')
    '''五日均线'''  # MA5 on the 15-minute candles
    ma5 = misc.getMALine(kLine,5)
    last = kLine[0]['close']  # most recent close price
    fatherKLine = client.get('/market/history/kline',symbol=symbolValue,period='60min',size='7')
    # NOTE(review): despite the "Ma5" naming, a 6-period MA is used for the
    # higher timeframe here - confirm that is intentional.
    fatherMa5 = misc.getMALine(fatherKLine,6)
    fatherMa5Slope = misc.getSlope(fatherMa5)
    return ma5[0], last, fatherMa5Slope[0]
# Decide whether to buy
def isBuy(slopeList):
    """Place a market buy with the full quote-currency (usdt) balance when the
    slope pattern signals a rebound; return the new order id on success,
    otherwise None."""
    condition1=slopeList[0] > abs(slopeList[2]) and 0 > slopeList[1] and slopeList[1] > slopeList[2] and slopeList[2] > slopeList[3]
    condition2=slopeList[0] > 3
    print('isBuy条件判断情况:','\t',condition1,'\t',condition2)
    if condition1 or condition2:
        '''调用买入'''  # place the market buy order
        result = place(getBlance(moneyName),'buy-market')
        if result['status'] == 'ok':
            '''当status为ok时,返回订单id'''  # on 'ok', return the order id
            return result['data']
# Decide whether to sell
def isSell(slopeList):
    """Place a market sell with the full coin balance when the slope pattern
    signals a downturn; return the new order id on success, otherwise None.

    Note: the original also computed two "< -30" threshold conditions that
    were never referenced by the print or the branch; those dead locals have
    been removed without changing behavior.
    """
    condition3=slopeList[0] < -20 and 0 < slopeList[1] and slopeList[1] < slopeList[2] and slopeList[2] < slopeList[3]
    condition4=slopeList[0] < -40 and 0 < slopeList[1] and 0 < slopeList[2] and 0 < slopeList[3]
    print('isSell条件判断情况:','\t',condition3,'\t',condition4)
    if condition3 or condition4:
        # place the market sell order
        result = place(getBlance(coinName),'sell-market')
        if result['status'] == 'ok':
            # on 'ok', return the new order's id
            return result['data']
# Query current and historical trade executions
def getMatchResults():
    """Return this symbol's matched (executed) trades from the exchange."""
    matchResults = client.get('/v1/order/matchresults',symbol=symbolValue)
    return matchResults
# Query current and historical orders
def getOrders():
    """Return this symbol's orders that are in the 'filled' state."""
    orders = client.get('/v1/order/orders',symbol=symbolValue,states='filled')
    return orders
# Classify the latest manual (web) trade: side, executed price and fees
def lastMatchResultIsWhatAndPriceAndFees(matchResults):
    """Scan `matchResults` in order and, for the first manually placed
    ('web') trade on the configured symbol, return a tuple
    ('buy'|'sell', price, filled fees); None when no such trade exists."""
    for matchResult in matchResults:
        # Only consider manual trades on the configured trading pair.
        if matchResult['source'] != 'web' or matchResult['symbol'] != symbolValue:
            continue
        side = matchResult['type']
        price = matchResult['price']
        fees = matchResult['filled-fees']
        if side in ('buy-market', 'buy-limit'):
            return 'buy', price, fees
        if side in ('sell-market', 'sell-limit'):
            return 'sell', price, fees
# Create and submit a new order; returns the exchange response
def place(amount,typeValue):
    """Submit an order for the configured account/symbol.

    For limit orders `amount` is the quantity; for 'buy-market' it is the
    amount of quote currency to spend, for 'sell-market' the amount of coin
    to sell.
    """
    params = {
        'account-id': accountIdValue,
        'amount': amount,
        'symbol': symbolValue,
        'type': typeValue,
    }
    print(params)
    return client.post('/v1/order/orders/place',params)
# Query the details of a single order
def getOrderInfo(orderId):
    """Return the detail record for the given order id."""
    return client.get('/v1/order/orders/%s' % orderId)
# Price of the most recent market-buy execution, marked up
def getLastBuyOrderPrice():
    """Return the latest market-buy price for this symbol * 1.01, rounded to
    4 decimals (the break-even-plus-margin sell threshold); None when no
    market buy is found.

    NOTE(review): the original comment said *1.005 but the code multiplies
    by 1.01 - confirm which margin is intended.
    """
    matchResults=getMatchResults()
    for match in matchResults:
        if match['type']=='buy-market' and match['symbol']==symbolValue:
            return round(float(match['price']) * 1.01,4)
# Get the available ('trade') balance for a currency, e.g. usdt or btc
def getBlance(currency):
    """Return the free balance string for `currency`, or None if not found.

    (The name is a legacy typo of 'getBalance'; kept because callers use it.)
    """
    subaccs = client.get('/v1/account/accounts/%s/balance' % accountIdValue)
    for sub in subaccs['list']:
        if sub['currency'] == currency and sub['type'] == 'trade':
            return sub['balance']
# Truncate (not round) a numeric string to 4 decimal places via slicing
def getFloatStr(numberStr):
    """Return `numberStr` cut after the 4th digit past the decimal point."""
    return numberStr[:numberStr.index('.') + 5]
def isBuyOrSellByMa5ValueAndCloseValue(operationType,ma5Value,closeValue,fatherMa5Slope):
    """Core signal logic: compare the MA5 with the latest close (plus the
    higher-timeframe trend) and fire a market buy/sell once the same signal
    has repeated buySignalMax/sellSignalMax times in a row.

    :param operationType: the last executed side, 'buy' or 'sell'
    :param ma5Value: latest MA5 value on the working timeframe
    :param closeValue: latest close price
    :param fatherMa5Slope: slope of the higher-timeframe moving average
    :return: tuple (side to record: 'buy'|'sell', order id or None)
    """
    condition1=ma5Value < closeValue #True = buy opportunity, False = sell opportunity
    condition2=fatherMa5Slope > 0 #True = uptrend, False = downtrend
    print('均线=',ma5Value,'\t收盘价=',closeValue,'\t趋势指导=',fatherMa5Slope,'\tcondition1=',condition1,'\tcondition2=',condition2)
    global buySignal
    global sellSignal
    if condition1:
        if operationType == 'buy':
            print('已买入,等待卖出机会')
            buySignal = 0
            return 'buy', None
        if not condition2:
            print('下跌趋势不买入')
            buySignal = 0
            return 'sell', None
        print('买入信号+1')
        buySignal = buySignal+1
        if buySignal >= buySignalMax:
            # Execute the buy: spend the USDT amount recorded in config.ini
            amount=misc.getConfigKeyValueByKeyName('config.ini',symbolValue,'usdt')
            orderId = place(amount,'buy-market')
            buySignal = 0
            return 'buy', orderId
        else:
            return 'sell', None
    else:
        if operationType == 'sell':
            print('已卖出,等待买入机会')
            sellSignal = 0
            return 'sell', None
        # Sell path: fetch the profit-target price (last buy price * 1.01)
        price=getLastBuyOrderPrice()
        if condition2:
            if price > closeValue:  # uptrend and target still above market: hold
                print('price=',price)
                print('closeValue=',closeValue)
                print('上升趋势,涨幅未超过1%,不卖出')
                sellSignal = 0
                return 'buy', None
        print('卖出信号+1')
        sellSignal = sellSignal+1
        if sellSignal >= sellSignalMax:
            print('price=',price)
            print('closeValue=',closeValue)
            # Truncate the coin balance to 4 decimals for the sell amount
            balanceStr=getBlance(coinName)
            pointIndex=balanceStr.index('.')
            amount=balanceStr[0:5+pointIndex]
            orderId = place(amount,'sell-market')
            sellSignal = 0
            return 'sell', orderId
        else:
            return 'buy' ,None
def tactics2(operationType):
    """One polling step of the strategy: read market state, run the buy/sell
    signal logic, persist the chosen side in config.ini and, when an order
    was placed, record sell proceeds and email a trade report.

    :param operationType: last executed side ('buy' or 'sell') from config.ini
    """
    try:
        print(misc.getTimeStr())
        ma5Value, closeValue, fatherMa5Slope= getMa5AndCloseAndFatherMa5Slope()
        operation,orderId=isBuyOrSellByMa5ValueAndCloseValue(operationType,ma5Value,closeValue,fatherMa5Slope)
        misc.setConfigKeyValue('config.ini',symbolValue,'type',operation)
        if orderId:
            # Give the exchange time to fill the market order.
            time.sleep(30)
            if operation == 'sell':
                tempOrder=getOrderInfo(orderId)
                fieldCashAmount=float(tempOrder['field-cash-amount'])
                fieldFees=float(tempOrder['field-fees'])
                # Net proceeds (cash minus fees) become the next buy budget.
                usdt=getFloatStr(str(fieldCashAmount-fieldFees))
                misc.setConfigKeyValue('config.ini',symbolValue,'usdt',usdt)
            orderInfo=getMatchResults()[0]
            print(orderInfo)
            # Build an HTML trade report and email it to the configured receivers.
            content='<html>'
            content+='<p>symbol(交易对)=%s</p>' % orderInfo['symbol']
            content+='<p>filled-amount(订单数量)=%s</p>' % orderInfo['filled-amount']
            content+='<p>filled-fees(已成交手续费)=%s</p>' % orderInfo['filled-fees']
            content+='<p>price(成交价格)=%s</p>' % orderInfo['price']
            content+='<p>type(订单类型(buy-market:市价买, sell-market:市价卖))=%s</p>' % orderInfo['type']
            content+='<p>%s</p>' % str(orderInfo)
            content+='</html>'
            misc.sendEmail(mailHost, mailUser, mailPass, receivers, '交易报告', content)
    except Exception as e:
        # Log and continue so the outer polling loop survives API hiccups.
        print(e)
'''main'''
# Poll loop: every 15 seconds re-read the last executed side from config.ini
# and run one strategy step.
isTrue=True
while isTrue:
    # Last operation type: 'buy' or 'sell'
    operationType=misc.getConfigKeyValueByKeyName('config.ini',symbolValue,'type')
    tactics2(operationType)
    time.sleep(15)
|
from flask import Flask, jsonify,request;
app=Flask(__name__);
@app.route("/", methods=['GET'])
def hello():
    """GET / -> JSON greeting payload."""
    return jsonify({"Greeting": "Hello World"})
@app.route("/", methods=['POST'])
def abc():
    """POST / -> JSON farewell payload."""
    return jsonify({"Finish": "Bye"})
@app.route("/abc/", methods=['GET','POST'])
def xyz():
    """GET or POST /abc/ -> method-dependent JSON payload."""
    if request.method == 'POST':
        return jsonify({"Byeeeee": "Guesttrtttttttttt!!!"})
    if request.method == 'GET':
        return jsonify({"Welcome": "Guest!!!"})
@app.route("/cube/<int:num>",methods=['GET'])
def cube(num):
    """GET /cube/<num> -> {'result': num**3}."""
    return jsonify({'result': num ** 3})
if __name__ == '__main__':
    # Development server with debugger/reloader enabled; not for production.
    app.run(debug=True);
|
from pyvisa import constants
from enum import Enum, IntEnum
class Bank(Enum):
"""
NI ELVIS III bank.
Values:
A: bank A
B: bank B
"""
A = 'A'
B = 'B'
class AIChannel(IntEnum):
""" NI ELVIS III Analog Input channel. """
AI0 = 0
AI1 = 1
AI2 = 2
AI3 = 3
AI4 = 4
AI5 = 5
AI6 = 6
AI7 = 7
class AOChannel(IntEnum):
""" NI ELVIS III Analog Output channel. """
AO0 = 0
AO1 = 1
class DIOChannel(IntEnum):
""" NI ELVIS III Digital Input and Output channel. """
DIO0 = 0
DIO1 = 1
DIO2 = 2
DIO3 = 3
DIO4 = 4
DIO5 = 5
DIO6 = 6
DIO7 = 7
DIO8 = 8
DIO9 = 9
DIO10 = 10
DIO11 = 11
DIO12 = 12
DIO13 = 13
DIO14 = 14
DIO15 = 15
DIO16 = 16
DIO17 = 17
DIO18 = 18
DIO19 = 19
class AIRange(Enum):
"""
NI ELVIS III Analog Input range in volt.
Values:
PLUS_OR_MINUS_1V:
Specifies the current allowed maximum value as 1V and minimum
value as -1V for the channel.
PLUS_OR_MINUS_2V:
Specifies the current allowed maximum value as 2V and minimum
value as -2V for the channel.
PLUS_OR_MINUS_5V:
Specifies the current allowed maximum value as 5V and minimum
value as -5V for the channel.
PLUS_OR_MINUS_10V:
Specifies the current allowed maximum value as 10V and minimum
value as -10V for the channel.
"""
PLUS_OR_MINUS_1V = '+/-1V'
PLUS_OR_MINUS_2V = '+/-2V'
PLUS_OR_MINUS_5V = '+/-5V'
PLUS_OR_MINUS_10V = '+/-10V'
class AIMode(Enum):
    """
    NI ELVIS III Analog Input mode.
    Values:
        SINGLE_ENDED:
            Determines the voltage of a channel.
        DIFFERENTIAL:
            Determines the "differential" voltage between two separate
            channels.
    """
    # (Docstring fixed: it previously described a member called NONE, but the
    # member is SINGLE_ENDED.)
    SINGLE_ENDED = False
    DIFFERENTIAL = True
class EncoderChannel(IntEnum):
""" NI ELVIS III Encoder channel. """
ENC0 = 0
ENC1 = 1
ENC2 = 2
ENC3 = 3
ENC4 = 4
ENC5 = 5
ENC6 = 6
ENC7 = 7
ENC8 = 8
ENC9 = 9
class EncoderMode(Enum):
"""
NI ELVIS III Encoder mode.
Values:
QUADRATURE:
Specifies that the encoder generates two phase signals that are
offset by 90 degrees. The count value changes each time there is a
falling or rising edge on either of the phases. Most encoders
generate quadrature phase signals.
STEP_AND_DIRECTION:
Specifies that the encoder generates a direction signal and a
clock signal. The direction signal determines the direction of the
encoder. The count value changes on every rising edge of the clock
signal.
"""
QUADRATURE = 'quadrature'
STEP_AND_DIRECTION = 'step and direction'
class UARTBaudRate(IntEnum):
""" NI ELVIS III UART baud rate. """
RATE110 = 110
RATE300 = 300
RATE600 = 600
RATE1200 = 1200
RATE2400 = 2400
RATE4800 = 4800
RATE9600 = 9600
RATE19200 = 19200
RATE38400 = 38400
RATE57600 = 57600
RATE115200 = 115200
RATE230400 = 230400
class UARTDataBits(IntEnum):
""" NI ELVIS III UART data bits. """
BITS7 = 7
BITS8 = 8
class UARTStopBits(Enum):
"""
NI ELVIS III UART stop bits.
Values:
ONE: 1 stop bit
TWO: 2 stop bits
"""
ONE = constants.StopBits.one
TWO = constants.StopBits.two
class UARTParity(Enum):
""" NI ELVIS III UART parity. """
NO = constants.Parity.none
ODD = constants.Parity.odd
EVEN = constants.Parity.even
class UARTFlowControl(Enum):
    """
    NI ELVIS III UART flow control.
    Values:
        NONE:
            The transfer mechanism does not use flow control. Buffers on both
            sides of the connection are assumed to be large enough to hold all
            data transferred.
        XOR_XOFF:
            The transfer mechanism uses the XON and XOFF characters to perform
            flow control. The transfer mechanism controls input flow by sending
            XOFF when the receive buffer is nearly full, and it controls the
            output flow by suspending transmission when XOFF is received.
        RTS_CTS:
            The transfer mechanism uses the RTS output signal and the CTS
            input signal to perform flow control. The transfer mechanism
            controls input flow by unasserting the RTS signal when the receive
            buffer is nearly full, and it controls output flow by suspending
            transmission when the CTS signal is unasserted.
        DTR_DSR:
            The transfer mechanism uses the DTR output signal and the DSR input
            signal to perform flow control. The transfer mechanism controls
            input flow by unasserting the DTR signal when the receive buffer is
            nearly full, and it controls output flow by suspending the
            transmission when the DSR signal is unasserted.
    """
    # (RTS_CTS description fixed: it was a copy-paste of the XON/XOFF text.)
    NONE = constants.VI_ASRL_FLOW_NONE
    XOR_XOFF = constants.VI_ASRL_FLOW_XON_XOFF
    RTS_CTS = constants.VI_ASRL_FLOW_RTS_CTS
    DTR_DSR = constants.VI_ASRL_FLOW_DTR_DSR
class I2CSpeedMode(Enum):
"""
NI ELVIS III I2C speed mode.
Values:
STANDARD: 100 kbps
FAST: 400 kbps
"""
STANDARD = 'STANDARD'
FAST = 'FAST'
class SPIClockPhase(Enum):
"""
NI ELVIS III SPI clock phase.
Values:
LEADING:
The data is stable on the leading edge and changes on the trailing
edge.
TRAILING:
The data is stable on the trailing edge and changes on the leading
edge.
"""
LEADING = 'Leading'
TRAILING = 'Trailing'
class SPIClockPolarity(Enum):
"""
NI ELVIS III SPI clock polarity.
Values:
LOW:
The clock signal is low when idling, the leading edge is a rising
edge, and the trailing edge is a falling edge.
HIGH:
The clock signal is high when idling, the leading edge is a
falling edge, and the trailing edge is a rising edge.
"""
LOW = 'Low'
HIGH = 'High'
class SPIDataDirection(Enum):
"""
NI ELVIS III SPI data direction.
Values:
LSB:
Send the least significant bit first and the most significant bit
last.
MSB:
Send the most significant bit first and the least significant bit
last.
"""
LSB = 'Least Significant Bit First'
MSB = 'Most Significant Bit First'
class Led(IntEnum):
"""
NI ELVIS III LED.
Values:
LED0:
Enables state setting for LED0.
LED1:
Enables state setting for LED1.
LED2:
Enables state setting for LED2.
LED3:
Enables state setting for LED3.
"""
LED0 = 0
LED1 = 1
LED2 = 2
LED3 = 3
class IRQNumber(IntEnum):
""" NI ELVIS III interrupt number. """
IRQ1 = 1
IRQ2 = 2
IRQ3 = 3
IRQ4 = 4
IRQ5 = 5
IRQ6 = 6
IRQ7 = 7
IRQ8 = 8
class AIIRQChannel(IntEnum):
""" NI ELVIS III Analog Input Interrupt channel """
AI0 = 0
AI1 = 1
class AIIRQType(Enum):
"""
NI ELVIS III Analog Input Interrupt type.
Values:
RISING:
Register an interrupt on a rising edge of the analog input signal.
FALLING:
Register an interrupt on a falling edge of the analog input
signal.
"""
RISING = 'rising'
FALLING = 'falling'
class DIIRQChannel(IntEnum):
""" NI ELVIS III Digital Input Interrupt channel. """
DIO0 = 0
DIO1 = 1
DIO2 = 2
DIO3 = 3
|
import tensorflow as tf
import numpy as np  # BUG FIX: was `import numopy`
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets('MNIST_data')


def dataset_to_arrays(dataset):
    """Return (images, int32 labels) from one tf.contrib Datasets split.

    Renamed from `input` to avoid shadowing the builtin.
    """
    return dataset.images, dataset.labels.astype(np.int32)


# BUG FIX: the feature key must match the key used in the numpy_input_fn
# dicts below ("x", lowercase); it was "X".
feature_columns = [tf.feature_column.numeric_column("x", shape=[28, 28])]
classifier = tf.estimator.DNNClassifier(
    feature_columns=feature_columns,
    hidden_units=[500, 500, 500],   # BUG FIX: the missing comma here was a SyntaxError
    optimizer=tf.train.AdamOptimizer(),
    n_classes=10,                   # BUG FIX: the keyword is n_classes, not num_classes
    dropout=0.8,
    model_dir="./tmp/mnist_model",
)

# BUG FIX: use the train split; `input(mnist)` passed the whole Datasets
# bundle and `input(mnist[1])` indexed it, both of which fail.
train_images, train_labels = dataset_to_arrays(mnist.train)
train_input_fn = tf.estimator.inputs.numpy_input_fn(
    x={"x": train_images},
    y=train_labels,
    num_epochs=None,
    batch_size=50,
    shuffle=True,
)
classifier.train(input_fn=train_input_fn, steps=1000)  # BUG FIX: input_fn, not input_gfn

test_images, test_labels = dataset_to_arrays(mnist.test)
test_input_fn = tf.estimator.inputs.numpy_input_fn(
    x={"x": test_images},
    y=test_labels,
    num_epochs=1,
    shuffle=False,
)
accuracy_score = classifier.evaluate(input_fn=test_input_fn)["accuracy"]
print("\nTest Accuracy: {0:f}%\n".format(accuracy_score * 100))
|
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""operator dsl function:square"""
import akg
from akg.utils import validation_check as vc_util
def square(data):
    """
    Compute square.

    Args:
        data: Tensor.

    Return:
        Tensor, has the same shape and type as data.
    """
    # Validate dtype and shape before building the compute op.
    vc_util.ops_dtype_check(data.dtype, vc_util.DtypeForDavinci.ALL_TYPES)
    shape = [x.value for x in data.shape]
    vc_util.check_shape(shape)
    # Elementwise x * x on the Davinci (cce) backend implements the square.
    res = akg.lang.cce.vmul(data, data)
    return res
|
from __future__ import division
import numpy as np
import cgt
from cgt import nn
import types
import dill as pickle
from matplotlib import pyplot as plt
def make_variable(name, shape):
    """Return the source line declaring `name` as a cgt tensor with the given
    fixed shape (the tensor rank is inferred from len(shape))."""
    template = "{0} = cgt.tensor(cgt.floatX, {2}, fixed_shape={1})"
    return template.format(name, shape, len(shape))
def normalize(var):
    """Divide `var` by its sum over axis 2 (broadcast back over that axis) so
    each slice along the last axis sums to 1; the 'xxx,xx1' pattern tells cgt
    how the two operands broadcast."""
    return cgt.broadcast("/", var, cgt.sum(var,axis=2,keepdims=True), "xxx,xx1")
class struct(dict):
    """A dict whose keys are also attributes (d.x is d['x'])."""
    def __init__(self, **kwargs):
        dict.__init__(self, kwargs)
        # Aliasing __dict__ to the dict itself makes attribute access and
        # item access share the same storage.
        self.__dict__ = self
class Params(object):
    """A flat view over a list of cgt data parameters: read/write all of their
    values as a single 1-D theta vector."""
    def __init__(self,params):
        assert all(param.is_data() and param.dtype == cgt.floatX for param in params)
        self._params = params
    @property
    def params(self):
        return self._params
    def get_values(self):
        """Current value array of every parameter."""
        return [param.op.get_value() for param in self._params]
    def get_shapes(self):
        """Shape tuple of every parameter."""
        return [param.op.get_shape() for param in self._params]
    def get_total_size(self):
        """Total number of scalar entries across all parameters."""
        return sum(np.prod(shape) for shape in self.get_shapes())
    def num_vars(self):
        return len(self._params)
    def set_values(self, parvals):
        """Assign `parvals` (one array per parameter, matching shapes) to the
        parameters."""
        assert len(parvals) == len(self._params)
        for (param, newval) in zip(self._params, parvals):
            param.op.set_value(newval)
            # BUG FIX: this comparison was a bare no-op expression; it was
            # clearly meant to verify the shape of the value just assigned.
            assert param.op.get_shape() == newval.shape
    def set_value_flat(self, theta):
        """Scatter the flat vector `theta` back into the parameter arrays."""
        theta = theta.astype(cgt.floatX)
        arrs = []
        n = 0
        for shape in self.get_shapes():
            size = np.prod(shape)
            arrs.append(theta[n:n+size].reshape(shape))
            n += size
        assert theta.size == n
        self.set_values(arrs)
    def get_value_flat(self):
        """Gather every parameter into one flat vector (inverse of
        set_value_flat)."""
        theta = np.empty(self.get_total_size(),dtype=cgt.floatX)
        n = 0
        for param in self._params:
            s = param.op.get_size()
            theta[n:n+s] = param.op.get_value().flat
            n += s
        assert theta.size == n
        return theta
def backtrack(func, x, dx, f, g, alpha=0.25, beta=0.9, lb=1e-6, ub=1):
    """Backtracking (Armijo) line search.

    Starting from step size `ub`, shrink by factor `beta` until the
    sufficient-decrease condition func(x + t*dx) < f + t*alpha*g.dot(dx)
    holds, or `t` drops below `lb`.

    BUG FIX: the original returned the constant shrink factor `beta` instead
    of the step size `t` it had just computed, making the search useless.

    :param func: objective callable
    :param x: current point
    :param dx: search direction
    :param f: objective value at x, i.e. func(x)
    :param g: gradient at x
    :return: accepted step size t
    """
    t = ub
    while func(x + t*dx) >= f + t*alpha*g.dot(dx):
        t *= beta
        if t < lb:
            # print() call form works on both Python 2 and 3.
            print("Warning: backtracking hit lower bound of {0}".format(lb))
            break
    return t
class Solver(struct):
    """Base class for iterative solvers: tracks iteration count and loss
    history, applies learning-rate decay, optional live plotting and periodic
    snapshots.  Subclasses compute `self.dtheta` and delegate to `update`."""
    # Class-level defaults, overridable through the kwargs dict.
    plot_func = None
    lr_decay = 1
    lr_step = 10000
    plot_step = 100
    fname = None
    def __init__(self, args):
        """
        @param args, a dict containing:
            parameters: alpha, momentum, lr_decay, lr_step, min_alpha
            rmsp: gamma
            etc: plot_step, plot_func, fname, snapstep
        """
        struct.__init__(self, **args)
        if self.plot_func:
            self.plot = plt.figure(); plt.ion(); plt.show(0)
    def initialize(self, model):
        # `loss` is a list of windows; the last (open) window collects losses
        # until the next plot step replaces it with its mean.
        self.model = model
        self.dtheta = np.zeros_like(model.theta)
        self._iter, self.loss = 0, [[]]
    def check_nan(self, loss, grad):
        # True when either the loss or the gradient norm went NaN.
        return np.isnan(loss) or np.isnan(np.linalg.norm(grad))
    def decay_lr(self):
        # Multiplicative decay every lr_step iterations, floored at min_alpha.
        if self._iter % self.lr_step == 0:
            self.alpha *= self.lr_decay
            self.alpha = max(self.alpha, self.min_alpha)
    def draw(self):
        # Close the current loss window (replace the list by its mean), then
        # redraw via plot_func when plotting is enabled.
        if self._iter % self.plot_step == 0:
            self.loss[-1] = np.mean(self.loss[-1])
            self.loss.append([])
            if self.plot_func:
                self.plot.clear()
                self.plot_func(range(0,self._iter,self.plot_step), self.loss)
                plt.draw()
    def snapshot(self):
        # Periodically dump the model together with the picklable solver
        # settings (functions and the model itself are excluded).
        if self.fname and self._iter % self.snap_step == 0:
            self.model.dump(self.fname.format(self._iter),
                {k:v for k,v in self.items() if type(v) != types.FunctionType and not isinstance(v, Model)})
    def update(self, loss, grad, acc, disp):
        """Shared book-keeping: NaN guard, progress logging, lr decay,
        plotting and snapshotting.  Returns (dtheta, stop_flag)."""
        if self.check_nan(loss, grad):
            if self.pass_nan:
                print "something is nan, skipping..."
                return np.zeros_like(self.dtheta),False
            else:
                return np.zeros_like(self.dtheta),True
        self._iter += 1; self.loss[-1].append(float(loss))
        if acc is not None and disp:
            print "iter: {:0d}, loss: {:1.4e}, gnorm: {:2.4e}, alpha: {:3.4e}, acc: {:4.4f}"\
                .format(self._iter, float(loss), np.abs(grad).max(), self.alpha, acc)
        elif disp:
            print "iter: {:0d}, loss: {:1.4e}, gnorm: {:2.4e}, alpha: {:3.4e}"\
                .format(self._iter, float(loss), np.abs(grad).max(), self.alpha)
        self.decay_lr()
        self.draw()
        self.snapshot()
        return self.dtheta,False
class RMSP(Solver):
    """RMSProp with momentum."""
    def initialize(self, theta):
        # NOTE(review): the base initialize expects the model (it reads
        # model.theta), so `theta` here is actually the model object - the
        # parameter name is misleading; confirm against callers.
        Solver.initialize(self, theta)
        # Running mean of squared gradients, epsilon-seeded to avoid /0.
        self.sqgrad = np.zeros_like(theta) + 1e-6
        self.method = "rmsp"
    def update(self, loss, grad, acc=None, disp=True):
        # sqgrad <- gamma*sqgrad + (1-gamma)*grad^2; step scaled by
        # 1/sqrt(sqgrad) and blended with the previous step via momentum.
        self.sqgrad = self.gamma*self.sqgrad + (1-self.gamma)*np.square(grad)
        self.dtheta = self.momentum * self.dtheta - \
            (1-self.momentum) * self.alpha * grad / np.sqrt(self.sqgrad)
        return Solver.update(self, loss, grad, acc, disp)
class CG(Solver):
    """Nonlinear conjugate gradient (Polak-Ribiere) with optional
    backtracking line search."""
    def initialize(self, theta):
        # NOTE(review): as in RMSP, `theta` appears to actually be the model
        # object passed through to the base initialize - confirm.
        Solver.initialize(self, theta)
        self.prev_grad = np.zeros_like(theta)
        self.method = 'cg'
    def update(self, loss, grad, acc=None, disp=True):
        # Polak-Ribiere coefficient, clamped at 0 (restarts on negative).
        dx,dxp = -grad, self.prev_grad
        beta = max(0, dx.dot(dx - dxp) / (dxp.dot(dxp)-1e-10) )
        self.dtheta = dx + beta*dxp
        if self.allow_backtrack:
            # Line search sets the step size before scaling the direction.
            self.alpha = backtrack(self.model.func, self.model.theta, self.dtheta, loss, grad)
        self.dtheta *= self.alpha
        self.prev_grad = dx
        return Solver.update(self, loss, grad, acc, disp)
"""
not yet implemented:
class LBFGS(Solver):
def initialize(self, theta):
Solver.initialize(self, theta)
self.method = "lbfgs"
def update(self, loss, grad, acc):
q = grad; s,y = self.s, self.y
for i in range(m):
q -= 0
return Solver.update(self, loss, grad, acc)
"""
class Model(object):
    """Base class tying a parameter collection (Params) to a scalar loss;
    keeps a flat copy of all parameter values in `self.theta`."""
    def initialize(self, loss, scale):
        """Randomise all zero-valued parameter entries uniformly in
        [-scale, scale] (preserving entries that are already nonzero) and
        return the flat gradient expression of `loss` w.r.t. the parameters."""
        self._iter = 0
        self.pc = Params(self.params)
        cur_val = self.pc.get_value_flat()
        # Preserve entries that were already initialised (nonzero).
        idx = cur_val.nonzero()
        new_val = np.random.uniform(-scale, scale, size=(self.pc.get_total_size(),))
        new_val[idx] = cur_val[idx]
        self.sync(new_val)
        grad = cgt.concatenate([g.flatten() for g in cgt.grad(loss, self.params)])
        return grad
    def sync(self, theta):
        """Copy `theta` into self.theta and push it into the cgt parameters."""
        self.theta = theta.copy()
        self.pc.set_value_flat(self.theta)
    def dump(self, fname, args=None):
        """Pickle `args` plus the current theta and shapes to `fname`.

        BUG FIX: the default was the mutable `args={}`, which is shared
        between calls and mutated below, so keys written by one dump leaked
        into later dumps; use the None sentinel instead.
        """
        if args is None:
            args = {}
        with open(fname, 'wb') as f:
            args["theta"], args["shapes"] = self.theta, self.pc.get_shapes()
            pickle.dump(args, f)
    def restore(self, fname):
        """Load a snapshot written by dump(), sync its theta, and return the
        whole snapshot dict."""
        with open(fname, 'rb') as f:
            data = pickle.load(f)
            self.sync(data["theta"])
            return data
class TextData(object):
    """Character-level corpus: builds a char<->id vocabulary from a text file
    and serves one-hot training batches."""
    def __init__(self, input_file):
        # The `with` block already closes the file; the original's explicit
        # f.close() inside it was redundant and has been dropped.
        with open(input_file) as f:
            self._raw = f.read()
        # Assign ids in order of first appearance in the text.
        self._encoding, self._decoding = {}, {}
        for ch in self._raw:
            if ch not in self._encoding:
                code = len(self._encoding)
                self._encoding[ch] = code
                self._decoding[code] = ch
        self._heads = []
    @property
    def size(self):
        """Vocabulary size (number of distinct characters)."""
        return len(self._encoding)
    def encode(self, ch):
        """One-hot row vector of shape (1, size) for character `ch`."""
        # BUG FIX (portability): xrange is Python-2 only; range behaves
        # identically here on both 2 and 3.
        return np.array([[1 if i == self._encoding[ch] else 0 for i in range(self.size)]])
    def decode(self, num):
        """Character for id `num`."""
        return self._decoding[num]
    def get_batch(self, seq_length, batch_size):
        """Return (inputs, targets): two (seq_length, batch_size, size)
        float32 one-hot arrays where targets are inputs shifted one step."""
        seq_length += 1; batch = np.zeros((batch_size, seq_length, self.size), dtype=np.float32)
        # NOTE(review): heads are recomputed on every call, so the per-batch
        # head updates below never carry over between calls - confirm whether
        # sequential scanning of the corpus was intended.
        self._heads = [i * len(self._raw)//batch_size for i in range(batch_size)]
        for b in range(batch_size):
            seg = self._raw[self._heads[b]:self._heads[b] + seq_length]
            batch[b] = np.array([self.encode(ch).ravel() for ch in seg])
            self._heads[b] = (self._heads[b] + seq_length) % (len(self._raw) - seq_length)
        # Reorder to time-major: (seq_length, batch_size, vocab).
        batch = batch.transpose((1,0,2))
        return batch[:-1], batch[1:]
|
# -*- coding: utf-8 -*-
import os
from datetime import datetime
import sqlite3
import pandas as pd
import openpyxl as op
# Prompt for the table (workbook) name to work with
def get_table(path="./second_input"):
    """Ask the user for a table (file) name, chdir into `path`, and return
    the name if such a file exists there (None otherwise)."""
    print("Enter table name...")
    name = str(input())
    print("table name is " + name)
    os.chdir(path)
    if name in os.listdir():
        print("name found in folder.")
        return name
    print("name not found.")
# Build one large dataframe by parsing every sheet of the workbook
def pars_table(name):
    """Parse every worksheet of workbook `name` (via openpyxl) into a single
    DataFrame with the headers in `list_of_headers` plus a 'name' column
    recording the source workbook.

    NOTE(review): rows are read from row 3 down; only the first cell
    (index 0) and the cells past column 11 are mapped into headers (index
    12+ maps back onto headers[1:]), and each row overwrites the shared
    `dictionary_to_write` before being appended - verify this matches the
    actual workbook layout.
    """
    name_of_table = name
    list_of_headers = ["Номенклатура", "Артикул", "Характеристики", "Единицы измерения", "Оптовая", "Розничные цены СПБ", "Остаток", "Заказать"]
    dictionary_to_write = { header: 0 for header in list_of_headers}
    dictionary_to_write["name"] = []
    tamplate_of_data_frame = {header: [] for header in list_of_headers}
    frame_of_tables = pd.DataFrame(tamplate_of_data_frame)
    work_book = op.load_workbook(name)
    # (original note) the source export's column layout: order number, status,
    # creation/payment times, prices, returns, items, SKU, item ids, notes,
    # recipient/address fields, shipping method, tracking number, timestamps
    for name in work_book.sheetnames:
        sheet = work_book[name]
        maxrow = sheet.max_row  # highest populated row on this sheet
        maxcol = sheet.max_column  # highest populated column on this sheet
        print("rows: cols:")
        print(maxrow, maxcol)
        print("Generate dataFrame...")
        for row in sheet.iter_rows(min_row=3, min_col=1, max_row=maxrow, max_col=maxcol):
            index_of_headers = 0
            for cell in row:
                if index_of_headers == 0:
                    index = index_of_headers
                    # empty cells are stored as the string "None"
                    if cell.value == "":
                        dictionary_to_write[list_of_headers[index]] = "None"
                    else:
                        dictionary_to_write[list_of_headers[index]] = cell.value
                if index_of_headers > 11:
                    # columns 12+ wrap back onto headers starting at index 1
                    index = index_of_headers - 11
                    if cell.value == "" or cell.value == None:
                        dictionary_to_write[list_of_headers[index]] = "None"
                    else:
                        dictionary_to_write[list_of_headers[index]] = cell.value
                index_of_headers += 1
            dictionary_to_write["name"] = name_of_table
            frame_of_tables = frame_of_tables.append(dictionary_to_write, ignore_index=True)
    # set_option so every DataFrame column is shown when printing
    pd.set_option('display.max_columns', None)
    return frame_of_tables
# запись выбранных значений в sql бд
def make_table_for_sql(frame_of_tables, conn):
    """Insert the rows of ``frame_of_tables`` into the SQLite table ``out``.

    For every row, the five columns in ``list_of_headers`` are written
    together with the source sheet name (``name`` column), a timestamp and
    three sticky "status" fields.  Whenever a cell value matches one of the
    known catalogue category names below, the matching status level is
    updated and then applies to every following row until the next match
    (presumably category header rows precede their product rows in the
    source workbook -- TODO confirm).

    Parameters:
        frame_of_tables: pandas.DataFrame holding at least the columns in
            ``list_of_headers`` plus a ``name`` column with the sheet name.
        conn: open sqlite3 connection; a single commit is issued at the end.
    """
    # Known catalogue category names, by nesting depth: first = top-level
    # sections, second/third = nested subsections.
    # NOTE(review): "Ножи" appears twice in first_grade_list -- harmless for
    # membership tests, but probably a copy-paste slip.
    first_grade_list = ["Оптика", "Аксесуары для оптика", "Горны и рожки", "Дартс", "Засидки и лабазы", "Звуковые имитаторы",
                        "Кронштейны и инструменты", "Манки, приманки, нейтрализаторы", "Мишени", "Ножи", "Ножи",
                        "Одежда и обувь", "Оптоволоконные мушки", "Пневматика", "Рогатки", "Снаряжение", "Спортивная стрельба",
                        "Средства для чистки и смазки оружия", "Сумки и рюкзаки", "Термосы", "Товары для собак",
                        "Тюнинг оружия", "Фонарики и ЛЦУ", "Холодная пристрелка оружия", "Чехлы и кейсы для оружия", "Чучела", "Уцененные товары"]
    second_grade_list = ["Оптические прицелы", "Бинокли","Дальномеры", "Зрительные трубы", "Коллиматорные прицелы",
                         "Монокуляры", "Тепловизионные приборы", "Цифровые приборы День/ночь",'Крышки для прицелов "Butler Creek"',
                         "Разное", "Ameristep (США)", "ShotTime", "Аксессуары", "iHunt", 'Звуковые имитаторы "Cass Creek" (США)',
                         'Звуковые имитаторы "Mini Phantom" (США)', 'Звуковые имитаторы "MUNDI SOUND" (Испания)', 'Инструменты',
                         'Кронштейны', 'Манки Buck Expert (Канада)', "Манки FAULK'S (США)","Манки Helen Baud (Франция)",
                         "Манки Hubertus (Германия)", "Манки Mankoff (Россия)", "Манки Mick Lacy (Канада)",
                         "Манки Nordik Predator (Швеция)", "Манки PRIMOS", "Нейтрализаторы запаха", "Приманки Buck Expert (Канада)",
                         "LionSteel(Италия)", "McNETT TACTICAL(США)", "Morakniv(Швеция)", "Opinel(Франция)", "Sanrenmu(Китай)",
                         "Tekut(Китай)", "Маски", "Перчатки", "Стрелковые и разгрузочные жилеты", "HIVIZ (США)", "NIMAR (Италия)",
                         "TRUGLO (США)", "БАЛЛОНЧИКИ CO2", "Пневматические пистолеты", "Пульки и шарики для пневматики",
                         "СТРАЙКБОЛ", "Рогатки", "Шарики для рогаток", "Антабки", "Камуфляжная лента", "Кейсы и ящики для патронов, снаряжения и чистки оружия",
                         "Кобуры и сумки для пистолетов", "Ремни, патронташи и подсумки", "Сошки и опоры для оружия",
                         "Стулья", "Тыльники", "Машинки для метания", "Стрелковые наушники и беруши","Стрелковые очки",
                         "Масла и смазки", "Наборы для чистки", "Рюкзаки", "Сумки", "Ягдташи", "THERMOcafe (гарантия 1 год)",
                         "THERMOS (гарантия 5 лет)", "Термосумки и хладоэлементы", "ATA MOULD (Турция)", "Pufgun (Россия)",
                         "Red Heat/AKADEMIA (Россия)", "МОЛОТ (Россия)", "Тюнинг Hartman (Россия)", "Тюнинг Leapers UTG (США)",
                         "Тюнинг VS (Россия)", "Тюнинг Россия", "Лазерные целеуказатели", "Разное", "Фонари LEAPERS UTG (США)",
                         "Фонари NexTORCH (Китай)", "Фонари Sightmark", "Firefield (США)", "Nikko Stirling", "Red-I (ЮАР)",
                         "ShotTime", "Sightmark (США)", "Замки на оружие", "Кейсы для оружия", "Чехлы", "Аксессуары",
                         "Чучела BIRDLAND (Китай)", "Чучела SPORTPLAST (Италия)", "Оптика - УТ", "Тепловизионные приборы -УТ"]
    third_grade_list = ["LEAPERS UTG (США)", "LEUPOLD (США)", "NIKKO STIRLING", "SWAROVSKI (Австрия)", "TARGET OPTIC (Китай)",
                        "Прицелы (Китай)", "Бинокли BUSHNELL (США)", "Бинокли GAUT (Китай)", "Бинокли LEUPOLD (США)",
                        "Бинокли NIKON (Япония)", "Бинокли STEINER (Германия)", "Бинокли VANGUARD (Китай)",
                        "GAUT", "Leupold / Redfield", "Nikko Stirling", "Nikon", "Sightmark", "Vortex", "Коллиматорные прицелы Aimpoint (Швеция)",
                        "Коллиматорные прицелы Firefield (США)", "Коллиматорные прицелы Holosun (США)", "Коллиматорные прицелы Leapers UTG (США)",
                        "Коллиматорные прицелы Redring (Швеция)", "Коллиматорные прицелы SIGHTMARK (США)",
                        "Коллиматорные прицелы Target Optic (Китай)", "Коллиматорные прицелы Tokyo Scope/Hakko (Япония)",
                        "Тепловизионные монокуляры", "Тепловизионные прицелы", "Аксессуары", "Прицелы", "ATN", "Contessa Alessandro (Италия)",
                        "EAW Apel (Германия)", "Innomount (Германия)", "Leapers UTG (США)", "Leupold (США)",
                        "MAK (Германия)", "RECKNAGEL (Германия)", "Кронштейны (Китай)", "Кронштейны (Россия и Белоруссия)",
                        "Пистолеты Cybergun(Swiss Arms, Франция)", "Пистолеты Stalker", "Пульки для пневматики", "Шарики для пневматики",
                        "Allen", "McNett", "Negrini (Италия)", "PLANO (США)", "Разное",
                        "Патронташи", "Подсумки", "Ремни", "Опоры для оружия", "Сошки для оружия", "Walkstool (Швеция)",
                        "3M Peltor(США)", "Allen(США)", "Artilux(Швейцария)", "CassCreek(США)", "Howard Leight(США)",
                        "MSA(Швеция)", "Pro Ears(США)", "Rifleman(США)", "ShotTime", "Стрелковые очки 3M (США)",
                        "Стрелковые очки Allen", "Стрелковые очки ARTILUX (Швейцария)", "Стрелковые очки Randolph Engineering Inc.(США)",
                        "Стрелковые очки Stalker (Тайвань)", "BALLISTOL (Германия)", "Bore Tech (США)", "Butch's (США)",
                        "Iosso (США)", "KANO (США)", "KG Industries (США)", "Milfoam (Финляндия)", "SWEET'S (Австралия)",
                        "Waffen Care (Германия)", "Треал-М (Россия)", "A2S GUN (Россия)", "Bore Tech (США)",
                        "DAC - Универсальные наборы (США)", "HOPPE'S (США)", "J.DEWEY (США)", "NIMAR (Италия)",
                        "ShotTime", "Stil Crin (Италия)", "Лазерные целеуказатели Holosun", "Лазерные целеуказатели Leapers",
                        "Аксессуары для фонарей NexTORCH(Китай)", "Фонари NexTORCH(Китай)", "NEGRINI (Италия)",
                        "PLANO (США)", "ALLEN (США)", "LEAPERS UTG (США)", "ВЕКТОР (Россия)"]
    # The DataFrame columns that are copied into the SQL table.
    list_of_headers = ["Номенклатура", "Артикул", "Оптовая", "Розничные цены СПБ", "Остаток"]
    # NOTE(review): this initial value is never used -- dict_to_write is
    # reassigned from row.to_dict() inside the loop below.
    dict_to_write = {header: list() for header in list_of_headers}
    # Sticky status fields; the empty-tuple placeholders are immediately
    # overwritten with the string "None" below.
    status_to_write = {"first_status": (), "second_status": (), "third_status": ()}
    list_to_sql = []
    status_to_write["first_status"] = "None"
    status_to_write["second_status"] = "None"
    status_to_write["third_status"] = "None"
    # Example source workbook: "Остатки СПБ 21.05.2021.xlsx"
    cursor = conn.cursor()
    # Restrict to the columns we persist, in a fixed order.
    frame_of_tables_loc = frame_of_tables[list_of_headers]
    print("Ganerate dataFrame with statuses")
    for loc in range(len(frame_of_tables_loc.index)):
        row = frame_of_tables_loc.iloc[loc]
        #print(row)
        #print(row.to_dict())
        # Normalise each cell before matching (strip whitespace, map "" to
        # "None"); the row itself is written to SQL unmodified.
        for elem in row:
            if type(elem) == str:
                elem = elem.strip()
            if elem == "":
                elem = "None"
            # A match at a given level resets the deeper levels; deeper-level
            # matches leave the shallower statuses untouched.
            if elem in first_grade_list:
                status_to_write["first_status"] = elem
                status_to_write["second_status"] = "None"
                status_to_write["third_status"] = "None"
                break
            elif elem in second_grade_list:
                status_to_write["second_status"] = elem
                status_to_write["third_status"] = "None"
                break
            elif elem in third_grade_list:
                status_to_write["third_status"] = elem
                break
        dict_to_write = row.to_dict()
        # Assemble one 10-value row: filename, timestamp, the five data
        # columns, then the three status fields -- matching the schema of
        # the "out" table.
        list_to_sql.append(frame_of_tables['name'][loc])
        # print(list_to_sql)
        list_to_sql.append(str(datetime.now()))
        # print(list_to_sql)
        list_to_sql = list_to_sql + list(dict_to_write.values())
        # print(list_to_sql)
        list_to_sql = list_to_sql + list(status_to_write.values())
        # print(dict_to_write)
        # NOTE(review): executemany() is handed a single row here;
        # cursor.execute() would express the intent more directly.
        cursor.executemany("""INSERT INTO out VALUES (?,?,?,?,?,?,?,?,?,?);""", (list_to_sql,))
        list_to_sql = []
    # Single commit once every row has been inserted.
    conn.commit()
    print("END")
# подключение или создание базы данных
def open_db():
    """Change into the output directory and return a connection to the
    ``second_out.db`` SQLite database (created on first use).

    Note: this changes the process-wide working directory as a side effect.
    """
    os.chdir("../second_output")
    return sqlite3.connect("second_out.db")
# создание таблицы в бд
def create_table_out(conn):
    """Ensure the ``out`` table exists in the database behind *conn*.

    The table holds one row per exported product: source file name,
    timestamp, the five product columns and three category status fields.
    Safe to call repeatedly (CREATE TABLE IF NOT EXISTS).
    """
    cur = conn.cursor()
    cur.execute("""CREATE TABLE IF NOT EXISTS out(
                    filename VARCHAR,
                    created_dt DATETIME,
                    product_name VARCHAR,
                    article VARCHAR,
                    wholesale DECIMAL(9, 2),
                    retail_price DECIMAL(9, 2),
                    stock INTEGER,
                    first_status TEXT,
                    second_status TEXT,
                    third_status TEXT
                );""")
    conn.commit()
if __name__ == "__main__":
    # Full pipeline: locate the workbook, parse it into a DataFrame,
    # then persist everything into the SQLite database.
    table_name = get_table()
    parsed_frame = pars_table(table_name)
    connection = open_db()
    create_table_out(connection)
    make_table_for_sql(parsed_frame, connection)
|
def even(num):
    """Return True when *num* is divisible by 2."""
    remainder = num % 2
    return remainder == 0
a = list(range(101))
x = [value for value in a if even(value)]
print(x)
|
from pysumma.Decisions import Decisions
from pysumma.Option import Option
from pysumma.ModelOutput import ModelOutput
import os # get directory or filename from filepath
import subprocess # run shell script in python
import shlex # splite shell script
import xarray as xr # create xarray data from summa output (NetCDF file)
class Simulation:
    """Wrapper around a SUMMA "file manager" configuration file.

    Reads the file manager text file at *filepath*, exposes each of its
    entries as a ``FileManagerOption`` attribute, loads the associated
    decisions file, and can execute SUMMA locally, through the
    ``uwhydro/summa:sopron_2018`` docker image, or through a specworker job.
    """

    def __init__(self, filepath):
        """Parse the file manager at *filepath* and build option objects.

        filepath: path (absolute or relative) to the file manager text file.
        """
        # Absolute paths so that docker volume mounts line up with host paths.
        self.filepath = os.path.abspath(filepath)
        self.file_dir = os.path.dirname(self.filepath)
        self.file_contents = self.open_read()
        # One FileManagerOption per known file-manager entry.
        self.fman_ver = FileManagerOption(self, 'fman_ver')
        self.setting_path = FileManagerOption(self, 'setting_path')
        self.input_path = FileManagerOption(self, 'input_path')
        self.output_path = FileManagerOption(self, 'output_path')
        self.decision_path = FileManagerOption(self, 'decision')
        self.meta_time = FileManagerOption(self, 'meta_time')
        self.meta_attr = FileManagerOption(self, 'meta_attr')
        self.meta_type = FileManagerOption(self, 'meta_type')
        self.meta_force = FileManagerOption(self, 'meta_force')
        self.meta_localpar = FileManagerOption(self, 'meta_localpar')
        self.OUTPUT_CONTROL = FileManagerOption(self, 'OUTPUT_CONTROL')
        self.meta_index = FileManagerOption(self, 'meta_index')
        self.meta_basinpar = FileManagerOption(self, 'meta_basinpar')
        self.meta_basinvar = FileManagerOption(self, 'meta_basinvar')
        self.local_attr = FileManagerOption(self, 'local_attr')
        self.local_par = FileManagerOption(self, 'local_par')
        self.basin_par = FileManagerOption(self, 'basin_par')
        self.forcing_list = FileManagerOption(self, 'forcing_list')
        self.initial_cond = FileManagerOption(self, 'initial_cond')
        self.para_trial = FileManagerOption(self, 'para_trial')
        self.output_prefix = FileManagerOption(self, 'output_prefix')
        # Everything before '/settings' is treated as the project base dir.
        self.base_dir = filepath.split('/settings')[0]
        # The decisions file lives next to the file manager inside /settings.
        self.decision_obj = Decisions(self.base_dir + '/settings/' + self.decision_path.value)
        #self.modeloutput_obj = ModelOutput(self.base_dir + '/settings/' + self.OUTPUT_CONTROL.value)

    def open_read(self):
        """Return the file manager contents as a list of lines."""
        with open(self.filepath, 'rt') as f:
            return f.readlines()

    def _run_and_check(self, cmd):
        """Run shell command *cmd*, echo SUMMA's output, raise on failure."""
        p = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE)
        output = p.communicate()[0].decode('utf-8')
        print(output)
        if 'FATAL ERROR' in output:
            raise Exception("SUMMA failed to execute!")

    def _timestep_output_path(self):
        """Output NetCDF path produced by the sopron-style runs."""
        return (self.output_path.filepath + self.output_prefix.value +
                '_output_' + self.run_suffix + '_timestep.nc')

    def execute(self, run_suffix, run_option, specworker_img=None):
        """Run SUMMA and return ``(xarray.Dataset, output_file_path)``.

        run_suffix: label appended to the output file name to distinguish runs.
        run_option: 'local' (uses the ``executable`` attribute, which the
            caller must set beforehand), 'docker_sopron_2018', or 'specworker'.
        specworker_img: image name for the 'specworker' option; must be
            'ncar/summa' or 'ncar/summa_sopron'.

        Raises ValueError for an unknown run_option or specworker image, and
        Exception when SUMMA reports a FATAL ERROR.
        """
        self.run_suffix = run_suffix
        if run_option == 'local':
            # NOTE: relies on self.executable being set by the caller.
            cmd = "{} -p never -s {} -m {}".format(self.executable, self.run_suffix, self.filepath)
            self._run_and_check(cmd)
            out_file_path = self._timestep_output_path()
        elif run_option == "docker_sopron_2018":
            self.executable = 'uwhydro/summa:sopron_2018'
            # Mount the file-manager dir plus the setting/input/output dirs so
            # paths inside the container match the host.
            cmd = "docker run -v {}:{}".format(self.file_dir, self.file_dir) + \
                  " -v {}:{}".format(self.setting_path.filepath, self.setting_path.filepath) + \
                  " -v {}:{}".format(self.input_path.filepath, self.input_path.filepath) + \
                  " -v {}:{}".format(self.output_path.filepath, self.output_path.filepath) + \
                  " {} -p never -s {} -m {}".format(self.executable, self.run_suffix, self.filepath)
            self._run_and_check(cmd)
            out_file_path = self._timestep_output_path()
        elif run_option == "specworker":
            from specworker import jobs
            # BUG FIX: the original condition was
            # `specworker_img == 'ncar/summa' or 'ncar/summa_sopron'`, which is
            # always truthy, so the ValueError below was unreachable.
            if specworker_img in ('ncar/summa', 'ncar/summa_sopron'):
                # Paths passed to the model through environment variables.
                env_vars = {'LOCALBASEDIR': self.base_dir, 'MASTERPATH': self.filepath}
                # Where the input data is mounted inside the container.
                vol_target = '/tmp/summa'
                vol_source = self.base_dir
                res = jobs.run(specworker_img, '-x', vol_source, vol_target, env_vars)
                # Output name follows the sopron convention:
                # <prefix>_<startYYYY>-<finishYYYY>_<suffix>1.nc
                out_file_path = self.base_dir + self.output_path.value.split('>')[1] + \
                                self.output_prefix.value + '_' + \
                                self.decision_obj.simulStart.value[0:4] + '-' + \
                                self.decision_obj.simulFinsh.value[0:4] + '_' + \
                                self.run_suffix + '1.nc'
            else:
                raise ValueError('You need to define the exact SUMMA_image_name')
        else:
            raise ValueError('No executable defined. Set as "executable" attribute of Simulation or check run_option')
        return xr.open_dataset(out_file_path), out_file_path
class FileManagerOption(Option):
    """One entry of a SUMMA file manager file.

    In the file manager format the value comes first on the line and the key
    name second, separated by '!'; hence key_position=1 / value_position=0.
    The value is a path (directory, file name, or both), exposed through the
    ``value``, ``filepath`` and ``filename`` properties.
    """

    def __init__(self, parent, name):
        super().__init__(name, parent, key_position=1, value_position=0, delimiter="!")

    @property
    def value(self):
        """The raw value string for this entry, read from the file manager."""
        return self.get_value()

    @value.setter
    def value(self, new_value):
        # Rewrites the entry in the file manager file in place.
        self.write_value(old_value=self.value, new_value=new_value)

    @property
    def filepath(self):
        """Directory part of the value, always ending with '/'."""
        current = self.value
        if current.endswith('/'):
            return current
        return "/".join(current.split('/')[:-1]) + "/"

    @filepath.setter
    def filepath(self, new_filepath):
        # Keep the file name, swap only the directory part.
        self.write_value(old_value=self.value, new_value=new_filepath + self.filename)

    @property
    def filename(self):
        """File-name part of the value (text after the last '/')."""
        return self.value.rsplit('/', 1)[-1]

    @filename.setter
    def filename(self, new_filename):
        # Keep the directory, swap only the file name.
        self.write_value(old_value=self.value, new_value=self.filepath + new_filename)
|
import json
import lzma
import tempfile
import zipfile
from io import StringIO
from collections import namedtuple
from datetime import date
from pathlib import Path
from celery import shared_task
from django.conf import settings
from django.template.loader import render_to_string
from django.utils import timezone
from capapi.documents import CaseDocument
from capapi.serializers import NoLoginCaseDocumentSerializer, CaseDocumentSerializer
from capdb.models import Jurisdiction, Reporter
from capdb.storages import download_files_storage
from scripts.helpers import HashingFile
def init_export(changelog):
    """Initialise a new dated bulk export and run it.

    Renders the README templates into bulk_exports/<YYYYMMDD>/ (refusing to
    overwrite an existing export for today) and then queues the per-
    jurisdiction and per-reporter export tasks.

    changelog: text inserted into the rendered README templates.
    """
    version_string = date.today().strftime('%Y%m%d')
    template_dir = Path(settings.BASE_DIR, 'capdb/templates/bulk_export')
    output_path = Path('bulk_exports', version_string)
    # Refuse to clobber an export that already exists for today.
    if download_files_storage.exists(str(output_path / 'README.md')):
        print("Cannot init export; %s already exists" % output_path)
        return
    # Render every template file (skipping directories) into storage.
    print("Writing README files to %s" % output_path)
    template_files = (p for p in template_dir.rglob('*') if not p.is_dir())
    for template_path in template_files:
        rel_path = template_path.relative_to(template_dir)
        contents = render_to_string('bulk_export/%s' % rel_path, {
            'changes': changelog,
            'export_date': date.today(),
        })
        download_files_storage.save('bulk_exports/%s/%s' % (version_string, rel_path), StringIO(contents))
    # Queue the actual export tasks.
    export_all(version_string)
def export_all(version_string):
    """
    Queue celery tasks to export all cases, once per jurisdiction and once
    per reporter, into the bulk_exports/<version_string>/ tree.
    """
    for model, task in ((Jurisdiction, export_cases_by_jurisdiction), (Reporter, export_cases_by_reporter)):
        print("Queueing %s" % model.__name__)
        for item in model.objects.all():
            print("- Adding %s" % item)
            task.delay(version_string, item.pk)
@shared_task
def export_cases_by_jurisdiction(version_string, id):
    """
    Export all cases for the jurisdiction with primary key ``id`` as bagit
    zip archives (xml, text and metadata variants) under
    bulk_exports/<version_string>/by_jurisdiction/.
    """
    jurisdiction = Jurisdiction.objects.get(pk=id)
    cases = CaseDocument.raw_search().filter("term", jurisdiction__id=id)
    if cases.count() == 0:
        print("WARNING: Jurisdiction '{}' contains NO CASES.".format(jurisdiction.name))
        return
    # {subfolder} and {case_format} are filled in per output format by
    # export_case_documents.
    out_path = Path(
        "bulk_exports",
        version_string,
        "by_jurisdiction",
        "{subfolder}",
        jurisdiction.slug,
        "%s_{case_format}_%s.zip" % (jurisdiction.slug, version_string)
    )
    # Whitelisted jurisdictions may be downloaded without a researcher account.
    export_case_documents(cases, out_path, jurisdiction, public=jurisdiction.whitelisted)
@shared_task
def export_cases_by_reporter(version_string, id):
    """
    Export all cases for the reporter with primary key ``id`` as bagit zip
    archives (xml, text and metadata variants) under
    bulk_exports/<version_string>/by_reporter/.
    """
    reporter = Reporter.objects.get(pk=id)
    cases = CaseDocument.raw_search().filter("term", reporter__id=id)
    if cases.count() == 0:
        print("WARNING: Reporter '{}' contains NO CASES.".format(reporter.full_name))
        return
    # {subfolder} and {case_format} are filled in per output format by
    # export_case_documents.
    out_path = Path(
        "bulk_exports",
        version_string,
        "by_reporter",
        "{subfolder}",
        reporter.short_name_slug,
        "%s_{case_format}_%s.zip" % (reporter.short_name_slug, version_string)
    )
    # Public only when every in-scope case belongs to a whitelisted jurisdiction.
    public = not reporter.case_metadatas.in_scope().filter(jurisdiction__whitelisted=False).exists()
    export_case_documents(cases, out_path, reporter, public=public)
def try_to_close(file_handle):
    """
    Cleanup helper used by exception handler. Try calling .close() on file_handle.
    If this fails, presumably file_handle was never opened so no cleanup necessary.
    """
    if not file_handle:
        return
    try:
        file_handle.close()
    except Exception:
        pass
def export_case_documents(cases, zip_path, filter_item, public=False):
    """
    Export cases in queryset to dir_name.zip.
    filter_item is the Jurisdiction or Reporter used to select the cases.
    public controls whether export is downloadable by non-researchers.

    Three zip archives are produced (xml, text, metadata), each a bagit bag
    containing a single xz-compressed jsonl data file.  zip_path is a
    template path containing {subfolder} and {case_format} placeholders.
    Formats whose output file already exists in storage are skipped.
    """
    # Per-format configuration; each entry accumulates working state
    # (open files, bagit metadata) as the export proceeds.
    formats = {
        'xml': {
            'serializer': NoLoginCaseDocumentSerializer,
            'query_params': {'body_format': 'xml'},
        },
        'text': {
            'serializer': NoLoginCaseDocumentSerializer,
            'query_params': {'body_format': 'text'},
        },
        'metadata': {
            'serializer': CaseDocumentSerializer,
            'query_params': {},
        }
    }
    try:
        # set up vars for each format
        # (iterating over a list() copy so entries can be deleted safely)
        for format_name, vars in list(formats.items()):
            # set up paths for zip file output
            # metadata is always public; case text depends on the `public` flag
            subfolder = 'case_metadata' if format_name == 'metadata' else 'case_text_open' if public else 'case_text_restricted'
            vars['out_path'] = str(zip_path).format(subfolder=subfolder, case_format=format_name)
            if download_files_storage.exists(vars['out_path']):
                print("File %s already exists; skipping." % vars['out_path'])
                del formats[format_name]
                continue
            vars['internal_path'] = Path(Path(vars['out_path']).stem)
            vars['data_file_path'] = Path('data', 'data.jsonl.xz')
            # set up bagit metadata files
            vars['payload'] = []
            vars['bagit'] = "BagIt-Version: 1.0\nTag-File-Character-Encoding: UTF-8\n"
            vars['baginfo'] = (
                "Source-Organization: Harvard Law School Library Innovation Lab\n"
                "Organization-Address: 1545 Massachusetts Avenue, Cambridge, MA 02138\n"
                "Contact-Name: Library Innovation Lab\n"
                "Contact-Email: info@case.law\n"
                "External-Description: Cases for %s\n"
                "Bagging-Date: %s\n"
            ) % (filter_item, timezone.now().strftime("%Y-%m-%d"))
            # fake Request object used for serializing cases with DRF's serializer
            vars['fake_request'] = namedtuple('Request', ['query_params', 'accepted_renderer'])(
                query_params=vars['query_params'],
                accepted_renderer=None,
            )
            # create new zip file in memory
            vars['out_spool'] = tempfile.TemporaryFile()
            vars['archive'] = zipfile.ZipFile(vars['out_spool'], 'w', zipfile.ZIP_STORED)
            # data file is hashed as it is written so the bagit manifest can
            # be produced without re-reading it
            vars['data_file'] = tempfile.NamedTemporaryFile()
            vars['hashing_data_file'] = HashingFile(vars['data_file'], 'sha512')
            vars['compressed_data_file'] = lzma.open(vars['hashing_data_file'], "w")
        # write each case
        # (cases are scanned once; each case is serialized for every format)
        for item in cases.scan():
            for format_name, vars in formats.items():
                serializer = vars['serializer'](item['_source'], context={'request': vars['fake_request']})
                vars['compressed_data_file'].write(bytes(json.dumps(serializer.data), 'utf8') + b'\n')
        # finish bag for each format
        for format_name, vars in formats.items():
            # write temp data file to bag
            vars['compressed_data_file'].close()
            vars['data_file'].flush()
            vars['payload'].append("%s %s" % (vars['hashing_data_file'].hexdigest(), vars['data_file_path']))
            vars['archive'].write(vars['data_file'].name, str(vars['internal_path'] / vars['data_file_path']))
            vars['data_file'].close()
            # write bagit metadata files and close zip
            vars['archive'].writestr(str(vars['internal_path'] / "bagit.txt"), vars['bagit'])
            vars['archive'].writestr(str(vars['internal_path'] / "bag-info.txt"), vars['baginfo'])
            vars['archive'].writestr(str(vars['internal_path'] / "manifest-sha512.txt"), "\n".join(vars['payload']))
            vars['archive'].close()
            # copy temp file to permanent storage
            vars['out_spool'].seek(0)
            download_files_storage.save(vars['out_path'], vars['out_spool'])
            vars['out_spool'].close()
    finally:
        # in case of error, make sure anything opened was closed
        for format_name, vars in formats.items():
            for file_handle in ('compressed_data_file', 'data_file', 'archive', 'out_spool'):
                try_to_close(vars.get(file_handle))
|
import os,sys
import math
import random
import numpy as np
from scipy import stats
# data format
# Y, x_value/level_1,...,x_value/level_1/level_2,...
# Command-line arguments: n = number of instances, argv[2]/5 = feature budget
# per branch, argv[3] = which of the 5 top-level branches to generate.
n = int(sys.argv[1])
p = int(sys.argv[2])/5  # NOTE(review): true division -- p is a float; count is compared against it with >=
ntimes = int(sys.argv[3]) # which branch (5 subtrees in total: 5 top level features)
features = [] # store all features, level by level, each level has 2^(l-1) features, each feature is a n-dim vector
structures = [] # store all structures, level by level, each level has 2^(l-1) structures, each structure is a string
## features and structures for the top level
L = 0
features_0 = []
structures_0 = [str(ntimes)]
# Latent values for this branch, one per instance (column ntimes of the file).
top_Z = np.load('top_z_vals.npy')[:,ntimes]
# Top-level feature is all-ones: every instance belongs to the branch root.
features_0.append(np.ones(n,dtype=int).tolist())
features.append(features_0)
structures.append(structures_0)
rec_length = 10.0 # [-5, +5]
# from the second level
count = 1 # the number of features
flag = 0
# Each deeper level splits the [-5, 5] range into 2^L equal buckets; an
# instance's feature is 1 when its top_Z value falls inside the bucket.
while flag == 0: # l: layer
    L += 1 # current layer
    n_features = int(math.pow(2,L))
    #print('layer:',L,',#features:',n_features)
    features_l = []
    structures_l = []
    # create buckets
    buckets = []
    for i in range(n_features):
        r = (-5+i*rec_length/n_features,-5+(i+1)*rec_length/n_features)
        buckets.append(r)
    for i in range(n_features):
        # Binary feature vector: 1 where top_Z lands in bucket i.
        # NOTE(review): bucket edges are inclusive on both sides, so a value
        # exactly on an interior edge sets two adjacent features -- confirm
        # this is intended.
        fvals = [0]*n
        b = buckets[i]
        for j in range(n):
            if top_Z[j] >= b[0] and top_Z[j] <= b[1]:
                fvals[j] = 1
        features_l.append(fvals)
        # add structures
        # Structure string is the parent's path plus this node's index
        # (feature i's parent is feature i//2 of the previous level).
        parent_idx = int(i/2)
        parent_layer = L-1
        parent_structure = structures[parent_layer][parent_idx]
        cur_structure = parent_structure + '/' + str(i)
        structures_l.append(cur_structure)
        count += 1
        # Stop as soon as the feature budget is reached.
        # NOTE(review): the partially filled level is still appended below --
        # presumably intended so the budget is hit exactly; verify.
        if count >= p:
            flag = 1
            break
    features.append(features_l)
    structures.append(structures_l)
#print(features,'\n',structures)
#sys.exit(1)
# formatting outputs
# each instance: feature_value (x),structure_value (layer) ; feature_value (x),structure_value (layer) ; ...
#np.random.seed(seed)
#beta = np.random.uniform(-1,1,count)
layers = len(features)
# One output line per instance: semicolon-separated "value,structure" pairs
# over all features of all levels.
for i in range(n):
    output_line = []
    fvals = []
    for l in range(layers):
        nf = len(features[l])
        for f in range(nf):
            output_line.append(str(features[l][f][i]) + ',' + structures[l][f])
            fvals.append(features[l][f][i])
    #if np.dot(beta,fvals) > 0:
    #    Y = 1
    #else:
    #    Y = 0
    print(';'.join(output_line))# + ';' + str(Y))
|
# Grid for the crawler: 1 = walkable cell, 0 = blocked, 9 = target element.
LIST_BIN = [
    [1,1,1,1],
    [0,1,1,0],
    [0,1,0,1],
    [0,1,9,1],
    [1,1,1,1]
]
class ElementCrawler():
    # Walks a binary grid from the top row towards the row containing the
    # target value 9, recording the moves ('>' right, 'v' down, 'x' arrival).
    def __init__(self, list_bin):
        self.list_bin = list_bin          # the grid (mutated in place with path markers)
        self.element_line = None          # row index of the target, set by start()
        self._continue = True             # cleared once the target is reached
        self.len = len(list_bin)          # number of rows
        self.response = []                # accumulated list of moves
    def find_element_line(self):
        """=====================================================================
        Return the index of the first row that contains the target element (9).
        ====================================================================="""
        for i in range(self.len):
            if 9 in self.list_bin[i]:
                return i
    def set_line(self, line):
        """=====================================================================
        Walk across row *line*, marking the path in the matrix and appending
        each move to self.response.
        ====================================================================="""
        continue_line = True
        for i in range(len(self.list_bin[line])):
            # NOTE(review): conditions index list_bin[line + 1]; presumably
            # the target row is never the last row when line != element_line
            # -- otherwise this would raise IndexError. Verify.
            conditions = [ # condition for moving down
                all([
                    line != self.element_line,
                    self.list_bin[line][i] == 1,
                    self.list_bin[line + 1][i] == 1,
                    continue_line
                ]),
                # condition for turning right
                all([
                    line != self.element_line,
                    self.list_bin[line][i] == 1,
                    self.list_bin[line + 1][i] != 1,
                    continue_line
                ]),
                # condition for turning right on the target's row
                all([
                    line == self.element_line,
                    self.list_bin[line][i] == 1,
                    continue_line
                ]),
                # arrival condition
                all([
                    line == self.element_line,
                    self.list_bin[line][i] == 9,
                    continue_line
                ])
            ]
            if conditions[0]:
                # Move down: mark and stop scanning this row.
                self.list_bin[line][i] = 'v'
                self.response.append('v')
                continue_line = False
            elif conditions[1]:
                self.list_bin[line][i] = '>'
                self.response.append('>')
            elif conditions[2]:
                self.list_bin[line][i] = '>'
                self.response.append('>')
            elif conditions[3]:
                # Target reached: record arrival and stop the whole walk.
                self.response.append('x')
                self._continue = False
                continue_line = False
    def start(self):
        """=====================================================================
        Process each row in order, print it, and return the list of moves.
        ====================================================================="""
        self.element_line = self.find_element_line()
        for line in range(self.len):
            if self._continue:
                self.set_line(line)
                print(self.list_bin[line])
        return self.response
# Run the crawler on the sample grid and show the resulting move list.
crawler = ElementCrawler(LIST_BIN)
print('Algoritimo para chegar até o elemento: ', crawler.start())
|
import httplib
import urllib
import json
import hashlib
import hmac
from pprint import pprint
import csv
#import pyexcel_ods
import numpy as np
import requests
import collections
import poloniex
from array import *
import re
import logging
import time
#from pyexcel_ods import save_data
# Python 2 script: compares order books on Poloniex, C-Cex and YoBit for one
# coin pair and prints the potential arbitrage profit in both directions.
t0 = time.time()
polo = poloniex.Poloniex()
#Price Dictionary emptry creation
pricedict = {}
#coinpairs
coin1 = 'btc'
print 'Coin 1 %s' % coin1
coin2 = 'eth'
print 'Coin 2 %s' % coin2
#Poloniex Pair (uppercase, underscore-separated, e.g. BTC_ETH)
polo1 = coin1 + '_' + coin2
polopair = polo1.upper()
print polopair
#C-cex pair (reversed order, dash-separated, e.g. eth-btc)
ccexpair = coin2 + '-' + coin1
print ccexpair
#Yobitpair (reversed order, underscore-separated, e.g. eth_btc)
yobitpair = coin2 + '_' + coin1
print yobitpair
# get Poloniex Orderbook
pricedict['polo'] = polo.api('returnOrderBook', {'currencyPair':polopair})
print 'Polo Bids/Buys'
print pricedict['polo']['bids'][0]
print 'Polo Asks/Sells'
print pricedict['polo']['asks'][0]
#Get C-Cex Orderbook
ordertype = 'both' #can be buy,sell or both
orderdepth = '5' #how many orders to pull
puburl = 'https://c-cex.com/t/api_pub.html?a=getorderbook&market='
url1 = str(puburl+ccexpair+'&type='+ordertype+'&depth='+orderdepth) #api url to string
orderdata1 = requests.get(url1) #get json data from website
ccex = orderdata1.json()
pricedict['ccex'] = ccex
print '-----------'
print 'C-cex Sell'
print pricedict['ccex']['result']['sell'][0]
print 'C-cex Buy'
print pricedict['ccex']['result']['buy'][0]
#Get YObit Prices
puburl = 'https://yobit.net/api/3/depth/'
url = str(puburl+yobitpair) #api url to string
#print url
orderdata = requests.get(url) #get json data from website
#print orderdata
yobitorders = orderdata.json()
print '-----------'
pricedict['yobit'] = yobitorders[yobitpair]
print 'YoBit Bids/Buys'
print pricedict['yobit']['bids'][0]
print 'YoBit Asks/Sells'
print pricedict['yobit']['asks'][0]
# Direction 1: buy at C-Cex's best ask, sell into YoBit's best bid.
print 'Ccex->YoBit'
ccexbuy = pricedict['ccex']['result']['sell'][0]['Rate']#*pricedict['ccex']['result']['sell'][0]['Quantity']
print 'ccex buy price'
print ccexbuy
yobitsell = pricedict['yobit']['bids'][0][0] #*pricedict['yobit']['bids'][0][1]
print 'yobit sell price'
print yobitsell
pricediff = yobitsell-ccexbuy
print 'Pricediff'
print pricediff
print 'Profit pct'
profit = (pricediff/ccexbuy)*100
print profit
# Tradeable volume is limited by the smaller side of the two books.
minvol = min(pricedict['ccex']['result']['sell'][0]['Quantity'],pricedict['yobit']['bids'][0][1])
print 'minvol'
print minvol
btcprofit = pricediff * minvol
print 'BTC profit'
print btcprofit
print '-------------'
# Direction 2: buy at YoBit's best ask, sell into C-Cex's best bid.
print 'YoBit->Ccex'
yobitbuy = pricedict['yobit']['asks'][0][0] #*pricedict['yobit']['bids'][0][1]
print 'yobit buy price'
print yobitbuy
ccexsell = pricedict['ccex']['result']['buy'][0]['Rate']#*pricedict['ccex']['result']['sell'][0]['Quantity']
print 'ccex sell price'
print ccexsell
pricediff = ccexsell-yobitbuy
print 'Pricediff'
print pricediff
print 'Profit pct'
profit = (pricediff/yobitbuy)*100
print profit
minvol = min(pricedict['ccex']['result']['buy'][0]['Quantity'],pricedict['yobit']['asks'][0][1])
print 'minvol'
print minvol
btcprofit = pricediff * minvol
print 'BTC profit'
print btcprofit |
"""
Preprocess pipeline
"""
import logging
import os.path
from functools import reduce
import numpy as np
import scipy.spatial as ss
from yass import read_config
from yass.batch import BatchPipeline, BatchProcessor, RecordingsReader
from yass.batch import PipedTransformation as Transform
from yass.explore import RecordingExplorer
from yass.preprocess.filter import butterworth
from yass.preprocess.standarize import standarize, standard_deviation
from yass.preprocess import whiten
from yass.preprocess import detect
from yass.preprocess import dimensionality_reduction as dim_red
from yass import neuralnetwork
def run(output_directory='tmp/'):
    """Execute preprocessing pipeline
    Parameters
    ----------
    output_directory: str, optional
        Location to store partial results, relative to CONFIG.data.root_folder,
        defaults to tmp/
    Returns
    -------
    clear_scores: numpy.ndarray (n_spikes, n_features, n_channels)
        3D array with the scores for the clear spikes, first dimension is
        the number of spikes, second is the number of features and third the
        number of channels
    spike_index_clear: numpy.ndarray (n_clear_spikes, 2)
        2D array with indexes for clear spikes, first column contains the
        spike location in the recording and the second the main channel
        (channel whose amplitude is maximum)
    spike_index_collision: numpy.ndarray (n_collided_spikes, 2)
        2D array with indexes for collided spikes, first column contains the
        spike location in the recording and the second the main channel
        (channel whose amplitude is maximum)
    Notes
    -----
    Running the preprocessor will generate the following files in
    CONFIG.data.root_folder/output_directory/:
    * ``config.yaml`` - Copy of the configuration file
    * ``metadata.yaml`` - Experiment metadata
    * ``filtered.bin`` - Filtered recordings
    * ``filtered.yaml`` - Filtered recordings metadata
    * ``standarized.bin`` - Standarized recordings
    * ``standarized.yaml`` - Standarized recordings metadata
    * ``whitened.bin`` - Whitened recordings
    * ``whitened.yaml`` - Whitened recordings metadata
    * ``rotation.npy`` - Rotation matrix for dimensionality reduction
    * ``spike_index_clear.npy`` - Same as spike_index_clear returned
    * ``spike_index_collision.npy`` - Same as spike_index_collision returned
    * ``score_clear.npy`` - Scores for clear spikes
    * ``waveforms_clear.npy`` - Waveforms for clear spikes
    Examples
    --------
    .. literalinclude:: ../examples/preprocess.py
    """
    logger = logging.getLogger(__name__)
    CONFIG = read_config()
    OUTPUT_DTYPE = CONFIG.preprocess.dtype
    logger.info('Output dtype for transformed data will be {}'
                .format(OUTPUT_DTYPE))
    # ensure the temporary output folder exists
    TMP = os.path.join(CONFIG.data.root_folder, output_directory)
    if not os.path.exists(TMP):
        logger.info('Creating temporary folder: {}'.format(TMP))
        os.makedirs(TMP)
    else:
        logger.info('Temporary folder {} already exists, output will be '
                    'stored there'.format(TMP))
    path = os.path.join(CONFIG.data.root_folder, CONFIG.data.recordings)
    dtype = CONFIG.recordings.dtype
    # initialize pipeline object, one batch per channel
    pipeline = BatchPipeline(path, dtype, CONFIG.recordings.n_channels,
                             CONFIG.recordings.format,
                             CONFIG.resources.max_memory, TMP)
    # add filter transformation if necessary
    if CONFIG.preprocess.filter:
        filter_op = Transform(
            butterworth,
            'filtered.bin',
            mode='single_channel_one_batch',
            keep=True,
            if_file_exists='skip',
            cast_dtype=OUTPUT_DTYPE,
            low_freq=CONFIG.filter.low_pass_freq,
            high_factor=CONFIG.filter.high_factor,
            order=CONFIG.filter.order,
            sampling_freq=CONFIG.recordings.sampling_rate)
        pipeline.add([filter_op])
    # NOTE(review): filtered_path/filtered_params are only defined when the
    # pipeline actually ran the filter transform; if CONFIG.preprocess.filter
    # is falsy this unpacking (or the standarize step below) would fail --
    # confirm filtering is always enabled or handle the no-filter case.
    (filtered_path,), (filtered_params,) = pipeline.run()
    # standarize: estimate the standard deviation from the first batch and
    # apply it to the whole recording
    bp = BatchProcessor(
        filtered_path, filtered_params['dtype'], filtered_params['n_channels'],
        filtered_params['data_format'], CONFIG.resources.max_memory)
    batches = bp.multi_channel()
    first_batch, _, _ = next(batches)
    sd = standard_deviation(first_batch, CONFIG.recordings.sampling_rate)
    (standarized_path, standarized_params) = bp.multi_channel_apply(
        standarize,
        mode='disk',
        output_path=os.path.join(TMP, 'standarized.bin'),
        if_file_exists='skip',
        cast_dtype=OUTPUT_DTYPE,
        sd=sd)
    standarized = RecordingsReader(standarized_path)
    n_observations = standarized.observations
    # dispatch to the configured spike detector
    if CONFIG.spikes.detection == 'threshold':
        return _threshold_detection(standarized_path, standarized_params,
                                    n_observations, output_directory)
    elif CONFIG.spikes.detection == 'nn':
        return _neural_network_detection(standarized_path, standarized_params,
                                         n_observations, output_directory)
def _threshold_detection(standarized_path, standarized_params, n_observations,
                         output_directory):
    """Run threshold detector and dimensionality reduction using PCA

    Detects spikes in the standarized recordings with a threshold detector,
    extracts the clear-spike waveforms and reduces their dimensionality with
    PCA (optionally computing spike locations when the clustering method is
    'location'). Intermediate results are cached as .npy files inside the
    temporary output folder and reused on subsequent runs.

    Parameters
    ----------
    standarized_path : str
        Path to the standarized recordings binary file
    standarized_params : dict
        Recording parameters; the 'dtype', 'n_channels' and 'data_format'
        keys are read
    n_observations : int
        Number of observations in the standarized recordings; spikes whose
        complete waveform cannot be drawn near the edges are discarded
    output_directory : str
        Folder (relative to CONFIG.data.root_folder) where intermediate
        files are stored

    Returns
    -------
    tuple
        (scores, spike_index_clear, spike_index_collision); the collision
        index is always empty because collision detection is not
        implemented for the threshold detector
    """
    logger = logging.getLogger(__name__)

    CONFIG = read_config()
    OUTPUT_DTYPE = CONFIG.preprocess.dtype
    TMP_FOLDER = os.path.join(CONFIG.data.root_folder, output_directory)

    ###############
    # Whiten data #
    ###############

    # compute Q for whitening
    logger.info('Computing whitening matrix...')

    bp = BatchProcessor(standarized_path, standarized_params['dtype'],
                        standarized_params['n_channels'],
                        standarized_params['data_format'],
                        CONFIG.resources.max_memory)

    batches = bp.multi_channel()
    # the whitening matrix is estimated from the first batch only
    first_batch, _, _ = next(batches)
    Q = whiten.matrix(first_batch, CONFIG.neighChannels, CONFIG.spikeSize)

    path_to_whitening_matrix = os.path.join(TMP_FOLDER, 'whitening.npy')
    np.save(path_to_whitening_matrix, Q)
    logger.info('Saved whitening matrix in {}'
                .format(path_to_whitening_matrix))

    # apply whitening to every batch
    # NOTE(review): the whitened recordings are written to disk but the
    # waveforms below are read from the standarized (not whitened) data --
    # confirm this is intended
    (whitened_path, whitened_params) = bp.multi_channel_apply(
        np.matmul,
        mode='disk',
        output_path=os.path.join(TMP_FOLDER, 'whitened.bin'),
        if_file_exists='skip',
        cast_dtype=OUTPUT_DTYPE,
        b=Q)

    ###################
    # Spike detection #
    ###################

    path_to_spike_index_clear = os.path.join(TMP_FOLDER,
                                             'spike_index_clear.npy')

    # buffer_size=0: detection does not need overlap between batches here
    bp = BatchProcessor(
        standarized_path,
        standarized_params['dtype'],
        standarized_params['n_channels'],
        standarized_params['data_format'],
        CONFIG.resources.max_memory,
        buffer_size=0)

    # clear spikes
    if os.path.exists(path_to_spike_index_clear):
        # if it exists, load it...
        logger.info('Found file in {}, loading it...'
                    .format(path_to_spike_index_clear))
        spike_index_clear = np.load(path_to_spike_index_clear)
    else:
        # if it doesn't, detect spikes...
        logger.info('Did not find file in {}, finding spikes using threshold'
                    ' detector...'.format(path_to_spike_index_clear))

        # apply threshold detector on standarized data
        spikes = bp.multi_channel_apply(
            detect.threshold,
            mode='memory',
            cleanup_function=detect.fix_indexes,
            neighbors=CONFIG.neighChannels,
            spike_size=CONFIG.spikeSize,
            std_factor=CONFIG.stdFactor)
        # stack the per-batch results into a single spike index array
        spike_index_clear = np.vstack(spikes)

        logger.info('Removing clear indexes outside the allowed range to '
                    'draw a complete waveform...')
        spike_index_clear, _ = (detect.remove_incomplete_waveforms(
            spike_index_clear, CONFIG.spikeSize + CONFIG.templatesMaxShift,
            n_observations))

        logger.info('Saving spikes in {}...'.format(path_to_spike_index_clear))
        np.save(path_to_spike_index_clear, spike_index_clear)

    path_to_spike_index_collision = os.path.join(TMP_FOLDER,
                                                 'spike_index_collision.npy')

    # collided spikes
    if os.path.exists(path_to_spike_index_collision):
        # if it exists, load it...
        logger.info('Found collided spikes in {}, loading them...'
                    .format(path_to_spike_index_collision))
        spike_index_collision = np.load(path_to_spike_index_collision)

        # a cached non-empty collision index can only come from a previous
        # run with the neural-network detector; refuse to mix results
        if spike_index_collision.shape[0] != 0:
            raise ValueError('Found non-empty collision spike index in {}, '
                             'but threshold detector is selected, collision '
                             'detection is not implemented for threshold '
                             'detector so array must have dimensios (0, 2) '
                             'but had ({}, {})'
                             .format(path_to_spike_index_collision,
                                     *spike_index_collision.shape))
    else:
        # triage is not implemented on threshold detector, return empty array
        logger.info('Creating empty array for'
                    ' collided spikes (collision detection is not implemented'
                    ' with threshold detector. Saving them in {}'
                    .format(path_to_spike_index_collision))
        spike_index_collision = np.zeros((0, 2), 'int32')
        np.save(path_to_spike_index_collision, spike_index_collision)

    #######################
    # Waveform extraction #
    #######################

    # load and dump waveforms from clear spikes
    path_to_waveforms_clear = os.path.join(TMP_FOLDER, 'waveforms_clear.npy')

    if os.path.exists(path_to_waveforms_clear):
        logger.info('Found clear waveforms in {}, loading them...'
                    .format(path_to_waveforms_clear))
        waveforms_clear = np.load(path_to_waveforms_clear)
    else:
        logger.info('Did not find clear waveforms in {}, reading them from {}'
                    .format(path_to_waveforms_clear, standarized_path))
        explorer = RecordingExplorer(
            standarized_path, spike_size=CONFIG.spikeSize)
        # first column of the spike index appears to hold the spike time
        # (sample index) -- TODO confirm against detect.threshold
        waveforms_clear = explorer.read_waveforms(spike_index_clear[:, 0])
        np.save(path_to_waveforms_clear, waveforms_clear)
        logger.info('Saved waveform from clear spikes in: {}'
                    .format(path_to_waveforms_clear))

    #########################
    # PCA - rotation matrix #
    #########################

    # compute per-batch sufficient statistics for PCA on standarized data
    logger.info('Computing PCA sufficient statistics...')
    stats = bp.multi_channel_apply(
        dim_red.suff_stat,
        mode='memory',
        spike_index=spike_index_clear,
        spike_size=CONFIG.spikeSize)
    # accumulate the per-batch statistics elementwise
    suff_stats = reduce(lambda x, y: np.add(x, y), [e[0] for e in stats])
    spikes_per_channel = reduce(lambda x, y: np.add(x, y),
                                [e[1] for e in stats])

    # compute rotation matrix
    logger.info('Computing PCA projection matrix...')
    rotation = dim_red.project(suff_stats, spikes_per_channel,
                               CONFIG.spikes.temporal_features,
                               CONFIG.neighChannels)
    path_to_rotation = os.path.join(TMP_FOLDER, 'rotation.npy')
    np.save(path_to_rotation, rotation)
    logger.info('Saved rotation matrix in {}...'.format(path_to_rotation))

    # second column presumably holds each spike's main channel -- it is used
    # below to index the neighbor/geometry structures
    main_channel = spike_index_clear[:, 1]

    ###########################################
    # PCA - waveform dimensionality reduction #
    ###########################################

    if CONFIG.clustering.clustering_method == 'location':
        logger.info('Denoising...')

        path_to_denoised_waveforms = os.path.join(TMP_FOLDER,
                                                  'denoised_waveforms.npy')

        if os.path.exists(path_to_denoised_waveforms):
            logger.info('Found denoised waveforms in {}, loading them...'
                        .format(path_to_denoised_waveforms))
            denoised_waveforms = np.load(path_to_denoised_waveforms)
        else:
            logger.info(
                'Did not find denoised waveforms in {}, evaluating them'
                'from {}'.format(path_to_denoised_waveforms,
                                 path_to_waveforms_clear))
            waveforms_clear = np.load(path_to_waveforms_clear)
            denoised_waveforms = dim_red.denoise(waveforms_clear, rotation,
                                                 CONFIG)
            logger.info('Saving denoised waveforms to {}'.format(
                path_to_denoised_waveforms))
            np.save(path_to_denoised_waveforms, denoised_waveforms)

        isolated_index, x, y = get_isolated_spikes_and_locations(
            denoised_waveforms, main_channel, CONFIG)
        # standarize the estimated locations
        x = (x - np.mean(x)) / np.std(x)
        y = (y - np.mean(y)) / np.std(y)

        # spikes that are not isolated are moved to the collision set
        corrupted_index = np.logical_not(
            np.in1d(np.arange(spike_index_clear.shape[0]), isolated_index))
        spike_index_collision = np.concatenate(
            [spike_index_collision, spike_index_clear[corrupted_index]],
            axis=0)
        spike_index_clear = spike_index_clear[isolated_index]
        waveforms_clear = waveforms_clear[isolated_index]

        #################################################
        # Dimensionality reduction (Isolated Waveforms) #
        #################################################

        scores = dim_red.main_channel_scores(waveforms_clear, rotation,
                                             spike_index_clear, CONFIG)
        scores = (scores - np.mean(scores, axis=0)) / np.std(scores)
        # prepend the (x, y) location features to the PCA scores
        scores = np.concatenate(
            [
                x[:, np.newaxis, np.newaxis], y[:, np.newaxis, np.newaxis],
                scores[:, :, np.newaxis]
            ],
            axis=1)
    else:
        logger.info('Reducing spikes dimensionality with PCA matrix...')
        scores = dim_red.score(waveforms_clear, rotation,
                               spike_index_clear[:, 1],
                               CONFIG.neighChannels, CONFIG.geom)

    # save scores
    path_to_score = os.path.join(TMP_FOLDER, 'score_clear.npy')
    np.save(path_to_score, scores)
    logger.info('Saved spike scores in {}...'.format(path_to_score))

    return scores, spike_index_clear, spike_index_collision
def _neural_network_detection(standarized_path, standarized_params,
                              n_observations, output_directory):
    """Run neural network detection and autoencoder dimensionality reduction

    Detects clear and collided spikes with the neural-network detector,
    then reduces the clear-spike waveforms either with the autoencoder
    scores (default) or with location-based features when the clustering
    method is 'location'. All results (scores, spike indexes, whitening and
    rotation matrices) are cached as .npy files and reused on later runs.

    Parameters
    ----------
    standarized_path : str
        Path to the standarized recordings binary file
    standarized_params : dict
        Recording parameters; the 'dtype', 'n_channels' and 'data_format'
        keys are read
    n_observations : int
        Number of observations in the standarized recordings; spikes whose
        complete waveform cannot be drawn near the edges are discarded
    output_directory : str
        Folder (relative to CONFIG.data.root_folder) where intermediate
        files are stored

    Returns
    -------
    tuple
        (scores, clear, collision): spike scores, clear spike index and
        collided spike index
    """
    logger = logging.getLogger(__name__)

    CONFIG = read_config()
    OUTPUT_DTYPE = CONFIG.preprocess.dtype
    TMP_FOLDER = os.path.join(CONFIG.data.root_folder, output_directory)

    # detect spikes
    bp = BatchProcessor(
        standarized_path,
        standarized_params['dtype'],
        standarized_params['n_channels'],
        standarized_params['data_format'],
        CONFIG.resources.max_memory,
        buffer_size=0)

    # check if all scores, clear and collision spikes exist..
    path_to_score = os.path.join(TMP_FOLDER, 'score_clear.npy')
    path_to_spike_index_clear = os.path.join(TMP_FOLDER,
                                             'spike_index_clear.npy')
    path_to_spike_index_collision = os.path.join(TMP_FOLDER,
                                                 'spike_index_collision.npy')

    if all([
            os.path.exists(path_to_score),
            os.path.exists(path_to_spike_index_clear),
            os.path.exists(path_to_spike_index_collision)
    ]):
        # everything is cached: load and return without recomputing
        logger.info('Loading "{}", "{}" and "{}"'.format(
            path_to_score, path_to_spike_index_clear,
            path_to_spike_index_collision))

        scores = np.load(path_to_score)
        clear = np.load(path_to_spike_index_clear)
        collision = np.load(path_to_spike_index_collision)
    else:
        logger.info('One or more of "{}", "{}" or "{}" files were missing, '
                    'computing...'.format(path_to_score,
                                          path_to_spike_index_clear,
                                          path_to_spike_index_collision))

        # apply threshold detector on standarized data
        autoencoder_filename = CONFIG.neural_network_autoencoder.filename
        mc = bp.multi_channel_apply
        res = mc(
            neuralnetwork.nn_detection,
            mode='memory',
            cleanup_function=neuralnetwork.fix_indexes,
            neighbors=CONFIG.neighChannels,
            geom=CONFIG.geom,
            temporal_features=CONFIG.spikes.temporal_features,
            # FIXME: what is this?
            temporal_window=3,
            th_detect=CONFIG.neural_network_detector.threshold_spike,
            th_triage=CONFIG.neural_network_triage.threshold_collision,
            detector_filename=CONFIG.neural_network_detector.filename,
            autoencoder_filename=autoencoder_filename,
            triage_filename=CONFIG.neural_network_triage.filename)

        # save clear spikes
        # each per-batch result appears to be a tuple of
        # (scores, clear index, collision index) -- TODO confirm against
        # neuralnetwork.nn_detection
        clear = np.concatenate([element[1] for element in res], axis=0)
        logger.info('Removing clear indexes outside the allowed range to '
                    'draw a complete waveform...')
        # idx marks the surviving rows; it is reused below to filter scores
        clear, idx = detect.remove_incomplete_waveforms(
            clear, CONFIG.spikeSize + CONFIG.templatesMaxShift,
            n_observations)
        np.save(path_to_spike_index_clear, clear)
        logger.info('Saved spike index clear in {}...'
                    .format(path_to_spike_index_clear))

        # save collided spikes
        collision = np.concatenate([element[2] for element in res], axis=0)
        logger.info('Removing collision indexes outside the allowed range to '
                    'draw a complete waveform...')
        collision, _ = detect.remove_incomplete_waveforms(
            collision, CONFIG.spikeSize + CONFIG.templatesMaxShift,
            n_observations)
        np.save(path_to_spike_index_collision, collision)
        logger.info('Saved spike index collision in {}...'
                    .format(path_to_spike_index_collision))

        if CONFIG.clustering.clustering_method == 'location':
            #######################
            # Waveform extraction #
            #######################

            # TODO: what should the behaviour be for spike indexes that are
            # when starting/ending the recordings and it is not possible to
            # draw a complete waveform?

            logger.info('Computing whitening matrix...')
            bp = BatchProcessor(standarized_path, standarized_params['dtype'],
                                standarized_params['n_channels'],
                                standarized_params['data_format'],
                                CONFIG.resources.max_memory)
            batches = bp.multi_channel()
            # the whitening matrix is estimated from the first batch only
            first_batch, _, _ = next(batches)
            Q = whiten.matrix(first_batch, CONFIG.neighChannels,
                              CONFIG.spikeSize)

            path_to_whitening_matrix = os.path.join(TMP_FOLDER,
                                                    'whitening.npy')
            np.save(path_to_whitening_matrix, Q)
            logger.info('Saved whitening matrix in {}'
                        .format(path_to_whitening_matrix))

            # apply whitening to every batch
            (whitened_path, whitened_params) = bp.multi_channel_apply(
                np.matmul,
                mode='disk',
                output_path=os.path.join(TMP_FOLDER, 'whitened.bin'),
                if_file_exists='skip',
                cast_dtype=OUTPUT_DTYPE,
                b=Q)

            main_channel = clear[:, 1]

            # load and dump waveforms from clear spikes
            path_to_waveforms_clear = os.path.join(TMP_FOLDER,
                                                   'waveforms_clear.npy')

            if os.path.exists(path_to_waveforms_clear):
                logger.info('Found clear waveforms in {}, loading them...'
                            .format(path_to_waveforms_clear))
                waveforms_clear = np.load(path_to_waveforms_clear)
            else:
                logger.info(
                    'Did not find clear waveforms in {}, reading them from {}'
                    .format(path_to_waveforms_clear, whitened_path))
                explorer = RecordingExplorer(
                    whitened_path, spike_size=CONFIG.spikeSize)
                waveforms_clear = explorer.read_waveforms(clear[:, 0], 'all')
                np.save(path_to_waveforms_clear, waveforms_clear)
                logger.info('Saved waveform from clear spikes in: {}'
                            .format(path_to_waveforms_clear))

            # NOTE(review): duplicate assignment, main_channel was already
            # set above
            main_channel = clear[:, 1]

            # save rotation
            detector_filename = CONFIG.neural_network_detector.filename
            autoencoder_filename = CONFIG.neural_network_autoencoder.filename
            rotation = neuralnetwork.load_rotation(detector_filename,
                                                   autoencoder_filename)
            path_to_rotation = os.path.join(TMP_FOLDER, 'rotation.npy')
            logger.info("rotation_matrix_shape = {}".format(rotation.shape))
            np.save(path_to_rotation, rotation)
            logger.info(
                'Saved rotation matrix in {}...'.format(path_to_rotation))

            logger.info('Denoising...')
            path_to_denoised_waveforms = os.path.join(TMP_FOLDER,
                                                      'denoised_waveforms.npy')
            if os.path.exists(path_to_denoised_waveforms):
                logger.info('Found denoised waveforms in {}, loading them...'
                            .format(path_to_denoised_waveforms))
                denoised_waveforms = np.load(path_to_denoised_waveforms)
            else:
                logger.info(
                    'Did not find denoised waveforms in {}, evaluating them'
                    'from {}'.format(path_to_denoised_waveforms,
                                     path_to_waveforms_clear))
                waveforms_clear = np.load(path_to_waveforms_clear)
                denoised_waveforms = dim_red.denoise(waveforms_clear, rotation,
                                                     CONFIG)
                logger.info('Saving denoised waveforms to {}'.format(
                    path_to_denoised_waveforms))
                np.save(path_to_denoised_waveforms, denoised_waveforms)

            isolated_index, x, y = get_isolated_spikes_and_locations(
                denoised_waveforms, main_channel, CONFIG)
            # standarize the estimated locations
            x = (x - np.mean(x)) / np.std(x)
            y = (y - np.mean(y)) / np.std(y)

            # spikes that are not isolated are moved to the collision set
            corrupted_index = np.logical_not(
                np.in1d(np.arange(clear.shape[0]), isolated_index))
            collision = np.concatenate(
                [collision, clear[corrupted_index]], axis=0)
            clear = clear[isolated_index]
            waveforms_clear = waveforms_clear[isolated_index]

            #################################################
            # Dimensionality reduction (Isolated Waveforms) #
            #################################################

            scores = dim_red.main_channel_scores(waveforms_clear, rotation,
                                                 clear, CONFIG)
            scores = (scores - np.mean(scores, axis=0)) / np.std(scores)
            # prepend the (x, y) location features to the scores
            scores = np.concatenate(
                [
                    x[:, np.newaxis, np.newaxis], y[:, np.newaxis, np.newaxis],
                    scores[:, :, np.newaxis]
                ],
                axis=1)
        else:
            # save scores
            scores = np.concatenate([element[0] for element in res], axis=0)

            logger.info(
                'Removing scores for indexes outside the allowed range to '
                'draw a complete waveform...')
            # keep only scores whose clear spike survived the edge filter
            scores = scores[idx]

            # compute Q for whitening
            logger.info('Computing whitening matrix...')
            bp = BatchProcessor(standarized_path, standarized_params['dtype'],
                                standarized_params['n_channels'],
                                standarized_params['data_format'],
                                CONFIG.resources.max_memory)
            batches = bp.multi_channel()
            first_batch, _, _ = next(batches)
            Q = whiten.matrix_localized(first_batch, CONFIG.neighChannels,
                                        CONFIG.geom, CONFIG.spikeSize)

            path_to_whitening_matrix = os.path.join(TMP_FOLDER,
                                                    'whitening.npy')
            np.save(path_to_whitening_matrix, Q)
            logger.info('Saved whitening matrix in {}'
                        .format(path_to_whitening_matrix))

            scores = whiten.score(scores, clear[:, 1], Q)

            np.save(path_to_score, scores)
            logger.info('Saved spike scores in {}...'.format(path_to_score))

        # save rotation
        detector_filename = CONFIG.neural_network_detector.filename
        autoencoder_filename = CONFIG.neural_network_autoencoder.filename
        rotation = neuralnetwork.load_rotation(detector_filename,
                                               autoencoder_filename)
        path_to_rotation = os.path.join(TMP_FOLDER, 'rotation.npy')
        np.save(path_to_rotation, rotation)
        logger.info(
            'Saved rotation matrix in {}...'.format(path_to_rotation))

        # NOTE(review): rotation was already saved inside the 'location'
        # branch and scores inside the other branch, so one of these saves
        # is redundant on each path -- confirm
        np.save(path_to_score, scores)
        logger.info('Saved spike scores in {}...'.format(path_to_score))

    return scores, clear, collision
def get_isolated_spikes_and_locations(denoised_waveforms, main_channel,
                                      CONFIG):
    """Select spatially isolated spikes and estimate their (x, y) locations.

    A spike is considered isolated when its main channel is at least
    CONFIG.location.isolation_threshold away from the main channels of both
    its temporal neighbors (the last spike is never selected). Locations are
    energy-weighted centroids over the neighboring channels of each spike's
    main channel.

    Returns (isolated_index, x, y).
    """
    # per-channel energy of every denoised waveform (norm over time axis)
    channel_power = np.linalg.norm(denoised_waveforms, axis=1)
    threshold = CONFIG.location.isolation_threshold
    # pairwise distances between electrode positions
    pairwise = ss.distance_matrix(CONFIG.geom, CONFIG.geom)

    picked = []
    for idx in range(denoised_waveforms.shape[0] - 1):
        if pairwise[main_channel[idx], main_channel[idx + 1]] < threshold:
            continue
        # the first spike has no predecessor to compare against
        if idx == 0 or pairwise[main_channel[idx],
                                main_channel[idx - 1]] >= threshold:
            picked.append(idx)
    isolated_index = np.asarray(picked, dtype=np.int32)

    # build a mask keeping only channels neighboring each spike's main channel
    n_channels = CONFIG.recordings.n_channels
    mask = np.ones([isolated_index.shape[0], n_channels])
    for row, spike in enumerate(isolated_index):
        neighbors = np.where(CONFIG.neighChannels[main_channel[spike]])[0]
        for chan in range(n_channels):
            if mask[row, chan] and chan not in neighbors:
                mask[row, chan] = 0

    # energy-weighted centroid of the electrode geometry
    weights = mask * channel_power[isolated_index]
    total_power = np.sum(weights, axis=1)
    x = np.sum(weights * CONFIG.geom[:, 0], axis=1) / total_power
    y = np.sum(weights * CONFIG.geom[:, 1], axis=1) / total_power
    return isolated_index, x, y
|
"""
Created by Epic at 10/10/20
"""
from .exceptions import IncorrectModuleFormat
from pathlib import Path
from asyncio import get_event_loop, Future
from aiohttp import ClientSession
from typing import Optional
from re import compile
from colorama import init, Fore, Style
# Matches "[type@]author/name[:version]", e.g. "github@nest/core:1.0".
# Fixes: raw string (the original "\w" is an invalid escape sequence that
# warns on modern Python) and [A-Za-z0-9_-] instead of [A-z0-9-_] -- the
# A-z range also matched the punctuation characters between 'Z' and 'a'
# ("[", "\", "]", "^", "_", "`"), accepting malformed author/module names.
regex = compile(r"(\w+@)?([A-Za-z0-9_-]+)/([A-Za-z0-9_-]+)(:\w+)?")
init()  # enable ANSI color handling (colorama)
log_queue = []
def is_there_a_project_here(directory: Path) -> bool:
    """Return whether *directory* contains a nest.json project file."""
    return (directory / "nest.json").is_file()
def is_module_already_added(modules: list, name: str, author: str) -> bool:
    """Return True if a module with the given name AND author is present.

    Bug fix: the original compared ``module["author"]`` against the literal
    string ``"author"`` instead of the ``author`` parameter, so it only
    matched modules whose author was literally named "author".
    """
    return any(
        module["name"] == name and module["author"] == author
        for module in modules
    )
class ValuedEvent:
    """An awaitable one-shot event that carries a value.

    The first call to set() stores the value; wait() returns it immediately
    if already set, otherwise it waits until set() is called. Subsequent
    set() calls are ignored.
    """

    def __init__(self):
        self.loop = get_event_loop()
        # Bug fix: the future was previously initialized to None, so set()
        # and wait() crashed with AttributeError ('NoneType' has no
        # attribute 'done') -- create the future up front instead.
        self.future: Future = self.loop.create_future()

    def set(self, value):
        """Store *value*, unless a value has already been stored."""
        if not self.future.done():
            self.future.set_result(value)

    async def wait(self):
        """Return the stored value, waiting for set() if necessary."""
        if self.future.done():
            return self.future.result()
        return await self.future
def get_uri_data(uri: str) -> dict:
    """Parse a module URI of the form ``[type@]author/name[:version]``.

    Returns a dict with the uri, name, author, version (or None) and
    download_type (defaults to "github" when no ``type@`` prefix is given).
    Raises IncorrectModuleFormat when the URI does not match the expected
    pattern.
    """
    match = regex.fullmatch(uri)
    if match is None:
        raise IncorrectModuleFormat

    source, author, name, version_tag = match.groups()
    # "github@" -> "github"; no prefix means the default GitHub source
    download_type = "github" if source is None else source[:-1]
    # ":1.2" -> "1.2"; no tag means no pinned version
    version = None if version_tag is None else version_tag[1:]

    return {
        "uri": uri,
        "name": name,
        "author": author,
        "version": version,
        "download_type": download_type
    }
async def get_file_contents(user, name, file_name):
    """Fetch *file_name* from the master branch of the GitHub repo
    ``user/name``.

    Returns the file's text, or None when the request fails (e.g. 404).
    """
    async with ClientSession() as session:
        session.headers = {
            "User-Agent": "Nest.py (https://github.com/nest-framework/nest.py)"
        }
        request = await session.get(
            f"https://raw.githubusercontent.com/{user}/{name}/master/{file_name}")
        try:
            request.raise_for_status()
        except Exception:
            # Bug fix: a bare `except:` also swallowed CancelledError and
            # KeyboardInterrupt; only treat ordinary errors as "not found".
            return None
        return await request.text()
def warn(text):
    """Queue a yellow WARNING line for later printing by clear_logs()."""
    entry = f"[{Fore.YELLOW}WARNING{Style.RESET_ALL}] {text}"
    log_queue.append(entry)
def clear_logs():
    """Print every queued log line.

    NOTE(review): despite the name, this does not empty log_queue --
    confirm whether that is intended.
    """
    for entry in log_queue:
        print(entry)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.