index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
20,400 | 52b4f0ac5d3b7b4fac267f2af63cff3b5df9a001 | """add section_id column
Revision ID: 7bf4eac76958
Revises: 0d267ae11945
Create Date: 2023-02-27 00:17:13.935016
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "7bf4eac76958"
down_revision = "0d267ae11945"
branch_labels = None
depends_on = None
def upgrade():
    """Forward migration: rename the link-pair unique constraints to the
    shorter "uq_pair", add the nullable ``node.section_id`` column, and
    include it in the ``uq_node`` uniqueness definition so nodes may now
    differ by section id alone.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table("cre_links", schema=None) as batch_op:
        # Drop before create: SQLite batch mode rebuilds the table, and the
        # old and new constraints cover the same column pair.
        batch_op.drop_constraint("uq_cre_link_pair", type_="unique")
        batch_op.create_unique_constraint("uq_pair", ["group", "cre"])
    with op.batch_alter_table("cre_node_links", schema=None) as batch_op:
        batch_op.drop_constraint("uq_cre_node_link_pair", type_="unique")
        batch_op.create_unique_constraint("uq_pair", ["cre", "node"])
    with op.batch_alter_table("node", schema=None) as batch_op:
        # Nullable so existing rows need no backfill.
        batch_op.add_column(sa.Column("section_id", sa.String(), nullable=True))
        batch_op.drop_constraint("uq_node", type_="unique")
        batch_op.create_unique_constraint(
            "uq_node",
            [
                "name",
                "section",
                "subsection",
                "ntype",
                "description",
                "version",
                "section_id",
            ],
        )
    # ### end Alembic commands ###
def downgrade():
    """Reverse migration: restore the original constraint names and drop
    ``node.section_id``. Tables are processed in the opposite order of
    :func:`upgrade`.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table("node", schema=None) as batch_op:
        batch_op.drop_constraint("uq_node", type_="unique")
        batch_op.create_unique_constraint(
            "uq_node",
            ["name", "section", "subsection", "ntype", "description", "version"],
        )
        batch_op.drop_column("section_id")
    with op.batch_alter_table("cre_node_links", schema=None) as batch_op:
        batch_op.drop_constraint("uq_pair", type_="unique")
        batch_op.create_unique_constraint("uq_cre_node_link_pair", ["cre", "node"])
    with op.batch_alter_table("cre_links", schema=None) as batch_op:
        batch_op.drop_constraint("uq_pair", type_="unique")
        batch_op.create_unique_constraint("uq_cre_link_pair", ["group", "cre"])
    # ### end Alembic commands ###
|
20,401 | f85e4a0b6334bedcf0e1e36f84d5436b3ab64a58 | from django.contrib import admin
from .models import Disaster
admin.site.register(Disaster)
|
20,402 | bbc0f8194bc20e6a6778e3f2fe59f0cb4e769175 | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import sqlite3
class SpiderGhPipeline(object):
    """Scrapy item pipeline that holds a SQLite connection for "yubao" items.

    The actual INSERT statements are currently disabled; items pass through
    unchanged. The connection is opened per crawl and closed when the spider
    finishes.
    """

    def open_spider(self, spider):
        """Scrapy hook: open the database connection when the crawl starts."""
        self.con = sqlite3.connect("yubao.sqlite")
        self.cu = self.con.cursor()

    def process_item(self, item, spider):
        """Log the spider name and pass the item through unchanged.

        NOTE(review): the original carried several commented-out INSERT
        experiments against the ``yubao`` table; persistence is still
        disabled here.
        """
        print (spider.name,'yubao')
        return item

    def close_spider(self, spider):
        """Scrapy hook: release the database connection.

        Fix: Scrapy invokes ``close_spider`` — the original method was named
        ``spider_close``, so it was never called and the connection leaked.
        """
        self.con.close()

    # Backwards-compatible alias for any code that called the old misnamed hook.
    spider_close = close_spider
|
20,403 | e3eac460baf0dcc8aee84fb2b4c0f499fcf940a7 | #Advent of Code 2019 Day 24 Part 1
# NOTE(review): hard-coded absolute input path; the file handle is never
# closed (harmless for a one-shot script, but a `with` block would be safer).
f = open('C:/Users/Simon/SkyDrive/Home Stuff/Python/Advent of Code/2019/2019-24.txt')
contents = f.read()
# NOTE(review): `input` shadows the builtin; it holds the puzzle grid rows.
input=contents.splitlines()
import math
class Area():
    """Bug/acre grid for AoC 2019 day 24, keyed by complex position x + y*1j.

    Cells hold the raw input characters ('#' = bug, '.' = empty — presumably;
    confirm against the puzzle input).
    """
    def __init__(self,input):
        # NOTE(review): parameter `input` shadows the builtin; kept as-is.
        self.map={}
        j=0
        for row in input:
            i=0
            for g in row:
                pos=complex(i,j)
                self.map[pos]=g
                i+=1
            j+=1
        #Find bounds of map
        self.minX=int(min(self.map.keys(),key=lambda x:x.real).real)
        self.maxX=int(max(self.map.keys(),key=lambda x:x.real).real)
        self.minY=int(min(self.map.keys(),key=lambda x:x.imag).imag)
        self.maxY=int(max(self.map.keys(),key=lambda x:x.imag).imag)
    def nAdjacent(self,pos,contents):
        """Count the N/S/E/W neighbours of `pos` whose cell equals `contents`."""
        count=0
        for i in (-1,0,1):
            for j in (-1j,0,1j):
                if (i==0 or j==0) and i!=j: #Only count 4 adjacent neighbours
                    # i!=j rejects the (0, 0) offset (0 == 0j in Python);
                    # diagonals fail the (i==0 or j==0) test, leaving exactly
                    # the four orthogonal offsets.
                    nPos=pos+i+j
                    if nPos in self.map.keys():
                        if self.map[nPos]==contents:
                            count+=1
        return(count)
    def nTotal(self,contents):
        """Count cells across the whole map whose symbol equals `contents`."""
        count=0
        for k,v in self.map.items():
            if v==contents:
                count+=1
        return(count)
    def strMap(self):
        """Concatenate all cell symbols (dict insertion order = reading order)."""
        ret=''
        for v in self.map.values():
            ret+=v
        return(ret)
    def bioRating(self): #Defined as the sum of each bug in increasing powers of 2 based on reading order
        # Assumes a fixed 5x5 grid (ranges are hard-coded below).
        count=0
        for j in range(5):
            for i in range(5):
                if self.map[complex(i,j)]=='#':
                    count+=int(math.pow(2,5*j+i))
        return(count)
    def update(self): #Check adjacent acres and update each acre
        # All cells are evaluated against the current generation, then the
        # new symbols are applied at once (simultaneous update).
        newItems={}
        for k,v in self.map.items():
            if v=='#':
                # A bug survives only with exactly one adjacent bug.
                if self.nAdjacent(k,'#')==1:
                    newItems[k]='#'
                else:
                    newItems[k]='.'
            elif v=='.':
                # An empty acre becomes infested with one or two adjacent bugs.
                if self.nAdjacent(k,'#') in (1,2):
                    newItems[k]='#'
                else:
                    newItems[k]='.'
        for k in newItems.keys():
            self.map[k]=newItems[k]
    def __str__(self,letters=False):
        """Render the grid with units-digit axis labels for debugging."""
        ret1=' '
        ret2=''
        for i in range(self.minX,self.maxX+1):
            ret1+=str(abs(i)%10) #Create units row at top
        for j in range(self.minY,self.maxY+1):
            ret2+=str(abs(j)%10) #Create units column going down
            for i in range(self.minX,self.maxX+1):
                pos=complex(i,j)
                symbol=self.map[pos]
                ret2+=symbol
            ret2+='\n'
        return(ret1+'\n'+ret2)
def keyFromVal(dict, val):
    """Return the first key in `dict` whose value equals `val`.

    Returns None implicitly when no value matches.
    NOTE(review): parameter `dict` shadows the builtin; kept for interface
    compatibility.
    """
    for key, value in dict.items():
        if value == val:
            return key
def solveA(input):
    """Run the bug automaton until a biodiversity rating repeats, and return it.

    `bioRating()` uniquely encodes a grid state, so the set of seen ratings
    doubles as a set of seen states.
    """
    area = Area(input)
    print(area)
    seen = {area.bioRating()}
    for step in range(1000):
        if step % 20 == 1:
            print('n=' + str(step))
        area.update()
        rating = area.bioRating()
        if rating in seen:
            return rating
        seen.add(rating)
retA=solveA(input) |
20,404 | 069043700ad67280da25586244d0d4e92dbd6295 | # Dynamic Programming for fibonacci
# n 0 1 2 3 4 5 6
# f(n) 0 1 1 2 3 5 8
#Bottoms up is iterative with subproblems solved before they are needed
#Can be solve without memoization as value of only last 2 is needed, hence can be kept in 2 variables i.e a,b and sum = a+b ; a = b; b = sum;
# bFibMemo[i] == fib(i); grows on demand and is shared across calls.
bFibMemo = [0, 1]
def bottomsUpFibonacci(n):
    """Return fib(n) (fib(0)=0, fib(1)=1) via iterative bottom-up DP.

    Fix: the original always appended entries for indices 2..n regardless of
    what the shared memo already contained, so a second call — or calls in
    non-increasing order — appended wrong values and made ``bFibMemo[n]``
    index stale data. We now only extend from the memo's current length,
    which also makes repeated calls O(1) for already-computed n.
    """
    for i in range(len(bFibMemo), n + 1):
        bFibMemo.append(bFibMemo[i - 1] + bFibMemo[i - 2])
    return bFibMemo[n]
#Tops down is recursive with subproblems solved after they are called
#memoization is a must to prevent solving same subproblem multiple times
# Shared memo table for the recursive (top-down) variant: n -> fib(n).
tFibMemo = {0: 0, 1: 1}
def topsDownFibonacci(n):
    """Return fib(n) recursively, memoizing every subproblem in tFibMemo.

    Memoization is essential here: without it the naive recursion solves the
    same subproblems exponentially many times.
    """
    if n not in tFibMemo:
        tFibMemo[n] = topsDownFibonacci(n - 1) + topsDownFibonacci(n - 2)
    return tFibMemo[n]
def main():
    """Demo entry point: print fib(10) computed by both strategies."""
    print('Let\'s Code Fibonacci')
    for fib in (bottomsUpFibonacci, topsDownFibonacci):
        print(fib(10))

if __name__ == '__main__':
    main()
20,405 | 424eb11cce4fb286a83de7f4d52338e21c3ee7b6 | #Library
import pandas as pd
import math
import numpy as np
from sklearn.utils import shuffle
from random import random
from random import randint
# Neural Model Class
from Modify_Neural_Network import NeuralModel
def prepareData(num_inputs, number_node_hidden, learning_rate, number_of_iterate):
    """Interactive driver: load train/test CSVs, optionally normalise them,
    build a NeuralModel with (partly hard-coded) initial weights, train it,
    and print per-row predictions plus overall accuracy.

    Assumes the CSVs have a 'class' column of 0-based integer labels (used
    directly as list indices below) and `num_inputs` feature columns —
    TODO confirm against the data files.
    """
    # Min-max normaliser with a 5% margin on both ends.
    normalized = lambda x, maxv, minv : (x-minv*0.95)/(maxv*1.05-minv*0.95)
    train_path = input("Type path of train file: ")
    test_path = input("Type path of test file: ")
    training_data = pd.read_csv("../../../dataFile/"+train_path+".csv")
    testing_data = pd.read_csv("../../../dataFile/"+test_path+".csv")
    # Per-feature min/max over the training set (the 'class' column is skipped,
    # so its slot stays 0).
    max_val, min_val = [0 for i in range(num_inputs)], [0 for i in range(num_inputs)]
    for i in range(len(training_data.keys())):
        if(training_data.keys()[i] != 'class'):
            max_val[i] = max(training_data[training_data.keys()[i]])
            min_val[i] = min(training_data[training_data.keys()[i]])
    # count each data class
    list_data_class = [x for x in set(training_data['class'])]
    number_of_class = len(list_data_class)
    list_split_data = [[] for _ in range(number_of_class)]
    # split data in each class list
    for i in list_data_class:
        list_split_data[i] = training_data[training_data['class'] == i]
    number_data_each_class = []
    for j in range(len(list_split_data)):
        number_data_each_class.append(len(list_split_data[j]))
    print(len(list_split_data[0]),len(list_split_data[1]), len(training_data))
    print(number_data_each_class)
    # calculate learning rate in each class
    # (rarer classes get a learning rate closer to `learning_rate`;
    # NOTE(review): list_learning_rate is computed but not used below)
    list_learning_rate = [0 for _ in range(number_of_class)]
    for class_index in range(len(list_split_data)):
        list_learning_rate[class_index] = (1-len(list_split_data[class_index])/len(training_data))*learning_rate
    # shuffle row in dataframe
    # (rows are shuffled but the original index labels are re-applied in order)
    index = training_data.index
    training_data = shuffle(training_data)
    training_data.index = index
    index = testing_data.index
    testing_data = shuffle(testing_data)
    testing_data.index = index
    # Convert each dataframe row to a plain list of column values, in column
    # order — presumably features first with 'class' last; verify row[-1] use.
    ID = training_data.index.values
    list_train = []
    for j in ID:
        format_data = []
        for i in training_data:
            format_data.append(training_data[i][j])
        list_train.append(format_data)
    ID2 = testing_data.index.values
    list_test = []
    for j in ID2:
        format_data = []
        for i in testing_data:
            format_data.append(testing_data[i][j])
        list_test.append(format_data)
    # print(data_key)
    num_outputs = len(set(training_data['class']))
    if(input("Do you want to nomalized this data?? (Yes or No) : ") == 'Yes'):
        print("Eiei")
        # Normalise only the first len(max_val) columns (the feature columns).
        for i in range(len(list_train)):
            for j in range(len(max_val)):
                list_train[i][j] = normalized(list_train[i][j], max_val[j], min_val[j])
        for i in range(len(list_test)):
            for j in range(len(max_val)):
                list_test[i][j] = normalized(list_test[i][j], max_val[j], min_val[j])
    # %% Init Weight in neural network model
    weight1 = []
    weight2 = []
    # for i in range(number_node_hidden):
    #     weight = {'weights':[]}
    #     # nm_weight = {'weights':[]}
    #     for j in range(num_inputs):
    #         weight_random = random()
    #         sub_weight = {}
    #         for k in np.arange(0.0, 1.1, 0.1):
    #             sub_weight[round(k, 1)] = weight_random
    #         weight['weights'].append(sub_weight)
    #     weight1.append(weight)
    # weight2 = [{'weights':[random() for i in range(number_node_hidden)]} for i in range(num_outputs)]
    # Fixed weight snapshot: a[0] = hidden-layer weights, a[1] = output-layer
    # weights (presumably captured from a previous random run for
    # reproducibility — TODO confirm).
    a = [[{'weights': [0.1791617459294188, 0.20949535419419907, 0.32159256607715114, 0.2559110040058731, 0.5894734867633385, 0.7265703339619599, 0.053396734714828864, 0.6869924446146348, 0.7260764960484191]}, {'weights': [0.03631471862789082, 0.8386361317090046, 0.5378010903880449, 0.9146475013894813, 0.40369801706260633, 0.3840088536165571, 0.30409269032689257, 0.9743106383084608, 0.1458162190061285]}, {'weights': [0.252112766358789, 0.038507878238444704, 0.2003138072655012, 0.823688024684086, 0.5546729572356813, 0.7605398538065354, 0.3701165859034794, 0.8138821779270934, 0.5153920068185434]}, {'weights': [0.5129516465965127, 0.08289478343727208, 0.2775915574669692, 0.320472841314004, 0.49246111778764456, 0.2565040365304163, 0.01911706436684213, 0.468172845124437, 0.9107127651490304]}, {'weights': [0.7037612227156317, 0.4075121111603984, 0.809597986579944, 0.39877279122269615, 0.5603314574634248, 0.20720845252308795, 0.9279479463408908, 0.06314467853667738, 0.5938049541536504]}, {'weights': [0.47999337192258784, 0.27994457434481246, 0.33464823975379965, 0.6191641228617221, 0.20503367576778364, 0.3762784037229733, 0.7846772369445457, 0.545794786854605, 0.9092497340991079]}, {'weights': [0.7271978593906084, 0.546594742312516, 0.08207690630941478, 0.8169096284176823, 0.5200688969261043, 0.2203161816231788, 0.7483453786157107, 0.5231731913863417, 0.6952810121160047]}, {'weights': [0.50802973976921, 0.3004335193461627, 0.9268371432568888, 0.2679557352032178, 0.16851248134994645, 0.9379315610522461, 0.47561278063324763, 0.8903244922474931, 0.26504310972552947]}, {'weights': [0.03683262328461823, 0.048413039311544837, 0.8214255095871864, 0.7806951630266145, 0.4929803748675007, 0.8768924135942017, 0.16029102375625037, 0.22306923794346845, 0.033933282442406676]}, {'weights': [0.053440253804044935, 0.5472109480744625, 0.9578948822818665, 0.28172289640502224, 0.6833273738530621, 0.32905302895121147, 0.290631527325788, 0.6895059195786128, 0.6503548490701798]}, {'weights': 
    [0.7115445183015805, 0.376659481884389, 0.38622321453773956, 0.7124922821969374, 0.5246152168979741, 0.9065573616242696, 0.8108687282428997, 0.5940074524832188, 0.3827238609246866]}], [{'weights': [0.6093538373490046, 0.590650192596591, 0.8713715161464182, 0.6315256250186093, 0.685813538172896, 0.1018853870449502, 0.24258263920472778, 0.2184604778664716, 0.8932888430497035, 0.5845676866900875, 0.4878564393334619]}, {'weights': [0.8529214953082211, 0.5011727470453174, 0.6089569019064568, 0.9394670551384207, 0.9099001782796808, 0.42354949025769184, 0.6339649208538661, 0.46247282270394385, 0.8145245491779319, 0.4473399539212264, 0.6983163279266904]}]]
    # Expand hidden-layer weights: each scalar weight becomes a dict mapping
    # the keys 0.0..1.0 (step 0.1) to the same value; output-layer weights
    # are used as-is.
    for i in range(len(a)):
        if i == 0:
            for j in range(len(a[i])):
                weight = {'weights':[]}
                for k in range(len(a[i][j]['weights'])):
                    sub_weight = {}
                    for c in np.arange(0.0, 1.1, 0.1):
                        sub_weight[round(c, 1)] = a[i][j]['weights'][k]
                    weight['weights'].append(sub_weight)
                weight1.append(weight)
        else:
            weight2 = a[i]
            break
    # %% Model part
    network = NeuralModel(num_inputs, number_node_hidden, num_outputs, weight1, weight2, number_data_each_class)
    network.training(list_train,learning_rate , number_of_iterate, num_outputs, list_test)
    # Evaluate: predicted class = argmax of the network's output vector.
    accuracy = 0
    for row in list_test:
        prediction = network.predict(row)
        print("Apply Model")
        print("Expect=%d Output=%d" % (row[-1], prediction.index(max(prediction))))
        # file_object = open('../../../present/learning_rate_test/yeast/BPNN+MW+ALR/result/yeast_prediction_2'+'.txt', 'a')
        # file_object.write("Expect=%d Output=%d\n" % (row[-1], prediction.index(max(prediction))))
        # file_object.write(str(row)+"\n\n")
        # file_object.close()
        if row[-1] == prediction.index(max(prediction)):
            accuracy += 1
    sum_accuracy = accuracy/len(list_test)*100
    print("Mean Accuracy = ", sum_accuracy)
    # file_object = open('../../../present/learning_rate_test/yeast/BPNN+MW+ALR/result/yeast_prediction_2'+'.txt', 'a')
    # file_object.write("Accuracy : "+ str(sum_accuracy))
    # file_object.close()
prepareData(int(input('Please fill number of node input: ')), int(input('Please fill number of node hidden: ')),float(input('Please fill number of learning rate: ')), int(input('Please fill round of iteration in training phase: '))) |
20,406 | c3f120473d236e8becb3183e13b05b9b984ddb62 | # -*- coding: utf-8 -*-
"""
Created on Sun Apr 15 14:02:59 2018
@author: Robert
"""
import math
import gym
from gym import spaces, logger
from gym.utils import seeding
import numpy as np
class LakeLoadEnv(gym.Env):
    """Gym environment that appears to model phosphorus loading of a lake.

    State is (P, M): phosphorus in the water column and in the mud. The 12
    discrete actions select a loading level L; the transition follows the
    cited difference equations (eq 6-10) with multiplicative log-normal noise.
    """
    metadata = {
        'render.modes': ['human', 'rgb_array'],
        'video.frames_per_second': 50
    }
    def __init__(self):
        self.pc = 2.4 # half-saturation constant for P
        self.r = 0.34 # recycling contant
        self.s = 3.3 # transfer to mud constant
        self.h = 0.19 # flushing constant
        self.b = 0.022 # removal-from-mud constant
        self.q = 8 # shape constant for recycling
        self.var = 0.01 # noise
        # constants -- see table 1
        self.alpha = 1 # benefit per unit loading
        self.beta1 = 0 # loss of amenity
        self.beta2 = 0.065 # loss of amenity
        # see eq 13
        self.pThresh = 35
        self.mThresh = 300
        self.action_space = spaces.Discrete(12)
        # 12 actions - action 0 is do not add P, action 11 is add 12 units of P
        l = np.array([0, 0])
        h = np.array([self.pThresh, self.mThresh])
        self.observation_space = spaces.Box(l, h)
        # observation space is (P,mP)
        self.seed()
        self.viewer = None
        self.state = None
    def e(self):
        # Retention factor exp(-(s + h)); constant given the fixed parameters.
        return math.exp(-self.s - self.h)
    def g(self):
        # Mud-transfer coefficient used in the M update (see eq 6-10).
        return (self.s + self.h - 1 + self.e()) / (self.s + self.h) * self.s / (self.s + self.h)
    def f(self, P):
        # Sigmoid recycling response of P with half-saturation pc and shape q.
        return P ** self.q / (self.pc ** self.q + P ** self.q)
    def seed(self, seed=None):
        """Seed the environment's private RNG; returns the seed list (gym API)."""
        self.np_random, seed = seeding.np_random(seed)
        return [seed]
    def step(self, action):
        """Advance one time step: returns (observation, reward, done, info)."""
        assert self.action_space.contains(action), "%r (%s) invalid" % (action, type(action))
        state = self.state
        P, M = state
        # Map action 0..11 linearly onto loading 0..12 units.
        L = action * 12 / 11
        # Multiplicative log-normal noise N with E[N] = 1 (mean-corrected).
        z = self.np_random.normal(0, math.sqrt(self.var))
        N = math.exp(z - 0.5 * self.var)
        e = self.e()
        f = self.f(P)
        g = self.g()
        Pnext = e * P + (1 - e) / (self.s + self.h) * (L * N + self.r * M * f)
        Mnext = M * (1 - self.b) + (1 - e) * self.s / (self.s + self.h) * P + g * L * N + (g - 1) * self.r * M * f
        # see eq 6-10
        self.state = (Pnext, Mnext)
        done = False
        # NOTE(review): `done` is always False, so the elif/else branches
        # below (copied from the CartPole template) are unreachable dead code.
        if not done:
            # Reward: loading benefit minus quadratic amenity loss in P.
            reward = self.alpha * math.exp(z) * L - self.beta1 * P - self.beta2 * P ** 2
        elif self.steps_beyond_done is None:
            # Pole just fell!
            self.steps_beyond_done = 0
            reward = self.alpha * math.exp(z) * L - self.beta1 * P - self.beta2 * P ** 2
        else:
            if self.steps_beyond_done == 0:
                logger.warn(
                    "You are calling 'step()' even though this environment has already returned done = True. You should always call 'reset()' once you receive 'done = True' -- any further steps are undefined behavior.")
            self.steps_beyond_done += 1
            reward = 0.0
        return np.array(self.state), reward, done, {}
    def reset(self):
        """Sample a fresh start state uniformly from P in [0,5], M in [0,150]."""
        self.state = self.np_random.uniform(low=np.array([0, 0]), high=np.array([5, 150]), size=(2,))
        self.steps_beyond_done = None
        return np.array(self.state)
    def start_at_state(self, P, M):
        """Start from a caller-chosen (P, M), falling back to a random state."""
        #if P not in range(0, 5) or M not in range(0, 150):
        if not (0 <= P <= 35) or not (0 <= M <= 300):
            # NOTE(review): the message still quotes the old 0-5 / 1-150
            # bounds, but the check above accepts 0-35 / 0-300.
            print(
                "Invalid values, please enter a value between 0 and 5, and M between 1 and 150. Start state will be randomized")
            self.state = self.np_random.uniform(low=np.array([0, 0]), high=np.array([5, 150]), size=(2,))
        else:
            self.state = np.array([P, M])
        self.steps_beyond_done = None
        return np.array(self.state)
    def render(self, mode='human'):
        """Draw the state as a single dot: x = M (scaled), y = P (scaled)."""
        screen_width = 600
        screen_height = 400
        world_width = self.mThresh  # NOTE(review): unused
        Xscale = screen_width / self.mThresh
        Yscale = screen_height / self.pThresh
        dotL = 5
        dotH = 5
        if self.viewer is None:
            # Lazily build the viewer and the (reused) dot geometry.
            from gym.envs.classic_control import rendering
            self.viewer = rendering.Viewer(screen_width, screen_height)
            l, r, t, b = -dotL / 2, dotL / 2, dotH / 2, -dotH / 2
            dot = rendering.FilledPolygon([(l, b), (l, t), (r, t), (r, b)])
            self.dottrans = rendering.Transform()
            dot.add_attr(self.dottrans)
            self.viewer.add_geom(dot)
        if self.state is None: return None
        x = self.state
        Xpos = x[1] * Xscale # MIDDLE OF CART
        Ypos = x[0] * Yscale
        self.dottrans.set_translation(Xpos, Ypos)
        return self.viewer.render(return_rgb_array=mode == 'rgb_array')
    def close(self):
        if self.viewer: self.viewer.close()
# `data` is presumably a DataFrame of station measurements indexed by
# timestamp — confirm against the notebook this snippet came from.
# Monthly mean of station FR04037 from 2009 onward.
data.loc['2009':, 'FR04037'].resample('M').mean().plot()
# Same series, monthly median, drawn for comparison.
data.loc['2009':, 'FR04037'].resample('M').median().plot()
20,408 | 1ca09d23cf59fd0d43dad6192e48eed8e24e9275 | # The script plots the sequence of Collatz
# starting from a specific n_0
import sys
import matplotlib.pyplot as plt
from matplotlib import style
style.use("Solarize_Light2")
def f(n):
    """One Collatz step: n/2 for even n, 3n+1 for odd n.

    Fix: the original ``int(n/2)`` goes through float division, which loses
    precision once n exceeds 2**53; floor division ``n // 2`` stays exact
    for arbitrarily large integers.
    """
    return n // 2 if n % 2 == 0 else 3 * n + 1
# Exactly one command-line argument (the starting value) is required.
if len(sys.argv) != 2:
    sys.exit("USAGE : Collatz.py n_0")
# n_0 : starting value
n_0 = int(sys.argv[1])
seq = [n_0]
# The algorithm ends when 1 is reached
# after 1, the sequence enters a loop
while(seq[len(seq) - 1] != 1):
    # A new value is added to the list
    # as the result of the f function
    # applied to the last element of
    # the sequence
    seq.append(f(seq[len(seq) - 1]))
print(seq)
# Diagram visualization
plt.plot([j for j in range(len(seq))], seq, "mo--")
# Pad both axes by 10% so the endpoints are not flush with the frame.
plt.axis([0, len(seq) + int(0.1 * len(seq)), 0, int(max(seq)) + int(0.1 * max(seq))])
plt.suptitle("Visualizing Collatz sequence for " + "n_0 = " + str(n_0))
plt.show()
20,409 | 7737de9185eaea9f49db3214eed15f81bd1689da | # _*_ coding: utf-8 _*_
__author__ = 'Thpffcj'
# Use a dict in place of a switch statement.
day = 8  # not a key in `switcher` below, so the default handler runs
def get_sunday():
    """Handler for day key 0."""
    return 'Sunday'


def get_monday():
    """Handler for day key 1."""
    return 'Monday'


def get_tuesday():
    """Handler for day key 2."""
    return 'Tuesday'
def get_default():
    """Fallback handler for day keys missing from the dispatch dict.

    Fix: corrects the typo 'Unkown' -> 'Unknown' in the returned label.
    """
    return 'Unknown'
# Dispatch table mapping day keys to their handler functions.
switcher = {
    0: get_sunday,
    1: get_monday,
    2: get_tuesday
}
# day_name = switcher[day]
# .get() supplies get_default for unmapped keys; the trailing () calls
# whichever handler was selected.
day_name = switcher.get(day, get_default)()
print(day_name)
|
20,410 | 4cfb724518f842f9f97d5381518c30bd8e57b148 | from urllib.robotparser import RobotFileParser
# Check whether jianshu.com's robots.txt allows crawling an article page.
rp = RobotFileParser()
# Fix: robots.txt must be fetched from the same host the query targets
# (jianshu.com); the original pointed at the typo domain "janshu.com",
# so the parsed rules did not apply to the URL being checked.
rp.set_url('http://jianshu.com/robots.txt')
rp.read()
print(rp.can_fetch('*', 'http://jianshu.com/p/b67554025d7d'))
|
20,411 | 81a860383fd195988de7d6f094e45a8235f93820 | i=open('words.txt','r')
for line in i.readlines():
x=len(line.strip(' '))
print(line,x)
|
20,412 | 5800cd7ac1d4378f212290541666e1b0b138c348 | from selenium.webdriver import Remote
from selenium import webdriver
def browser():
    """Create a Remote chrome WebDriver bound to the local Selenium hub."""
    hub_address = "127.0.0.1:4444"
    capabilities = {'browserName': 'chrome'}
    return webdriver.Remote(
        command_executor='http://' + hub_address + '/wd/hub',
        desired_capabilities=capabilities,
    )
if __name__ == '__main__':
    # Smoke test: open the login page through the remote driver, then quit.
    dr = browser()
    dr.get("https://www.asuswebstorage.com/navigate/a/#/login")
    dr.quit()
20,413 | 34867981bed619772898afbd14e7714ea628742c | """
https://leetcode.com/problems/maximum-subarray/
Given an integer array nums, find the contiguous subarray (containing at least one number) which has the largest sum and return its sum.
Example 1:
Input: nums = [-2,1,-3,4,-1,2,1,-5,4]
Output: 6
Explanation: [4,-1,2,1] has the largest sum = 6.
"""
class Solution:
    """Maximum sum over all contiguous subarrays of a fixed size k.

    NOTE(review): the module docstring cites LeetCode 53 (variable-length
    maximum subarray / Kadane); these methods actually solve the fixed-size
    sliding-window variant.
    """

    def findMaximumSubarraySlidingWindow(self, k, nums):
        """Return the maximum window sum in O(n) time and O(1) space.

        Fix: the running maximum previously started at 0, so inputs whose
        windows all sum negative wrongly reported 0. The maximum is now
        seeded from the first full window. Returns 0 when nums has fewer
        than k elements (no full window exists), matching the original
        degenerate behavior.
        """
        window_start, window_sum = 0, 0
        window_max = None
        for i in range(len(nums)):
            window_sum += nums[i]  # add the next element
            # Slide only once the window has reached size k.
            if i >= k - 1:
                window_max = window_sum if window_max is None else max(window_max, window_sum)
                window_sum -= nums[window_start]  # subtract the element going out
                window_start += 1  # slide the window ahead
        return 0 if window_max is None else window_max

    def findMaximumSubarrayBruteForce(self, k, nums):
        """O(n*k) reference implementation, with the same all-negative fix."""
        window_max = None
        for i in range(len(nums) - k + 1):
            window_sum = sum(nums[i:i + k])
            window_max = window_sum if window_max is None else max(window_max, window_sum)
        return 0 if window_max is None else window_max
def main():
    """Run both implementations on the sample input and print the results."""
    solver = Solution()
    sample = [-2,1,-3,4,-1,2,1,-5,4]
    sliding = solver.findMaximumSubarraySlidingWindow(4, sample)
    print("(Sliding Window)Maximum of subarrays of size K: " + str(sliding))
    brute = solver.findMaximumSubarrayBruteForce(4, sample)
    print("(Brute Force)Maximum of subarrays of size K: " + str(brute))


if __name__ == "__main__":
    main()
|
20,414 | f8ae965509e36537c4c54c8782657246ce16abb4 | """Define functions to use as property methods on expressions.
These will automatically compute the value and avoid the need for ``.new()``.
To automatically create the functions, run:
$ python -m graphblas.core.automethods
"""
from .. import config
def _get_value(self, attr=None, default=None):
    """Materialize the expression (caching it on self._value) and return it,
    or a single attribute of it when *attr* is given.

    When autocompute is disabled, fall back to *default* (a descriptor) if
    provided; otherwise raise with a hint about enabling autocompute.
    """
    if not config.get("autocompute"):
        if default is not None:
            return default.__get__(self)
        raise TypeError(
            f"{attr} not enabled for objects of type {type(self)}. "
            f"Use `.new()` to create a new {self.output_type.__name__}.\n\n"
            "Hint: use `graphblas.config.set(autocompute=True)` to enable "
            "automatic computation of expressions."
        )
    if self._value is None:
        self._value = self.new()
    if attr is not None:
        return getattr(self._value, attr)
    return self._value
def _set_name(self, name):
    # Setter counterpart for the generated ``name`` properties: force
    # computation of the expression, then rename the materialized result.
    self._get_value().name = name
def default__eq__(self, other):
    # Fallback bound to ``__eq__`` when autocompute is disabled: fail loudly
    # and point users at ``.new()`` / ``.isequal`` instead of comparing a
    # lazy expression.
    raise TypeError(
        f"__eq__ not enabled for objects of type {type(self)}. "
        f"Use `.new()` to create a new {self.output_type.__name__}, then use `.isequal` method.\n\n"
        "Hint: use `graphblas.config.set(autocompute=True)` to enable "
        "automatic computation of expressions."
    )
# Begin auto-generated code
# NOTE: everything down to "End auto-generated code" is emitted by _main()
# below (``python -m graphblas.core.automethods``). Do not hand-edit these
# definitions — change the generator instead. Each stub forwards to
# _get_value so it can be bound as a property on expression classes; the
# __i*__ stubs raise because in-place math is not supported on expressions.
def S(self):
    return self._get_value("S")
def T(self):
    return self._get_value("T")
def V(self):
    return self._get_value("V")
def __and__(self):
    return self._get_value("__and__")
def __array__(self):
    return self._get_value("__array__")
def __bool__(self):
    return self._get_value("__bool__")
def __complex__(self):
    return self._get_value("__complex__")
def __contains__(self):
    return self._get_value("__contains__")
def __eq__(self):
    return self._get_value("__eq__", default__eq__)
def __float__(self):
    return self._get_value("__float__")
def __getitem__(self):
    return self._get_value("__getitem__")
def __index__(self):
    return self._get_value("__index__")
def __int__(self):
    return self._get_value("__int__")
def __iter__(self):
    return self._get_value("__iter__")
def __matmul__(self):
    return self._get_value("__matmul__")
def __ne__(self):
    return self._get_value("__ne__")
def __or__(self):
    return self._get_value("__or__")
def __rand__(self):
    return self._get_value("__rand__")
def __rmatmul__(self):
    return self._get_value("__rmatmul__")
def __ror__(self):
    return self._get_value("__ror__")
def _as_matrix(self):
    return self._get_value("_as_matrix")
def _as_vector(self):
    return self._get_value("_as_vector")
def _carg(self):
    return self._get_value("_carg")
def _is_empty(self):
    return self._get_value("_is_empty")
def _name_html(self):
    return self._get_value("_name_html")
def _nvals(self):
    return self._get_value("_nvals")
def apply(self):
    return self._get_value("apply")
def diag(self):
    return self._get_value("diag")
def ewise_add(self):
    return self._get_value("ewise_add")
def ewise_mult(self):
    return self._get_value("ewise_mult")
def ewise_union(self):
    return self._get_value("ewise_union")
def gb_obj(self):
    return self._get_value("gb_obj")
def get(self):
    return self._get_value("get")
def inner(self):
    return self._get_value("inner")
def is_empty(self):
    return self._get_value("is_empty")
def isclose(self):
    return self._get_value("isclose")
def isequal(self):
    return self._get_value("isequal")
def kronecker(self):
    return self._get_value("kronecker")
def mxm(self):
    return self._get_value("mxm")
def mxv(self):
    return self._get_value("mxv")
def name(self):
    return self._get_value("name")
def nvals(self):
    return self._get_value("nvals")
def outer(self):
    return self._get_value("outer")
def power(self):
    return self._get_value("power")
def reduce(self):
    return self._get_value("reduce")
def reduce_columnwise(self):
    return self._get_value("reduce_columnwise")
def reduce_rowwise(self):
    return self._get_value("reduce_rowwise")
def reduce_scalar(self):
    return self._get_value("reduce_scalar")
def reposition(self):
    return self._get_value("reposition")
def select(self):
    return self._get_value("select")
def ss(self):
    return self._get_value("ss")
def to_coo(self):
    return self._get_value("to_coo")
def to_csc(self):
    return self._get_value("to_csc")
def to_csr(self):
    return self._get_value("to_csr")
def to_dcsc(self):
    return self._get_value("to_dcsc")
def to_dcsr(self):
    return self._get_value("to_dcsr")
def to_dense(self):
    return self._get_value("to_dense")
def to_dict(self):
    return self._get_value("to_dict")
def to_dicts(self):
    return self._get_value("to_dicts")
def to_edgelist(self):
    return self._get_value("to_edgelist")
def to_values(self):
    return self._get_value("to_values")
def value(self):
    return self._get_value("value")
def vxm(self):
    return self._get_value("vxm")
def wait(self):
    return self._get_value("wait")
def __iadd__(self, other):
    raise TypeError(f"'__iadd__' not supported for {type(self).__name__}")
def __iand__(self, other):
    raise TypeError(f"'__iand__' not supported for {type(self).__name__}")
def __ifloordiv__(self, other):
    raise TypeError(f"'__ifloordiv__' not supported for {type(self).__name__}")
def __imatmul__(self, other):
    raise TypeError(f"'__imatmul__' not supported for {type(self).__name__}")
def __imod__(self, other):
    raise TypeError(f"'__imod__' not supported for {type(self).__name__}")
def __imul__(self, other):
    raise TypeError(f"'__imul__' not supported for {type(self).__name__}")
def __ior__(self, other):
    raise TypeError(f"'__ior__' not supported for {type(self).__name__}")
def __ipow__(self, other):
    raise TypeError(f"'__ipow__' not supported for {type(self).__name__}")
def __isub__(self, other):
    raise TypeError(f"'__isub__' not supported for {type(self).__name__}")
def __itruediv__(self, other):
    raise TypeError(f"'__itruediv__' not supported for {type(self).__name__}")
def __ixor__(self, other):
    raise TypeError(f"'__ixor__' not supported for {type(self).__name__}")
# End auto-generated code
def _main():
    """Regenerate the auto-generated sections: the stub functions above and
    the mirrored property-assignment blocks in scalar.py, vector.py,
    matrix.py and infix.py (written via ``_autogenerate_code``).
    """
    from pathlib import Path
    from .utils import _autogenerate_code
    # Method names shared by all expression wrappers.
    common = {
        "_name_html",
        "_nvals",
        "gb_obj",
        "get",
        "isclose",
        "isequal",
        "name",
        "nvals",
        "wait",
        # For infix
        "__and__",
        "__or__",
        "__rand__",
        "__ror__",
        # Delayed methods
        "apply",
        "ewise_add",
        "ewise_mult",
        "ewise_union",
        "select",
    }
    # Names specific to Scalar expressions.
    scalar = {
        "__array__",
        "__bool__",
        "__complex__",
        "__eq__",
        "__float__",
        "__index__",
        "__int__",
        "__ne__",
        "_as_matrix",
        "_as_vector",
        "_is_empty",
        "is_empty",
        "value",
    }
    # Names shared by Vector and Matrix expressions.
    vector_matrix = {
        "S",
        "V",
        "__contains__",
        "__getitem__",
        "__iter__",
        "__matmul__",
        "__rmatmul__",
        "_carg",
        "diag",
        "reposition",
        "ss",
        "to_coo",
        "to_dense",
        "to_values",
    }
    vector = {
        "_as_matrix",
        "inner",
        "outer",
        "reduce",
        "to_dict",
        "vxm",
    }
    matrix = {
        "_as_vector",
        "T",
        "kronecker",
        "mxm",
        "mxv",
        "power",
        "reduce_columnwise",
        "reduce_rowwise",
        "reduce_scalar",
        "to_csc",
        "to_csr",
        "to_dcsc",
        "to_dcsr",
        "to_dicts",
        "to_edgelist",
    }
    # Names that are copied straight from the concrete class (they raise).
    common_raises = set()
    scalar_raises = {
        "__matmul__",
        "__rmatmul__",
    }
    vector_matrix_raises = {
        "__array__",
        "__bool__",
    }
    # Names whose _get_value call carries a fallback descriptor.
    has_defaults = {
        "__eq__",
    }
    # no inplace math for expressions
    bad_sugar = {
        "__iadd__",
        "__ifloordiv__",
        "__imod__",
        "__imul__",
        "__ipow__",
        "__isub__",
        "__itruediv__",
        "__ixor__",
        "__ior__",
        "__iand__",
        "__imatmul__",
    }
    # Copy the result of this above
    lines = []
    for name in sorted(common | scalar | vector_matrix | vector | matrix):
        lines.append(f"def {name}(self):")
        if name in has_defaults:
            lines.append(f" return self._get_value({name!r}, default{name})\n\n")
        else:
            lines.append(f" return self._get_value({name!r})\n\n")
    for name in sorted(bad_sugar):
        lines.append(f"def {name}(self, other):")
        lines.append(
            f' raise TypeError(f"{name!r} not supported for {{type(self).__name__}}")\n\n'
        )
    _autogenerate_code(Path(__file__), "\n".join(lines))
    # Copy to scalar.py and infix.py
    lines = []
    lines.append(" _get_value = automethods._get_value")
    for name in sorted(common | scalar):
        if name == "name":
            # ``name`` is the only read/write property (it gets a setter).
            lines.append(
                " name = wrapdoc(Scalar.name)(property(automethods.name))"
                ".setter(automethods._set_name)"
            )
        else:
            lines.append(f" {name} = wrapdoc(Scalar.{name})(property(automethods.{name}))")
    lines.append(" # These raise exceptions")
    for name in sorted(common_raises | scalar_raises):
        lines.append(f" {name} = Scalar.{name}")
    for name in sorted(bad_sugar):
        if name == "__imatmul__":
            continue
        lines.append(f" {name} = automethods.{name}")
    thisdir = Path(__file__).parent
    # infix.py gets the same block minus the _get_value assignment.
    infix_exclude = {"_get_value"}
    def get_name(line):
        # Extract the attribute name being assigned on a generated line.
        return line.strip().split(" ", 1)[0]
    text = "\n".join(lines) + "\n "
    _autogenerate_code(thisdir / "scalar.py", text, "Scalar")
    text = "\n".join(line for line in lines if get_name(line) not in infix_exclude) + "\n "
    _autogenerate_code(thisdir / "infix.py", text, "Scalar")
    # Copy to vector.py and infix.py
    lines = []
    lines.append(" _get_value = automethods._get_value")
    for name in sorted(common | vector_matrix | vector):
        if name == "ss":
            # ``ss`` is only available on the suitesparse backend.
            lines.append(' if backend == "suitesparse":')
            indent = " "
        else:
            indent = ""
        if name == "name":
            lines.append(
                " name = wrapdoc(Vector.name)(property(automethods.name))"
                ".setter(automethods._set_name)"
            )
        else:
            lines.append(
                f" {indent}{name} = wrapdoc(Vector.{name})(property(automethods.{name}))"
            )
        if name == "ss":
            lines.append(' else:\n ss = Vector.__dict__["ss"] # raise if used')
    lines.append(" # These raise exceptions")
    for name in sorted(common_raises | vector_matrix_raises):
        lines.append(f" {name} = Vector.{name}")
    for name in sorted(bad_sugar):
        lines.append(f" {name} = automethods.{name}")
    text = "\n".join(lines) + "\n "
    _autogenerate_code(thisdir / "vector.py", text, "Vector")
    text = "\n".join(line for line in lines if get_name(line) not in infix_exclude) + "\n "
    _autogenerate_code(thisdir / "infix.py", text, "Vector")
    # Copy to matrix.py and infix.py
    lines = []
    lines.append(" _get_value = automethods._get_value")
    for name in sorted(common | vector_matrix | matrix):
        if name == "ss":
            lines.append(' if backend == "suitesparse":')
            indent = " "
        else:
            indent = ""
        if name == "name":
            lines.append(
                " name = wrapdoc(Matrix.name)(property(automethods.name))"
                ".setter(automethods._set_name)"
            )
        else:
            lines.append(
                f" {indent}{name} = wrapdoc(Matrix.{name})(property(automethods.{name}))"
            )
        if name == "ss":
            lines.append(' else:\n ss = Matrix.__dict__["ss"] # raise if used')
    lines.append(" # These raise exceptions")
    for name in sorted(common_raises | vector_matrix_raises):
        lines.append(f" {name} = Matrix.{name}")
    for name in sorted(bad_sugar):
        lines.append(f" {name} = automethods.{name}")
    text = "\n".join(lines) + "\n "
    _autogenerate_code(thisdir / "matrix.py", text, "Matrix")
    text = "\n".join(line for line in lines if get_name(line) not in infix_exclude) + "\n "
    _autogenerate_code(thisdir / "infix.py", text, "Matrix")
if __name__ == "__main__":
    _main()
|
20,415 | 537ac0cdb55ed534ca0ef17b0a1d2d5187fb7a52 | import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.linear_model import LogisticRegression
# pd.set_option('display.max_columns', None)
# pd.set_option('display.max_rows', None)
# Titanic feature-engineering + logistic-regression pipeline (Kaggle).
# NOTE(review): '<your path here>' is a placeholder and is not valid Python;
# it must be replaced with a string literal before the script runs.
path = <your path here>
train = pd.read_csv(path+r'\train.csv')
test = pd.read_csv(path+r'\test.csv')
# Bin edges used later to discretize Age and Fare into ordinal buckets.
age_bins = [0, 16, 32, 48, 64, 80] #min to max
fare_bins = [-0.001, 7.91, 14.454, 31, 520] # min to max
# Checking what affects what and to what extent xD
# sns.catplot(x='Sex', y='Survived', data=train, kind='bar', height=5)
# sns.scatterplot(x=train.Name, y=train.Age, hue=train.Survived).set(xticklabels=[])
# sns.FacetGrid(train, col='Survived').map(sns.histplot, 'Fare', bins=25)
# sns.FacetGrid(train, col='Survived').map(sns.histplot, 'Fare', bins=25)
# sns.scatterplot(x=train.Fare, y=train.Age, hue=train.Survived)
# sns.catplot(data=train, x='Pclass', y='Survived', kind='bar', hue='Sex')
# sns.countplot(data=train, x='Survived', hue='Pclass')
# sns.catplot(data=train, x='SibSp', y='Survived', height=5, kind='bar')
# sns.countplot(data=train, x='Survived', hue='Embarked')
# plt.show('hold')
# Encode categorical columns as integers (Sex: male=0/female=1; Embarked: S=0/C=1/Q=2).
train.Sex = train.Sex.replace('male', 0).replace('female', 1)
test.Sex = test.Sex.replace('male', 0).replace('female', 1)
train.Embarked.fillna(train.Embarked.mode()[0], inplace=True)
train.Embarked = train.Embarked.replace('S', 0).replace('C', 1).replace('Q', 2)
test.Embarked = test.Embarked.replace('S', 0).replace('C', 1).replace('Q', 2)
# Impute missing fares with the column mean.
train.Fare = train.Fare.fillna(train.Fare.mean())
test.Fare = test.Fare.fillna(test.Fare.mean())
# Extract the honorific from the name, e.g. 'Braund, Mr. Owen' -> 'Mr'.
# NOTE(review): the pattern should be a raw string (r' ([A-Za-z]+)\.') to avoid
# an invalid-escape SyntaxWarning on newer Pythons; value is unchanged either way.
for _ in (train, test):
    _['Title'] = _.Name.str.extract(' ([A-Za-z]+)\.', expand=False)
# Collapse rare titles into 'Rest' and normalize spelling variants.
train.Title = train.Title.replace(['Don', 'Rev', 'Dr', 'Major', 'Lady', 'Mlle', 'Col', 'Capt', 'Countess', 'Jonkheer', 'Dona'], 'Rest').replace('Mme', 'Mrs').replace('Ms', 'Miss').replace('Sir', 'Mr')
test.Title = test.Title.replace(['Don', 'Rev', 'Dr', 'Major', 'Lady', 'Mlle', 'Col', 'Capt', 'Countess', 'Jonkheer', 'Dona'], 'Rest').replace('Ms', 'Miss')
title_map = {'Mr': 1, 'Miss': 2, 'Mrs': 3, 'Master': 4, 'Rest': 5}
for _ in (train, test):
    _['Title'] = _['Title'].map(title_map)
# Impute missing ages with the mean age of the passenger's title group.
for _ in (train, test):
    _.loc[(_.Age.isna()) & (_.Title == 1), 'Age'] = round(_[_.Title == 1].Age.mean(), 2)
    _.loc[(_.Age.isna()) & (_.Title == 3), 'Age'] = round(_[_.Title == 3].Age.mean(), 2)
    _.loc[(_.Age.isna()) & (_.Title == 2), 'Age'] = round(_[_.Title == 2].Age.mean(), 2)
    _.loc[(_.Age.isna()) & (_.Title == 4), 'Age'] = round(_[_.Title == 4].Age.mean(), 2)
    _.loc[(_.Age.isna()) & (_.Title == 5), 'Age'] = round(_[_.Title == 5].Age.mean(), 2)
# Replace continuous Age with its bucket index 0..4 (edges in age_bins).
for _ in (train, test):
    for i in range(5):
        _.loc[(_.Age > age_bins[i]) & (_.Age <= age_bins[i+1]), 'Age'] = i
# Survival Rate by Title
# print(train[['Title', 'Survived']].groupby('Title').mean().sort_values(by='Survived', ascending=False))
# Survival Rate by Age group
# print(train[['Age', 'Survived']].groupby('Age').mean().sort_values(by='Survived', ascending=False))
# Replace continuous Fare with its bucket index 0..3 (edges in fare_bins).
for _ in (train, test):
    for i in range(4):
        _.loc[(_.Fare > fare_bins[i]) & (_.Fare <= fare_bins[i+1]), 'Fare'] = i
# Survival Rate by Fare group
# print(train[['Fare', 'Survived']].groupby('Fare').mean().sort_values(by='Survived', ascending=False))
# Survival Rate by Cabin assigned
# print(train[['CabinAssigned', 'Survived']].groupby('CabinAssigned').mean().sort_values(by='Survived', ascending=False))
# Dropping irrelevant data
columns_to_drop = ['PassengerId', 'Name', 'SibSp', 'Parch', 'Cabin', 'Ticket']
for _ in (train, test):
    _.drop(columns=columns_to_drop, axis=1, inplace=True)
# Fit a logistic regression on the engineered features and predict on test.
X_train = train.drop(columns=['Survived'], axis=1)
Y_train = train['Survived']
X_test = test.copy()
logreg = LogisticRegression()
logreg.fit(X_train, Y_train)
Y_pred = logreg.predict(X_test)
acc_log = round(logreg.score(X_train, Y_train) * 100, 2)  # train accuracy (%); not printed
# Reload the raw test set to recover the PassengerId column dropped above.
test = pd.read_csv(path+r'\test.csv')
submission = pd.DataFrame({'PassengerId': test.PassengerId, 'Survived': Y_pred})
file_name = 'submission.csv'
submission.to_csv(path+r'\\'+file_name, index=False)
|
20,416 | aad745facffb21761c081cbe145469368a1a1721 | # Mouse Events:
# lef click, right click, double left click etc.
# Import necessary libraries:
import numpy as np
import cv2
# All events in cv2:
# events = [i for i in dir(cv2) if 'EVENT' in i]
# print(events)
# Mouse event callback function:
def click_event(event, x, y, flags, param):
    """Mouse callback for the "Lenna" window.

    Left click: print and draw the (x, y) coordinate at the click point.
    Right click: draw the pixel's B, G, R values at the click point.

    Fix: the annotated image was shown in a new "MOUSE_EVENTS" window even
    though the callback is registered on "Lenna" (see setMouseCallback below),
    so the window the user clicked never updated. Redraw in "Lenna" instead.
    """
    # Print co-ordinate for left button click
    if event == cv2.EVENT_LBUTTONDOWN:
        print(x, ", ", y)
        font = cv2.FONT_HERSHEY_PLAIN
        x_y = str(x) + ', ' + str(y)
        cv2.putText(img, x_y, org=(x, y), fontFace=font, fontScale=1, color=(255, 255, 0), thickness=1)
        cv2.imshow("Lenna", img)
    # Print BGR channel for right button click
    elif event == cv2.EVENT_RBUTTONDOWN:
        blue = img[y, x, 0]
        green = img[y, x, 1]
        red = img[y, x, 2]
        font = cv2.FONT_HERSHEY_PLAIN
        bgr = str(blue) + ', ' + str(green) + ', ' + str(red)
        cv2.putText(img, bgr, org=(x, y), fontFace=font, fontScale=1, color=(0, 255, 255), thickness=1)
        cv2.imshow("Lenna", img)
# Load the test image; raw string prevents '\L' being treated as an (invalid)
# escape sequence — the resulting path bytes are unchanged.
img = cv2.imread(r"Pictures\Lenna.png")
cv2.imshow("Lenna", img)
# Register the mouse handler on the same window that displays the image.
cv2.setMouseCallback('Lenna', click_event)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
20,417 | 342eaa2099a03e2e523486fbe8a5b4d0d73ec8ee | #5th Sep, 2022, Fractal, practise
#Binary Search Tree
class TreeNode(object):
    """A single node of a binary search tree."""

    def __init__(self, data) -> None:
        self.data = data    # payload / key
        self.left = None    # left child (keys smaller than data)
        self.right = None   # right child (keys larger than data)
class Tree(object):
    """Binary search tree supporting insert, level-order (BFS) print, and
    post-order (DFS) print.

    Fixes: levelOrder() previously crashed with AttributeError on an empty
    tree (it dequeued a None root), and used list.pop(0) which is O(n) per
    dequeue — replaced with collections.deque.popleft().
    """

    def __init__(self) -> None:
        self.root = None  # empty tree

    def add(self, data):
        """Insert data into the BST. Duplicates are silently ignored."""
        if self.root is None:
            self.root = TreeNode(data)
        else:
            self._add(self.root, data)

    def _add(self, node, data):
        # Recursive insert: descend right for larger keys, left for smaller.
        if data > node.data:
            if node.right:
                self._add(node.right, data)
            else:
                node.right = TreeNode(data)
        elif data < node.data:
            if node.left:
                self._add(node.left, data)
            else:
                node.left = TreeNode(data)

    def levelOrder(self):
        """Print node values in breadth-first order. Safe on an empty tree."""
        if self.root is None:
            return
        from collections import deque  # O(1) popleft vs list.pop(0)
        nodes = deque([self.root])
        while nodes:
            node = nodes.popleft()
            print(node.data, end=" ")
            if node.left:
                nodes.append(node.left)
            if node.right:
                nodes.append(node.right)

    def dfs(self):
        """Print node values in post-order (left, right, node)."""
        self._dfs(self.root)

    def _dfs(self, node):
        # Post-order: children first, then the node itself.
        if node is not None:
            self._dfs(node.left)
            self._dfs(node.right)
            print(node.data, end=" ")
# Build a small sample tree and demonstrate the traversals.
t = Tree()
t.add(10)
t.add(5)
t.add(20)
t.add(4)
t.add(6)
t.add(15)
t.add(25)
# print('-----In Order-----')
# t.inOrder()
# print()
# print ('-----Post Order-----')
# t.postOrder()
# print()
# print('-----Pre Order-----')
# t.preOrder()
# print()
print ('----Level Order Traversal----')
t.levelOrder()
print()
# Fix: dfs() prints its nodes and returns None, so print(t.dfs()) appended a
# spurious "None" line. Call it directly and emit the trailing newline.
t.dfs()
print()
|
20,418 | 694ae57c816343750632c952e571b620d0de8c09 | from typing import *
import pytest
from .. import solution
@pytest.mark.parametrize(
    ("p", "expected"),
    [
        ("(()())()", "(()())()"),
        (")(", "()"),
        ("()))((()", "()(())()"),
        (")()()()(", "(((())))"),
    ],
)
def test_solution(p: str, expected: str) -> None:
    """solution() must balance the parenthesis string p into expected."""
    assert solution(p) == expected
|
20,419 | a66e982e3a611201554f3989d8a3756a24f996b3 | board = [["-", "-", "-"], ["-", "-", "-"], ["-", "-", "-"]]
win = 0
def print_board():
    """Render the 3x3 board, one row per line, cells joined by ' | '."""
    print("")
    for row in board:
        print(str(row[0]) + " | " + str(row[1]) + " | " + str(row[2]))
    print("")
def move_x():
    """Prompt until 'x' is placed on a free cell (positions 1-9, row-major)."""
    while True:
        position = input("Choose a position from 1 to 9: ")
        if position.isdigit():
            position = int(position)
            # Map 1-9 onto (row, col); only accept an empty target cell.
            if 1 <= position <= 3 and board[0][position - 1] == '-':
                board[0][position - 1] = "x"
                return
            if 4 <= position <= 6 and board[1][position - 4] == '-':
                board[1][position - 4] = "x"
                return
            if 7 <= position <= 9 and board[2][position - 7] == '-':
                board[2][position - 7] = "x"
                return
        print("Wrong move!")
        print("")
def move_o():
    """Prompt until 'o' is placed on a free cell (positions 1-9, row-major)."""
    while True:
        position = input("Choose a position from 1 to 9: ")
        if position.isdigit():
            position = int(position)
            # Map 1-9 onto (row, col); only accept an empty target cell.
            if 1 <= position <= 3 and board[0][position - 1] == '-':
                board[0][position - 1] = "o"
                return
            if 4 <= position <= 6 and board[1][position - 4] == '-':
                board[1][position - 4] = "o"
                return
            if 7 <= position <= 9 and board[2][position - 7] == '-':
                board[2][position - 7] = "o"
                return
        print("Wrong move!")
        print("")
def x_win(win):
    """Check whether 'x' owns any row, column, or diagonal.

    Prints a message and returns ``win == 1`` on a win (the caller passes 1,
    so this is True); returns None when 'x' has not won — same contract as
    the original 8-branch chain, now driven by a table of the 8 lines.
    """
    lines = (
        [board[0][0], board[0][1], board[0][2]],  # rows
        [board[1][0], board[1][1], board[1][2]],
        [board[2][0], board[2][1], board[2][2]],
        [board[0][0], board[1][0], board[2][0]],  # columns
        [board[0][1], board[1][1], board[2][1]],
        [board[0][2], board[1][2], board[2][2]],
        [board[0][0], board[1][1], board[2][2]],  # diagonals
        [board[0][2], board[1][1], board[2][0]],
    )
    for a, b, c in lines:
        if a == b == c == 'x':
            print("Congratulations, x won!")
            return win == 1
def o_win(win):
    """Check whether 'o' owns any row, column, or diagonal.

    Prints a message and returns ``win == 1`` on a win; returns None when
    'o' has not won — same contract as the original 8-branch chain.
    """
    lines = (
        [board[0][0], board[0][1], board[0][2]],  # rows
        [board[1][0], board[1][1], board[1][2]],
        [board[2][0], board[2][1], board[2][2]],
        [board[0][0], board[1][0], board[2][0]],  # columns
        [board[0][1], board[1][1], board[2][1]],
        [board[0][2], board[1][2], board[2][2]],
        [board[0][0], board[1][1], board[2][2]],  # diagonals
        [board[0][2], board[1][1], board[2][0]],
    )
    for a, b, c in lines:
        if a == b == c == 'o':
            print("Congratulations, o won!")
            return win == 1
def clear_board():
    """Reset every cell to '-' in place (keeps the module-level list object)."""
    for row in board:
        for col in range(3):
            row[col] = '-'
def game():
    """Run one interactive game (X always starts), then offer a replay.

    The move counter i tracks filled cells; with 9 cells, X makes the 9th
    move, so the tie check only needs to run after X's turn (i == 8 means
    8 cells were filled before this move, i.e. the board is now full).
    Replays recurse into game(); depth is bounded by the player's patience.
    """
    print_board()
    i = 0
    while True:
        print("X TURN")
        move_x()
        print_board()
        if x_win(1):  # x_win returns True on a win, None otherwise
            print("Game over!")
            break
        if i == 8:  # board full and no winner -> tie
            print("It's a Tie!")
            print("Game over!")
            break
        i += 1
        print("O TURN")
        move_o()
        print_board()
        if o_win(1):
            print("Game over!")
            break
        i += 1
    again = input("Wanna play again?(y/n): ")
    if again == 'y':
        clear_board()
        game()
# Script entry point: greet the player and start the first game.
print("Welcome to the tic-tac-toe game!")
game()
20,420 | 2ed25b46f0d6a9f790a96806b7edcdadb80eb5fd | #encoding=utf-8
import logging
import time
def print_calling(fn):
    """Decorator: log entry into fn and the wall-clock time each call took.

    Fix: functools.wraps preserves fn.__name__/__doc__ on the wrapper so
    logging and introspection report the real function, not 'wrapper'.
    """
    from functools import wraps

    @wraps(fn)
    def wrapper(*args1, **args2):
        s = "calling function %s" % (fn.__name__)
        logging.info(s)
        start = time.time()
        ret = fn(*args1, **args2)
        end = time.time()
        s = "function [%s] has been called, taking %f seconds" % (fn.__name__, (end - start))
        logging.debug(s)
        return ret
    return wrapper
def print_test(fn):
    """Decorator: log the start and successful completion of a test callable.

    Fix: functools.wraps preserves the wrapped function's metadata.
    """
    from functools import wraps

    @wraps(fn)
    def wrapper(*args1, **args2):
        s = "running test: %s..." % (fn.__name__)
        logging.info(s)
        ret = fn(*args1, **args2)
        s = "running test: %s...succeed" % (fn.__name__)
        logging.debug(s)
        return ret
    return wrapper
def print_calling_in_short(fn):
    """Decorator: log only the elapsed wall-clock time of each call to fn.

    Fix: functools.wraps preserves the wrapped function's metadata.
    """
    from functools import wraps

    @wraps(fn)
    def wrapper(*args1, **args2):
        start = time.time()
        ret = fn(*args1, **args2)
        end = time.time()
        s = "function [%s] has been called, taking %f seconds" % (fn.__name__, (end - start))
        logging.debug(s)
        return ret
    return wrapper
|
20,421 | e067fb8fe5ca6b13ac8e4fa089c19904e7413fc8 | #Create a binary file with name and roll number. Search for a given roll number and display the name,
# if not found display appropriate message.
|
20,422 | 32a2613c152e18bae20292aa31fef22537a71f52 | #!/usr/bin/env python
# Version 1.1
# Release Date: Mon, 9 Mar, 2020 at 08:40 PM
import sys
import os
import socket
import subprocess
import yaml
import random
import string
import time
# below port will be provided in command line
TCP_PORT = 5005
# gamin command won't be that long
BUFFER_SIZE = 100
# Secret path and how often to refresh
SEC_PATH = "will be under logs/.tg_daemon.secret"
SEC_MAX = 10
SEC_Count = 0
ERROR_RETURN=None
# now build the bricks
def validate_cmd(cmd):
    """Allow only whitelisted gadmin sub-commands; destructive ones are then
    checked for an explicit -y confirmation via enforce_yes()."""
    allowed = ("start", "stop", "log", "status", "restart")
    if cmd[0] not in allowed:
        return False
    return enforce_yes(cmd)
def enforce_yes(cmd):
    """Require an explicit -y flag for commands that would otherwise prompt.

    'stop' and 'restart' must carry -y / -fy / -yf among their options; the
    failure reason is stashed in the module-global ERROR_RETURN for the
    caller to report. All other commands pass unconditionally.
    """
    global ERROR_RETURN
    if cmd[0] in ("stop", "restart"):
        if any(opt in ("-y", "-fy", "-yf") for opt in cmd[1:]):
            return True
        ERROR_RETURN = cmd[0] + " must have -y option"
        return False
    return True
def time_print(str):
    # Print a message prefixed with the local timestamp, e.g. "[2020-03-09 20:40 UTC] msg".
    # NOTE(review): the parameter shadows the builtin 'str'; Python 2 print
    # statement syntax — this file is Python 2 only.
    t = time.time()
    tstring=time.strftime('%Y-%m-%d %H:%M %Z', time.localtime(t))
    print "["+tstring+"] " + str
def randomString(stringLength=10):
    """Generate a random string of fixed length """
    lowercase = string.ascii_lowercase
    return ''.join(random.choice(lowercase) for _ in range(stringLength))
def init_sec_path():
    """Point the module-global SEC_PATH at <tigergraph log root>/.tg_daemon.secret."""
    global SEC_PATH
    log_root = gsql_cfg()["tigergraph.log.root"]
    SEC_PATH = os.path.join(log_root, ".tg_daemon.secret")
def refresh_secret():
    """Return the currently valid secret and rotate it every SEC_MAX calls.

    Reads the first line of SEC_PATH as the current secret; if the file is
    missing (first run), the freshly generated secret becomes current and the
    rotation counter is reset so it is written out immediately below.
    """
    global SEC_PATH
    global SEC_Count
    new_secret = randomString(32)
    if os.path.isfile(SEC_PATH):
        with open(SEC_PATH) as fp:
            curren_secret = fp.readline()
    else:
        # start or missing secret file
        curren_secret = new_secret
        SEC_Count = 0
    # Persist a new secret once every SEC_MAX calls (counter starts at 0,
    # so the very first call always writes).
    if SEC_Count % SEC_MAX == 0:
        with open(SEC_PATH, 'w') as out_file:
            out_file.write(new_secret)
    SEC_Count = SEC_Count + 1
    return curren_secret
def gsql_cfg():
    """Load ~/.gsql/gsql.cfg as YAML and return the parsed mapping.

    Fix: the file handle is now managed by a with-statement so it is closed
    even if yaml.safe_load raises.
    """
    cfg_file = os.path.join(os.path.expanduser("~"), ".gsql/gsql.cfg")
    if not os.path.isfile(cfg_file):
        # NOTE(review): FileNotExistException is not defined or imported in
        # this file — as written this line raises NameError; confirm where
        # the exception class is supposed to come from.
        raise FileNotExistException(cfg_file)
    with open(cfg_file) as f:
        return yaml.safe_load(f)
def format_cmd(mystring):
    """Lower-case and whitespace-tokenize the wire command.

    Returns the token list (secret + command words), or None when fewer
    than two tokens arrived.
    """
    tokens = mystring.lower().split()
    return tokens if len(tokens) >= 2 else None
def validate_secret(client_secret):
    """Return True iff client_secret matches the current rotating secret.

    Idiom fix: return the comparison directly instead of if/else True/False.
    """
    return client_secret == refresh_secret()
def run_gadmin_cmd(org_cmd):
    """Validate a wire command ("<secret> <gadmin-subcommand> [opts]") and,
    if every check passes, run it through gadmin and return its output.

    validate_state tracks how far validation got so the right error string
    is returned: 0 = unparseable, 1 = bad secret, 2 = command rejected.
    """
    cmd = format_cmd(org_cmd)
    validate_state = 0
    if cmd is not None:
        validate_state = 1
        if validate_secret(cmd[0]):
            validate_state = 2
            if validate_cmd(cmd[1:]):
                validate_state = 3
                # Replace the secret token with the executable name; the
                # list form + shell=False avoids shell injection.
                cmd[0] = "gadmin"
                process = subprocess.Popen(cmd, shell=False, close_fds=True, stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
                output = process.communicate()[0].strip()
                return output
    if validate_state == 0:
        return "Error: empty command"
    elif validate_state == 1:
        return "Error: wrong secret"
    elif validate_state == 2:
        global ERROR_RETURN
        # enforce_yes() records a specific reason here (e.g. missing -y).
        if ERROR_RETURN is not None:
            return "Error: " + ERROR_RETURN
        else:
            return "Error: not supported command: " + " ".join(cmd[1:])
    else:
        # NOTE(review): state 3 always returns above, so this branch looks
        # unreachable — kept for defensiveness.
        return "Invalid command: " + org_cmd
def parse_argv():
    """Take an optional listening port from argv[1]; keep the default otherwise."""
    global TCP_PORT
    argument_count = len(sys.argv) - 1
    if argument_count == 1:
        TCP_PORT = int(sys.argv[1])
    else:
        time_print('No port specified, using default ' + str(TCP_PORT))
if __name__ == '__main__':
    # Daemon entry point (Python 2): listen for one wire command per
    # connection, validate it, run gadmin, and send the output back.
    parse_argv()
    # Create a TCP/IP socket
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Bind the socket to the address given on the command line
    server_address = ('', TCP_PORT)
    sock.bind(server_address)
    time_print('starting up on %s port %s' % sock.getsockname())
    sock.listen(1)
    init_sec_path()
    refresh_secret()
    while True:
        time_print('waiting for a connection')
        connection, client_address = sock.accept()
        try:
            time_print('client connected: ' + client_address[0] + ":" + str(client_address[1]))
            # NOTE(review): the inner read loop is commented out, so exactly
            # one command is served per connection before it is closed.
            #while True:
            data = connection.recv(BUFFER_SIZE)
            time_print('received "%s"' % data)
            if data:
                result = run_gadmin_cmd(data)
                connection.sendall(result)
                time_print("done")
            else:
                # Empty read: client closed — exit the accept loop entirely.
                break
        finally:
            connection.close()
|
20,423 | 61f964158ada97eca70b2bfc782c3322bf41f38a | from typing import List
from math import prod
FORMAT = """
Jolt step product: {0}
Adapter variations: {1}
"""
def solve(in_stream):
    """Parse adapter joltages from in_stream and return the pair
    (count-of-1-steps * count-of-3-steps, number of valid arrangements)."""
    adapters = [int(line) for line in in_stream]
    diffs = shortest_steps(adapters)
    ones, threes = count(diffs)
    return ones * threes, variations(diffs)
def shortest_steps(connectors: List[int]):
    """
    Compute the sequence of steps (differences) walking from the outlet (0)
    through all adapters in sorted order, ending with the fixed +3 step to
    the device.
    """
    ordered = sorted(connectors)
    predecessors = [0] + ordered
    return [b - a for a, b in zip(predecessors, ordered)] + [3]
def count(steps: List[int]):
    """Return the pair (number of 1-jolt steps, number of 3-jolt steps)."""
    ones = steps.count(1)
    threes = steps.count(3)
    return ones, threes
def variations(steps: List[int]):
    """
    Compute the variations to connect the outlet to the device given adapter steps
    .. note::
        This algorithm relies on steps being either 1 or 3.
        It also assumes at least one 3-step exists (shortest_steps always
        appends one), otherwise max() below would raise on an empty list.
    """
    # we can only vary parts *between* 3-step connectors
    fixed_indices = [i for i, step in enumerate(steps) if step == 3]
    # lengths of the runs of 1-steps between consecutive mandatory adapters
    lengths = [
        next_i - prev_i - 1 for prev_i, next_i in zip(
            [-1] + fixed_indices, fixed_indices
        )
    ]
    # cache for variations of each sequence length we need
    # (tribonacci-like: a run of n one-steps can be traversed f(n) ways)
    cache = [1, 1, 2, 4]
    for i in range(len(cache), max(lengths) + 1):
        # f(n) = f(n-1) + f(n-2) + f(n-3)
        # = f(n-1) + f(n-2) + f(n-3) + f(n-4) - f(n-4)
        # = 2 f(n-1) - f(n-4)
        cache.append(2 * cache[i-1] - cache[i-4])
    # total arrangements = product of per-run variation counts
    return prod(map(cache.__getitem__, lengths))
|
20,424 | b1026b3129f789cfa7fb4ff15bfb36c0d3fbf47f | from ex2.strategy.knapStrategy import KnapStrategy
from ex2.knap_enums.knaptype_enum import KnapTypeEnum
from ex2.classes.knapInstance import KnapInstance
from ex2.classes.knapInstanceSolution import KnapInstanceSolution
from ex2.classes.knapNode import KnapNode
class DynamicProgrammingByCostStrategy(KnapStrategy):
    """Knapsack solver using dynamic programming decomposed by cost (value).

    W[i][c] holds the minimum total weight achievable with the first i items
    at exactly cost c; 'infinity' is represented by the sum of all item
    weights (sumWeight), which no feasible selection can exceed.
    """
    def __init__(self, name, knaptype):
        KnapStrategy.__init__(self, name, knaptype)
        self.StrategyType = KnapTypeEnum.CONSTRUCTIVE
        self.instance = None
        self.candidateSolution = None
        self.maxProfit = 0
        self.W = [[]]  # DP table, sized properly in runStrategy
    def runStrategy(self, instance):
        """Solve the given instance; returns the number of DP cell updates
        performed (used as a work/complexity measure by the framework)."""
        self.recursionDepth = 0
        self.instance = instance
        # initial setup: knapsack empty.
        capacity = instance.capacity
        itemnumber = instance.itemnumber
        items = instance.items
        currentItem = items[0]  # NOTE(review): assigned but never used
        xList = [0 for i in range(itemnumber)]
        # decomposition by cost
        # calculate sum of cost for the instance items as we can limit our map to this value.
        # also calculate total weight of all items to use as case without solution (instead of +infinity)
        sumCost = 0
        sumWeight = 0
        for it in items:
            sumCost = sumCost + it.getCost()
            sumWeight = sumWeight + it.getWeight()
        # create 2-dimensional array to store computation results
        # the rows contain the cost values, the columns the items
        # after computation, the array will be of size [sumCost+1] rows and [itemnumber+1] columns
        # access the array as [columns][rows]
        self.W = [[None for j in range(sumCost+1)] for i in range(itemnumber+1)]
        # fill trivial cases
        # when no items are added, the weight of the knapsack is considered +inf (the cost values cannot be achieved)
        for i in range(sumCost+1):
            self.W[0][i] = sumWeight
        # W[0][0] initialized to 0
        self.W[0][0] = 0
        # run dynamic solver
        self.recursionDepth = self.dynamicSolver(sumCost, sumWeight, items)
        # find the field containing the result
        res = 0
        resIndex = 0
        # iterate from lowest cost value to highest. Whenever the weight fits into the knapsack we update cost value.
        for i in range(sumCost+1):
            val = self.W[itemnumber][i]
            if val <= capacity:
                res = val
                resIndex = i
        # resIndex is now the highest achievable cost within capacity.
        solList = self.reconstructSolutionByCost(resIndex, itemnumber, items, xList)
        # self.valid(...) presumably defined on KnapStrategy — verify in base class.
        self.valid(instance, solList)
        return self.recursionDepth
    def dynamicSolver(self, cost, weight, items):
        """Fill the DP table; 'weight' is the +infinity surrogate (sumWeight).
        Returns the number of cells visited. Note: loop variable 'w' actually
        iterates COST values, despite its name."""
        recursionDepth = 0
        for it in items:
            for w in range(cost+1):
                recursionDepth = recursionDepth + 1
                # two nested for loops.
                # the complexity is O(number of items * (sumCost+1))
                weightWithoutItem = self.W[it.id][w] # this works because we filled the trivial cases with +inf
                weightWithItem = weight # initialize to +inf, we will update later
                if w >= it.getCost(): # check if current items fits cost
                    weightWithItem = it.getWeight() # if yes, the field value is at least the value of the item
                    remainingCost = w - it.getCost() # lets see how much cost is left
                    weightWithItem = weightWithItem + self.W[it.id][remainingCost] # add value from the previous column
                if weightWithItem < weightWithoutItem: # add the higher value to the map
                    self.W[it.id+1][w] = weightWithItem
                else:
                    self.W[it.id+1][w] = weightWithoutItem
        return recursionDepth
    def reconstructSolutionByCost(self, cost, index, items, xList):
        """Walk the DP table backwards from W[index][cost], marking each item
        as taken (1) or skipped (0) in xList; returns the filled xList."""
        # index equals item with id [index-1]
        if index <= 0:
            # last item considered, return solution
            return xList
        res = self.W[index][cost]
        # if the value stays the same, the item is not used in the solution
        if self.W[index-1][cost] == res:
            xList[index-1] = 0
        else:
            xList[index-1] = 1
            # item taken: remaining budget shrinks by its cost
            cost = cost - items[index-1].getCost()
        xList = self.reconstructSolutionByCost(cost, index-1, items, xList)
        return xList
|
20,425 | 702a9b2d2599fb6f457dd62568522433a400462c |
def sort(seq):
    """
    Implementation of basic quick sort algorithm.

    Fixes: the original had no base case (infinite recursion) and
    concatenated the integer seq[index] to lists (TypeError).

    :param seq: an integer list
    :return: new sorted integer list (input is not modified)
    """
    if len(seq) <= 1:
        # Base case: empty or single-element sequences are already sorted.
        return list(seq)
    pivot = seq[0]
    smaller = [x for x in seq[1:] if x < pivot]
    larger = [x for x in seq[1:] if x >= pivot]  # >= keeps duplicates of the pivot
    return sort(smaller) + [pivot] + sort(larger)
# Helping methods
def partition(seq):
    """
    Implementation of partition function (original was a stub returning 0).

    Rearranges seq in place around its first element: all smaller elements
    precede the pivot, all greater-or-equal elements follow it.

    :param seq: an integer list (mutated in place)
    :return: the pivot's final index (0 for an empty sequence)
    """
    if not seq:
        return 0
    pivot = seq[0]
    smaller = [x for x in seq[1:] if x < pivot]
    larger = [x for x in seq[1:] if x >= pivot]
    seq[:] = smaller + [pivot] + larger
    return len(smaller)
|
20,426 | 039c4f72f6080ec27a56c85f678d23da28752813 | import psycopg2
import argparse
import sys
import pandas as pd
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
from sqlalchemy import create_engine
from sklearn.model_selection import train_test_split
from sklearn.ensemble import IsolationForest
import pandas.io.sql as psql
if __name__ == '__main__':
    # Build train/test ABT tables in Postgres from abt.abt_full:
    # extract -> one-hot encode -> split -> remove outliers -> write back.
    #2.1 The arguments
    parser = argparse.ArgumentParser(
        description = 'The train and test table creation'
    )
    parser.add_argument(
        'db_user',
        type=str,
        help='The name of the user'
    )
    parser.add_argument(
        'in_password',
        type=str,
        help='The password for the user'
    )
    args = parser.parse_args()
    #2.2 Connect to DB
    conn = psycopg2.connect(
        "dbname='hm_crdt' user='{}' password='{}'".format(args.db_user, args.in_password)
    )
    conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
    cur = conn.cursor()
    #2.3 Connect to SQLalchemy engine
    engine = create_engine('postgresql://postgres:{}@localhost:5432/hm_crdt'.format(args.in_password))
    print('Extracting from DB')
    #2.4 extract full table
    #2.4.1 must do it in chunks to save memory
    Full_DF_chunk = pd.read_sql('''
    select *
    from abt.abt_full
    ;'''
    , con=conn
    , chunksize=10000
    )
    Full_DF = pd.DataFrame([])
    # NOTE(review): DataFrame.append is deprecated in modern pandas;
    # pd.concat(list_of_chunks) is the replacement.
    for chunk in Full_DF_chunk:
        Full_DF = Full_DF.append(chunk)
    Full_DF = Full_DF.set_index(['sk_id_curr']).copy()
    Full_DF = pd.get_dummies(Full_DF)
    col_list = list(Full_DF.columns)
    #2.4.2 change all columns to lower case. postgres only likes lower case
    Full_DF.columns = [i.strip().lower().replace(' ','_').replace('/','_').replace(',','_').replace('+','') for i in col_list]
    #2.5 Train and test creation
    Full_DF_kagl = Full_DF[pd.isna(Full_DF['target'])] #These are cases from the kaggle test test, they have no target labels
    Train_Test = Full_DF[~pd.isna(Full_DF['target'])]
    # NOTE(review): assignment into a sliced frame can trigger
    # SettingWithCopyWarning; consider .copy() on Train_Test first.
    Train_Test['target'] = Train_Test['target'].astype(int).copy()
    #2.5.1 The train and test split
    Train , Test = train_test_split(Train_Test, test_size=0.2, random_state=198666, stratify = Train_Test['target'])
    #2.5.2 Check if Strafied correctly
    # NOTE(review): these two expressions are computed but never printed or
    # stored — they have no effect as written.
    sum(Train['target'])/Train['target'].count()
    sum(Test['target'])/Test['target'].count()
    #2.6 Outlier Detection and removal
    # IsolationForest labels inliers as 1; only those rows are kept.
    OD_Model = IsolationForest(random_state=198666).fit(Train.drop('target',axis=1))
    Outlier_array = OD_Model.predict(Train.drop('target',axis=1))
    Train = Train[Outlier_array == 1].copy()
    #2.7 Push to database
    print('Pushing Kaggle submission dataset to DB')
    Full_DF_kagl.to_sql('abt_kaggle_submission', engine, schema='abt', if_exists='replace', chunksize=10000)
    print('Pushing Train dataset to DB')
    Train.to_sql('abt_train', engine, schema='abt', if_exists='replace', chunksize=10000)
    print('Pushing Test dataset to DB')
    Test.to_sql('abt_test', engine, schema='abt', if_exists='replace', chunksize=10000)
    #Train_SMOTE.to_sql('abt_train_smote', engine, schema='abt', if_exists='replace')
    conn.close()
20,427 | 78eb27f7ba8a3815b7e3764ee9377df47cb23624 | from django.urls import path, include
from .views import *
from django.contrib.auth.views import LoginView
# URL routes for the user/profile app; each view is imported via
# `from .views import *` above. NOTE(review): LoginView is imported at the
# top of the file but not routed here — confirm whether it is still needed.
urlpatterns = [
    path('my/', UserUpdateView.as_view(), name='profile'),
    path('password-change/', PasswordChangeView.as_view(), name='password_change'),
    path('logout/', LogoutView.as_view(), name='logout'),
    path('saved/', SavedArticleView.as_view(), name='saved'),
    path('history/', HistoryView.as_view(), name='history'),
    path('topic-add/', TopicAddView.as_view(), name='topic_add'),
    path('topic-organize/', TopicOrganizeView.as_view(), name='topic_organize'),
    path('article-save/', SaveArticle.as_view(), name='article_save'),
]
|
20,428 | d1d1aa1f9f9926f40a81c4828ccde59166768f52 | import random
x = []
for i in range(10):
x.append(random.randint(1,100))
print(x)
maxval = x[0]
for i in range(0,len(x),1):
if x[i] > maxval:
maxval = x[i]
print("最大値",maxval) |
20,429 | 9b44cc80dde8a9b699451489a4df237b6774fab8 | import cv2
import numpy as np
import os
from time import sleep
import DetectChars
import DetectPlates
import PossiblePlate
from pirc522 import RFID
import signal
import RPi.GPIO as GPIO
from threading import Thread
from get_dist import Sensor
import Functions as F
import entrance
# Capacity of the handicapped (H_P) and normal (N_P) parking areas.
H_P=2
N_P=4
# In-park plate registries. NOTE(review): each list is seeded with an empty
# string, so len() starts at 1 — the capacity checks below effectively allow
# one car fewer than H_P/N_P; confirm this is intended.
Handicaped_Users=['']
Normal_Users=['']
GPIO.setwarnings(False)
GPIO.cleanup()
GPIO.setmode(GPIO.BOARD)
# RFID reader used for card-based access.
rdr = RFID()
util = rdr.util()
util.debug = True
Car= F.Car_Control
entry_gate = entrance.Plates(0) #definition of entrance camera
exit_gate = entrance.Plates(1)#definition of exit camera
Plates=['']
def entrance_cam():
    """Handle a car arriving at the entrance: read its plate, compare it
    against registered plates, and route it to the handicapped or normal
    area depending on availability."""
    plate_entry=entry_gate.take_pic() # grab the incoming car's plate image
    i=Car.get_plates() # fetch all registered plates
    if plate_entry in i: # plate is registered (handicapped permit)
        if len(Handicaped_Users)==H_P: # handicapped area full
            if len(Normal_Users)==N_P:
                print('Otopark Tamamem dolu')
                # both handicapped and normal areas are full:
                # notify the user that the car park is full.
            else:
                # Handicapped area full but a normal slot is free: park there.
                # NOTE(review): this branch appends to Normal_Users but calls
                # Car.add_handicaped — confirm whether add_normal was intended.
                Normal_Users.append(plate_entry)
                Car.add_handicaped(plate_entry)
                ''' eger engelli park alani dolu ve normal park alaninda
                bos alan varsa gelen araci oraya yonlendir
                '''
        else:
            # A handicapped slot is free: direct the car there.
            Handicaped_Users.append(plate_entry)
            Car.add_handicaped(plate_entry)
    else: # unregistered plate: treat as a normal (non-handicapped) car
        if len(Normal_Users)==N_P:
            print('Otopark Tamamem dolu')
        else:
            Normal_Users.append(plate_entry)
            Car.add_normal(plate_entry)
def exit_cam():
    """Handle a car leaving: read its plate and drop it from the in-park lists.

    Bug fixed: ``list.remove`` was subscripted (``remove[plate]``) instead of
    called (``remove(plate)``), which raised TypeError on every exit.
    """
    plate_exit = exit_gate.take_pic()
    registered = Car.get_plates()
    if plate_exit in registered:
        if plate_exit in Handicaped_Users:
            Handicaped_Users.remove(plate_exit)
        elif plate_exit in Normal_Users:
            Normal_Users.remove(plate_exit)
    elif plate_exit in Normal_Users:
        # Unregistered car that parked in the normal area.
        Normal_Users.remove(plate_exit)
    if len(Handicaped_Users) > 0:
        # NOTE(review): get_parked_areas / check_distances are not defined or
        # imported anywhere in this file — confirm where they live.
        park_no = get_parked_areas()
        check_distances(park_no)
def Read_Card():
    """Block until an RFID card is presented, then print its UID.

    NOTE(review): 'asd' is not defined anywhere in this file — the
    asd.take_pic() call below would raise NameError; confirm which camera
    object was intended.
    """
    rdr.wait_for_tag()
    (error, data) = rdr.request()
    if not error:
        print("Kart Algilandi!")
        (error, uid) = rdr.anticoll()
        if not error:
            # UID is five bytes, joined with spaces.
            kart_uid = str(uid[0])+" "+str(uid[1])+" "+str(uid[2])+" "+str(uid[3])+" "+str(uid[4])
            asd.take_pic()
            print(kart_uid)
            sleep(1)
# Spawn the card-reader, entrance and exit workers, then wait for them.
# NOTE(review): this wiring is broken as written and needs a pass:
#  - 'que' and 'threads_list' are never defined in this file;
#  - 'threcam_thad2' is undefined (and card_th is never started);
#  - each lambda takes (q, arg1) but args=(que,) supplies only one value,
#    so Thread would raise TypeError when it invokes the target.
card_th = Thread(target=lambda q, arg1: q.put(Read_Card()), args=(que,))
threcam_thad2.start()
threads_list.append(card_th)
entry_th = Thread(target=lambda q, arg1: q.put(entrance_cam(arg1)), args=(que,))
entry_th.start()
threads_list.append(entry_th)
exit_th = Thread(target=lambda q, arg1: q.put(exit_cam(arg1)), args=(que,))
exit_th.start()
threads_list.append(exit_th)
for t in threads_list:
    t.join()
20,430 | 268baa847371f843a131d1978124b6cd8eb1debf | def solution(food_times, k):
food_times_list = []
totalTime = 0
for i in range(0, len(food_times)):
food_times_list.append([i, food_times[i]])
totalTime+=food_times[i]
if totalTime <= k:
return -1
food_times_list = sorted(food_times_list, key=lambda x:x[1])
delTime = food_times_list[0][1]*len(food_times_list)
i=1
while delTime < k:
k-=delTime
delTime = (food_times_list[i][1]-food_times_list[i-1][1])*(len(food_times_list)-i)
i+=1
food_times_list = sorted(food_times_list[i-1:], key=lambda x:x[0])
# print k
return food_times_list[k%len(food_times_list)][0]+1
if __name__ == '__main__':
solution([3,1,2],5) |
20,431 | efeaee21046424be9b0b0b70811562fafaa0ef9e | from logging.handlers import TimedRotatingFileHandler
import os
import logging
class ProductionConfig:
    """Production settings: SSO endpoint and local service identifier.

    NOTE: SECRET_KEY is regenerated on every import, which invalidates
    existing sessions across restarts — confirm this is intended.
    """
    SECRET_KEY = os.urandom(24)
    sso_url = "https://auth.gddci.com"
    local_url = "1001"
class DevelopmentConfig:
    """Development settings: test SSO endpoint and local service identifier."""
    SECRET_KEY = os.urandom(24)
    sso_url = "http://test.auth.gddci.com"
    local_url = "2001"
def log():
    """Return the module logger, attaching a weekly-rotating file handler
    exactly once (repeat calls reuse the already-configured logger)."""
    log_mgr = logging.getLogger(__name__)
    log_mgr.setLevel(logging.INFO)
    if log_mgr.handlers:
        return log_mgr
    # file_handler = logging.FileHandler("../serverlog/app.log", encoding="utf-8")
    handler = TimedRotatingFileHandler("../serverlog/app.log", when="W6", interval=1, encoding="utf-8")
    handler.setFormatter(logging.Formatter(fmt="%(asctime)s %(levelname)s %(filename)s %(message)s",
                                           datefmt="%Y/%m/%d %X"))
    log_mgr.addHandler(handler)
    return log_mgr
flask_config = {
"production": ProductionConfig,
"development": DevelopmentConfig
}
logger = log()
|
20,432 | 99a65ccf5845f2647cfbbd272985f96c6829ea75 | # -*- coding: utf-8 -*-
"""
pyplot_line.py
Simple line plot using matplotlib.pyplot
Daniel Thomas
October 9, 2017
"""
import matplotlib.pyplot as plt
# Plot y = x^2 at four sample points as a red ('r') line and display it.
x = [1,2,3,4]
y = [1,4,9,16]
plt.plot(x, y, 'r')
plt.show()
20,433 | 02b4b9e5cb3b6dde606fcd546df72fb29ca4c273 |
"""
-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.
-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:
-'-'-'-'-'-'-'-'-'-'-'-'-'-'-'-'-'-'-'-'-'-'-'-'-'-'-'-'-'-'-'-'-'-'-'
This file is part of JMOO,
Copyright Joe Krall, 2014.
JMOO is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
JMOO is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with JMOO. If not, see <http://www.gnu.org/licenses/>.
-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.
-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:-:
-'-'-'-'-'-'-'-'-'-'-'-'-'-'-'-'-'-'-'-'-'-'-'-'-'-'-'-'-'-'-'-'-'-'-'
"""
from pom3_teams import *
from pom3_requirements import *
import random
class pom3_decisions:
    """Typed view over the 9-element POM3 decision vector X."""

    def __init__(self, X):
        """Unpack the decision vector; size and plan are coerced to int."""
        self.culture = X[0]
        self.criticality = X[1]
        self.criticality_modifier = X[2]
        self.initial_known = X[3]
        self.interdependency = X[4]
        self.dynamism = X[5]
        self.size = int(X[6])
        self.plan = int(X[7])
        self.team_size = X[8]
class pom3:
    """POM3 software-project simulator; simulate() maps a decision vector to
    objective scores [cost, 1 - score, idle]."""
    def simulate(p3, inputs):
        """Run one simulation for the 9-element decision vector 'inputs'.

        The shuffle count is drawn randomly (2..6), so repeated calls with
        the same inputs are stochastic unless the RNG is seeded.
        """
        # # # # # # # # # # #
        # 0) Initialization #
        # # # # # # # # # # #
        POM3_DECISIONS = pom3_decisions(inputs)
        numberOfShuffles = random.randint(2,6)
        # # # # # # # # # # # # # # #
        # 1) Generate Requirements  #
        # # # # # # # # # # # # # # #
        POM3_REQUIREMENTS = pom3_requirements(POM3_DECISIONS)
        # # # # # # # # # # #
        # 2) Generate Teams #
        # # # # # # # # # # #
        POM3_TEAMS = pom3_teams(POM3_REQUIREMENTS, POM3_DECISIONS)
        # # # # # # # #
        # 3) Shuffle  #
        # # # # # # # #
        # Each shuffle: teams get budget, pick tasks by their sorting
        # strategy, execute, and discover newly visible tasks.
        for shufflingIteration in range(numberOfShuffles):
            for team in POM3_TEAMS.teams:
                team.updateBudget(numberOfShuffles)
                team.collectAvailableTasks(POM3_REQUIREMENTS)
                team.applySortingStrategy()
                team.executeAvailableTasks()
                team.discoverNewTasks()
                team.updateTasks()
        # # # # # # # # # # # # #
        # 4) Objective Scoring  #
        # # # # # # # # # # # # #
        # 'god' totals are the omniscient baseline: value/cost over all
        # completed tasks, used to normalize the achieved frontier.
        cost_sum,value_sum,god_cost_sum,god_value_sum,completion_sum,available_sum,total_tasks = 0.0, 0.0, 0.0, 0.0, 0,0,0
        for team in POM3_TEAMS.teams:
            cost_sum += team.cost_total
            value_sum += team.value_total
            available_sum += team.numAvailableTasks
            completion_sum += team.numCompletedTasks
            for task in team.tasks:
                if task.val.visible:
                    total_tasks += 1
            for task in team.tasks:
                if task.val.done == True:
                    god_cost_sum += task.val.cost
                    god_value_sum += task.val.value
        # Guard every ratio against a zero denominator.
        if cost_sum == 0: our_frontier = 0.0
        else: our_frontier = value_sum / cost_sum
        if god_cost_sum == 0: god_frontier = 0.0
        else: god_frontier = god_value_sum / god_cost_sum
        if god_frontier == 0.0: score = 0.0
        else: score = our_frontier / god_frontier
        if completion_sum == 0: cost = 0
        else: cost = cost_sum/completion_sum
        if available_sum == 0: idle = 0
        else: idle = 1 - completion_sum/float(available_sum)
        if total_tasks == 0: completion = 0
        else: completion = completion_sum/float(total_tasks)
        # return [cost, score, completion, idle]
        # Objectives are all to-be-minimized, hence 1-score.
        return [cost, 1-score, idle]
# Test Code
# p3 = POM3()
# p3.simulate([0.20, 1.26, 8, 0.95, 100, 10, 2, 5, 20])
|
20,434 | 5d1a5748b0c6b3cba68f22aa868bd753b0d1716a | #
# idlparse.py
#
# an example of using the parsing module to be able to process a subset of the CORBA IDL grammar
#
# Copyright (c) 2003, Paul McGuire
#
from pyparsing import Literal, CaselessLiteral, Word, Upcase, OneOrMore, ZeroOrMore, \
Forward, NotAny, delimitedList, oneOf, Group, Optional, Combine, alphas, nums, restOfLine, cStyleComment, \
alphanums, printables, empty, quotedString, ParseException, ParseResults
import pprint
bnf = None
def CORBA_IDL_BNF():
    """Build and return a pyparsing grammar for a subset of CORBA IDL.

    The grammar is constructed once and cached in the module-global
    `bnf`, so repeated calls are cheap.  Single-line (//) and C-style
    (/* */) comments are ignored by the returned parser.
    """
    global bnf
    if not bnf:
        # punctuation
        colon = Literal(":")
        lbrace = Literal("{")
        rbrace = Literal("}")
        lbrack = Literal("[")
        rbrack = Literal("]")
        lparen = Literal("(")
        rparen = Literal(")")
        equals = Literal("=")
        comma = Literal(",")
        dot = Literal(".")
        slash = Literal("/")
        bslash = Literal("\\")
        star = Literal("*")
        semi = Literal(";")
        langle = Literal("<")
        rangle = Literal(">")
        # keywords
        any_ = Literal("any")
        attribute_ = Literal("attribute")
        boolean_ = Literal("boolean")
        case_ = Literal("case")
        char_ = Literal("char")
        const_ = Literal("const")
        context_ = Literal("context")
        default_ = Literal("default")
        double_ = Literal("double")
        enum_ = Literal("enum")
        exception_ = Literal("exception")
        false_ = Literal("FALSE")
        fixed_ = Literal("fixed")
        float_ = Literal("float")
        inout_ = Literal("inout")
        interface_ = Literal("interface")
        in_ = Literal("in")
        long_ = Literal("long")
        module_ = Literal("module")
        object_ = Literal("Object")
        octet_ = Literal("octet")
        oneway_ = Literal("oneway")
        out_ = Literal("out")
        raises_ = Literal("raises")
        readonly_ = Literal("readonly")
        sequence_ = Literal("sequence")
        short_ = Literal("short")
        string_ = Literal("string")
        struct_ = Literal("struct")
        switch_ = Literal("switch")
        true_ = Literal("TRUE")
        typedef_ = Literal("typedef")
        unsigned_ = Literal("unsigned")
        union_ = Literal("union")
        void_ = Literal("void")
        wchar_ = Literal("wchar")
        wstring_ = Literal("wstring")
        identifier = Word( alphas, alphanums + "_" ).setName("identifier")
        # numeric literals: reals with optional exponent; ints, including 0x-hex
        real = Combine( Word(nums+"+-", nums) + dot + Optional( Word(nums) )
                        + Optional( CaselessLiteral("E") + Word(nums+"+-",nums) ) )
        integer = ( Combine( CaselessLiteral("0x") + Word( nums+"abcdefABCDEF" ) ) |
                    Word( nums+"+-", nums ) ).setName("int")
        # user-defined (possibly scoped) type name, e.g. A::B::C
        udTypeName = delimitedList( identifier, "::", combine=True ).setName("udType")
        # have to use longest match for type, in case a user-defined type name starts with a keyword type, like "stringSeq" or "longArray"
        typeName = ( any_ ^ boolean_ ^ char_ ^ double_ ^ fixed_ ^
                     float_ ^ long_ ^ octet_ ^ short_ ^ string_ ^
                     wchar_ ^ wstring_ ^ udTypeName ).setName("type")
        # Forward allows sequence< sequence< ... > > recursion
        sequenceDef = Forward().setName("seq")
        sequenceDef << Group( sequence_ + langle + ( sequenceDef | typeName ) + rangle )
        typeDef = sequenceDef | ( typeName + Optional( lbrack + integer + rbrack ) )
        typedefDef = Group( typedef_ + typeDef + identifier + semi ).setName("typedef")
        moduleDef = Forward()
        constDef = Group( const_ + typeDef + identifier + equals + ( real | integer | quotedString ) + semi ) #| quotedString )
        exceptionItem = Group( typeDef + identifier + semi )
        exceptionDef = ( exception_ + identifier + lbrace + ZeroOrMore( exceptionItem ) + rbrace + semi )
        attributeDef = Optional( readonly_ ) + attribute_ + typeDef + identifier + semi
        paramlist = delimitedList( Group( ( inout_ | in_ | out_ ) + typeName + identifier ) ).setName( "paramlist" )
        operationDef = ( ( void_ ^ typeDef ) + identifier + lparen + Optional( paramlist ) + rparen + \
                         Optional( raises_ + lparen + Group( delimitedList( typeName ) ) + rparen ) + semi )
        interfaceItem = ( constDef | exceptionDef | attributeDef | operationDef )
        interfaceDef = Group( interface_ + identifier + Optional( colon + delimitedList( typeName ) ) + lbrace + \
                              ZeroOrMore( interfaceItem ) + rbrace + semi ).setName("opnDef")
        moduleItem = ( interfaceDef | exceptionDef | constDef | typedefDef | moduleDef )
        moduleDef << module_ + identifier + lbrace + ZeroOrMore( moduleItem ) + semi
        bnf = ( moduleDef | OneOrMore( moduleItem ) )
        singleLineComment = "//" + restOfLine
        bnf.ignore( singleLineComment )
        bnf.ignore( cStyleComment )
    return bnf
def test( strng ):
    """Parse `strng` with the cached IDL grammar and pretty-print the tokens.

    On a ParseException, prints the offending input line with a caret
    under the failure column, then the exception itself.
    (Python 2 print-statement syntax.)
    """
    print strng
    try:
        bnf = CORBA_IDL_BNF()
        tokens = bnf.parseString( strng )
        print "tokens = "
        pprint.pprint( tokens.asList() )
    except ParseException, err:
        # err.line / err.column locate the failure within the input
        print err.line
        print " "*(err.column-1) + "^"
        print err
    print
# Smoke test 1: typedefs (bounded array, nested sequence<>) + a simple interface.
test(
"""
/*
* a block comment *
*/
typedef string[10] tenStrings;
typedef sequence<string> stringSeq;
typedef sequence< sequence<string> > stringSeqSeq;
interface QoSAdmin {
stringSeq method1( in string arg1, inout long arg2 );
stringSeqSeq method2( in string arg1, inout long arg2, inout long arg3);
string method3();
};
"""
)
# Smoke test 2: same declarations with comments interleaved mid-statement,
# exercising cStyleComment handling in awkward positions.
test(
"""
/*
* a block comment *
*/
typedef string[10] tenStrings;
typedef
/** ** *** **** *
* a block comment *
*/
sequence<string> stringSeq;
/* */ /**/ /***/ /****/
typedef sequence< sequence<string> > stringSeqSeq;
interface QoSAdmin {
stringSeq method1( in string arg1, inout long arg2 );
stringSeqSeq method2( in string arg1, inout long arg2, inout long arg3);
string method3();
};
"""
)
# Smoke test 3: const declarations (string/int/negative/float/hex),
# an exception, and an interface (raw string keeps the \n literal).
test(
r"""
const string test="Test String\n";
const long a = 0;
const long b = -100;
const float c = 3.14159;
const long d = 0x007f7f7f;
exception TestException
{
string msg;
sequence<string> dataStrings;
};
interface TestInterface
{
void method1( in string arg1, inout long arg2 );
};
"""
)
# Smoke test 4: deliberately malformed input ( `];` instead of `};` )
# to exercise the ParseException reporting path in test().
test(
"""
module Test1
{
exception TestException
{
string msg;
];
interface TestInterface
{
void method1( in string arg1, inout long arg2 )
raises ( TestException );
};
};
"""
)
# Smoke test 5: a well-formed module containing an exception.
test(
"""
module Test1
{
exception TestException
{
string msg;
};
};
"""
)
|
20,435 | 03afb358eda30a338b4a9f294246f3c30ef6859e | #blackjack.py
from random import shuffle
#import random
# WARNING: THIS PROGRAM CONTAINS MANY ERRORS
# AND MAY NOT SATISFY THE ASSIGNMENT REQUIREMENTS!!
# make a function that counts up the cards in a hand
def countup(hand=None):
    """Return the blackjack point value of a hand.

    `hand` is a sequence of card tuples whose first element is the rank
    string, e.g. ("Queen", " of ", "diamonds").  Face cards and 10 count
    10; each Ace counts 11 unless that would bust the hand, else 1.

    Fixes vs. the original:
      * no mutable default argument;
      * the old `else: total += 11` added 11 even when the hand had NO
        ace at all;
      * every ace is now scored (the old flag scored at most one);
      * removed the unreachable trailing `return 0`.
    """
    if hand is None:
        hand = []
    total = 0
    aces = 0
    for card in hand:
        rank = card[0]
        if rank == "Ace":
            aces += 1  # scored after the fixed ranks are summed
        elif rank in ("King", "Queen", "Jack", "10"):
            total += 10
        else:
            total += int(rank)
    # Each ace is worth 11 only when that does not push the hand past 21.
    for _ in range(aces):
        total += 11 if total + 11 <= 21 else 1
    return total
# initialize variables
playerwins = 0
playerloses = 0
cards = []
suits = ["hearts", "spades", "diamonds", "clubs"]
numbers = ["Ace", "King", "Queen", "Jack", "10", "9", "8", "7", "6", "5", "4", "3", "2"]
# build the 52-card deck as (rank, " of ", suit) tuples
for suit in suits:
    for num in numbers:
        cards.append((num, " of ", suit))
shuffle(cards)
gameover = False  # flag to tell us if no more cards in the deck
startcard = 0
while not gameover:
    # game over when fewer than 5 cards remain (2 player + 2 dealer + 1 hit)
    if startcard > 48:
        print("Game over! Player won ", playerwins,
              " hands and lost ", playerloses)
        gameover = True
        break
    # starting a new hand
    playerhand = cards[startcard:startcard + 2]
    startcard += 2
    print("Here is your starting hand: ", playerhand)
    # an immediate 21 wins the hand outright
    if countup(playerhand) == 21:
        print("Congratulations, you win!")
        playerwins += 1
        continue
    dealerhand = cards[startcard:startcard + 2]
    startcard += 2
    handover = False  # set when the player busts or hits exactly 21
    # BUG FIX: the original loop condition was `hitme not in "YyNn"`, so a
    # player could take at most ONE extra card; now we loop until they
    # stand, bust, hit 21, or the deck runs out.
    while startcard < 52:
        hitme = input("Another card? Y/N")
        # `'' in "YyNn"` is True, so length must be checked explicitly
        if len(hitme) != 1 or hitme not in "YyNn":
            print(" enter Y or N, please")
            continue
        if hitme in "Nn":
            break
        playerhand.append(cards[startcard])
        startcard += 1
        print(playerhand)
        total = countup(playerhand)
        if total > 21:
            # player busts and loses immediately
            print("You lost. ")
            playerloses += 1
            handover = True
            break
        if total == 21:
            print("Blackjack! you win!")
            playerwins += 1  # BUG FIX: the win was never tallied here
            handover = True
            break
    if handover:
        continue  # hand already decided; skip the showdown
    # No more cards dealt to player, see who won
    # BUG FIX: the prints below referenced undefined `playercount` /
    # `dealercount` (NameError); showdown results are now also tallied.
    playertot = countup(playerhand)
    dealertot = countup(dealerhand)
    if playertot > dealertot:
        print("Player wins with total count of",
              playertot, "greater than dealer's", dealertot)
        playerwins += 1
    else:
        # ties go to the dealer, as in the original message logic
        print("Player loses with ", playertot,
              " less than dealer's", dealertot)
        playerloses += 1
|
20,436 | 06bac0c3dbb664716b12940ebf36bd38e69797d0 | import numpy as np
import time as time
from sortedcontainers import SortedSet;
from pyGM.factor import *
from pyGM.graphmodel import *
inf = float('inf')
# Basic implementation -- flooding schedule f->v, v->f etc.
#
#
#
def LBP(model, maxIter=100, verbose=False):
    """Loopy belief propagation with a flooding schedule (f->v, v->f).

    Args:
      model: GraphModel whose factors define the distribution.
      maxIter: number of full message-passing sweeps.
      verbose: print the Bethe lnZ estimate after each sweep.

    Returns:
      (lnZ, beliefs_V): Bethe free-energy estimate of log(Z) and the
      list of variable marginal beliefs.

    Fix: `xrange` (Python-2-only) replaced by `range`, which behaves
    identically here and also runs under Python 3.
    """
    beliefs_F = [ f/f.sum() for f in model.factors ] # copies & normalizes each f
    beliefs_V = [ Factor([v],1.0/v.states) for v in model.X ] # variable beliefs
    msg = {}
    for f in model.factors:
        for v in f.vars:
            msg[v,f] = Factor([v],1.0) # init msg[i->alpha]
            msg[f,v] = Factor([v],1.0) # and msg[alpha->i]
    for t in range(1,maxIter+1): # for each iteration:
        # Update beliefs and outgoing messages for each factor:
        for a,f in enumerate(model.factors):
            beliefs_F[a] = f.copy() # find f * incoming msgs & normalize
            for v in f.vars: beliefs_F[a] *= msg[v,f]
            beliefs_F[a] /= beliefs_F[a].sum() # divide by i->f & sum out all but Xi
            for v in f.vars: msg[f,v] = beliefs_F[a].marginal([v])/msg[v,f]
        # Update beliefs and outgoing messages for each variable:
        for i,v in enumerate(model.X):
            beliefs_V[i] = Factor([v],1.0) # find product of incoming msgs & normalize
            for f in model.factorsWith(v): beliefs_V[i] *= msg[f,v]
            beliefs_V[i] /= beliefs_V[i].sum() # divide by f->i to get msg i->f
            for f in model.factorsWith(v): msg[v,f] = beliefs_V[i]/msg[f,v]
        # Compute estimate of the log partition function:
        # E_b [ log f ] + H_Bethe(b) = \sum_f E_bf[log f] + \sum_f H(bf) + \sum (1-di) H(bi)
        lnZ = sum([(1-len(model.factorsWith(v)))*beliefs_V[v].entropy() for v in model.X])
        for a,f in enumerate(model.factors):
            lnZ += (beliefs_F[a] * f.log()).sum()
            lnZ += beliefs_F[a].entropy()
        if verbose: print("Iter "+str(t)+": "+str(lnZ))
    return lnZ,beliefs_V
#@do_profile(follow=[get_number])
def NMF(model, maxIter=100, beliefs=None, verbose=False):
    """Simple naive mean field lower bound on log(Z). Returns lnZ,[bel(Xi) for Xi in X]

    Coordinate ascent on fully-factored beliefs; each pass updates every
    variable's belief to exp(E[log f]) and re-evaluates the bound.
    Fix: `xrange` (Python-2-only) replaced by `range`.
    """
    if beliefs is None: beliefs = [Factor([Xi],1.0/Xi.states) for Xi in model.X]
    lnZ = sum([beliefs[Xi].entropy() for Xi in model.X])
    for f in model.factors:
        m = f.log()
        for v in f.vars: m *= beliefs[v]
        lnZ += m.sum()
    if verbose: print("Iter 0: "+str(lnZ))
    for t in range(1,maxIter+1): # for each iteration:
        # Update all the beliefs via coordinate ascent:
        for Xi in model.X: # for each variable,
            bNew = 0.0 # compute E[ log f ] as a function of Xi:
            for f in model.factorsWith(Xi,copy=False): # for each factor f_a, compute:
                m = f.log() # E[log f_a] = \sum \log f_a \prod b_v
                for v in f.vars - [Xi]: m *= beliefs[v]
                bNew += m.marginal([Xi]) # sum them up to get E[log f]
            bNew -= bNew.max() # (numerical issues)
            bNew = bNew.exp()
            bNew /= bNew.sum() # set b(Xi) = exp( E[log f] ) / Z
            beliefs[Xi] = bNew
        #
        # Compute the lower bound on the partition function:
        # E_b [ log f ] + H(b) = \sum_a E[log f_a] + \sum_i H(b_i) for independent beliefs
        lnZ = sum([beliefs[Xi].entropy() for Xi in model.X])
        for f in model.factors:
            m = f.log()
            for v in f.vars: m *= beliefs[v]
            lnZ += m.sum()
        if verbose: print("Iter "+str(t)+": "+str(lnZ))
    return lnZ,beliefs
################ DECOMPOSITION METHODS #############################################
#@do_profile(follow=[get_number])
def DualDecomposition(model, maxIter=100, verbose=False):
    """ ub,lb,xhat = DualDecomposition( model [,maxiter,verbose] )
    Compute a decomposition-based upper bound & estimate of the MAP of a graphical model

    Fix: `xrange` (Python-2-only) replaced by `range`.
    """
    lnF = sum( np.log(f.max()) for f in model.factors )
    lnX, xhat = -np.inf, np.zeros( (len(model.X),), dtype=int)
    lnR, rhat = -np.inf, np.zeros( (len(model.X),), dtype=int)
    if verbose: print("Iter 0: "+str(lnF))
    for t in range(1,maxIter+1): # for each iteration:
        # Update each variable in turn: match the max-marginals of its factors
        for Xi in model.X: # for each variable,
            flist = model.factorsWith(Xi, copy=False)
            gamma = [f.maxmarginal([Xi]) for f in flist]
            avg = np.prod(gamma)**(1.0/len(gamma))
            for f,g in zip(flist,gamma): f *= avg/(g+1e-300) # !!! numerical issues...
            xhat[Xi] = avg.argmax()[0] # guess a state for Xi
        #
        # Compute the upper bound on the maximum and the value of our current guess
        lnF = sum( np.log(f.max()) for f in model.factors )
        lnX = model.logValue( xhat )
        if lnR < lnX: lnR = lnX; rhat[:]=xhat;
        if verbose: print("Iter "+str(t)+": "+str(lnF)+" > "+str(lnX))
        if (lnF == lnX): break # bound met => xhat is exactly the MAP
    return lnF,lnR,rhat
def WeightedDD( factors, weights, elimOrder, direction=1.0, maxIter=100, verbose=False, stop_tol=0.0 ):
    """Weighted dual-decomposition bound on log(Z) via block coordinate updates.

    Args:
      factors: list of factors; each is reparameterized in log-space.
      weights: one weight vector per factor (parallel to `factors`).
      elimOrder: variable elimination order fixing the powered-sum priority.
      direction: +1.0 tightens an upper bound, -1.0 a lower bound.
      stop_tol: stop when the per-iteration improvement falls below this.

    Returns:
      (lnZw, thetas): the bound value and the reparameterized log-factors.

    Fixes: `xrange` -> `range`, and `reduce` is imported from functools
    (it is not a builtin under Python 3).
    """
    from functools import reduce
    step_inner = 5;
    thetas = [f.log() for f in factors]
    weights = { th:wt for th,wt in zip(thetas,weights) }
    logmodel = GraphModel(thetas, copy=False)
    def wt_elim(f,w,pri):
        # weighted (power-sum) elimination of all of f's variables, in priority order
        elim_ord = np.argsort( [pri[x] for x in f.vars] )
        tmp = f
        for i in elim_ord: tmp = tmp.lsePower([f.v[i]],1.0/w[i])
        return tmp
    def calc_bound( thetas, weights, pri):
        return sum([wt_elim(th,wt,pri) for th,wt in zip(thetas,weights)])
    def calc_deriv(th,w,pri,match,Xi=None):
        # gradient wrt theta (weighted marginal over `match`) and, if Xi given,
        # the conditional-entropy term used for the weight gradient
        elim_ord = np.argsort( [pri[x] for x in th.vars] )
        lnZ0 = th.copy()
        lnmu = 0.0 * lnZ0
        for i in elim_ord: # run over v[i],w[i] in the given elim order
            lnZ1 = lnZ0.lsePower([th.v[i]],1.0/w[i])
            lnZ0 -= lnZ1; # update lnmu += (lnZ0 - lnZ1)*(1.0/w[i])
            lnZ0 *= (1.0/w[i]);
            lnmu += lnZ0; # TODO: save a copy by assigning = lnZ0 on 1st loop?
            lnZ0 = lnZ1; # then move to the next conditional
        lnmu.expIP()
        Hxi = 0.0
        if Xi is not None:
            keep = [x for x in th.vars if pri[x]>=pri[Xi]]
            forH = lnmu.marginal(keep) if len(keep) < th.nvar else lnmu
            Hxi = forH.entropy() - forH.sum([Xi]).entropy() if forH.nvar > 1 else forH.entropy()
        return lnmu.marginal(match), Hxi
    def update_weights(weights,idx,dW,stepW): # TODO only works for positive weights
        wtot = 0.0
        for j,wt,dw in zip(idx,weights,dW): wt[j] *= np.exp( - stepW * wt[j] * dw ); wtot += wt[j];
        for j,wt,dw in zip(idx,weights,dW): wt[j] /= wtot;
    def armijo(thetas,weights,pri,Xi,steps,threshold=1e-4,direction=+1, optTol=1e-8,progTol=1e-8):
        # backtracking line search on the block of factors sharing Xi
        import copy
        f0,f1 = None, calc_bound(thetas,weights,pri) # init prev, current objective values
        match = reduce(lambda a,b: a&b, [th.vars for th in thetas], thetas[0].vars)
        idx = [th.v.index(Xi) for th in thetas] if Xi is not None else [] # find location of Xi in var/weight vectors
        newweights = copy.deepcopy(weights) if Xi is not None else weights # copy weights if updated
        for s in range(steps):
            # compute gradients dPhi/dTheta, dPhi/dW (wrt parameters, weights):
            dT,dW = zip(*[calc_deriv(th,wt,pri,match,Xi) for th,wt in zip(thetas,weights)])
            dT,dW = list(dT),list(dW)
            for dt in dT[1:]: dt -= dT[0]; dt *= -1;
            dT[0] = -sum(dT[1:])
            if Xi is not None:
                Hbar = sum([wt[j]*dw for j,dw,wt in zip(idx,dW,weights)])
                for j in range(len(dW)): dW[j] -= Hbar
            # Compute gradient norms:
            L0,L1,L2 = zip(*[ (d.max(),d.sum(),(d*d).sum()) for dt in dT for d in [dt.abs()]])
            L0,L1,L2 = max(L0),sum(L1)+1e-300,sum(L2)
            L0,L1,L2 = max(L0,max(abs(dw) for dw in dW)), L1+sum(abs(dw) for dw in dW), L2+sum(dw*dw for dw in dW)
            if L0 < optTol: return # if < optTol => local optimum
            step = min(1.0, 1.0/L1) if f0 is None else min(1.0, direction*(f0-f1)/L1)
            step = step if step > 0 else 1.0
            f0 = f1; # update "old" objective value
            for dt in dT: dt *= direction*step; # premultiply step size into dT
            for j in range(10):
                newthetas = [th+dt for th,dt in zip(thetas,dT)] # step already pre-multiplied
                if Xi is not None: update_weights( newweights, idx, dW, step );
                f1 = calc_bound(newthetas,newweights,pri)
                if (f0 - f1)*direction > step*threshold*L2: # if armijo "enough improvement" satisfied
                    for th,nth in zip(thetas,newthetas): th.t[:] = nth.t # rewrite tables
                    for j,wt,w2 in zip(idx,weights,newweights): wt[j] = w2[j];
                    break;
                else: # ow, back off
                    step *= 0.5;
                    if step*L0 < progTol: return # if < progTol => no progress possible
                    for dt in dT: dt *= 0.5
    elimOrder = np.asarray(elimOrder);
    pri = np.zeros((elimOrder.max()+1,))
    pri[elimOrder] = np.arange(len(elimOrder))
    #
    lnZw = calc_bound(thetas,[weights[th] for th in thetas],pri)
    start_time = time.time()
    if verbose: print("Iter 0: "+str(lnZw))
    for t in range(1,maxIter+1): # for each iteration:
        # Update each variable in turn:
        for Xi in logmodel.X: # for each variable,
            theta_i = logmodel.factorsWith(Xi)
            if len(theta_i) <= 1: continue;
            weight_i = [weights[th] for th in theta_i]
            armijo(theta_i,weight_i,pri,Xi, 5, 0.01, direction)
        #
        # Compute the upper bound on the maximum and the value of our current guess
        prev, lnZw = lnZw, calc_bound(thetas,[weights[th] for th in thetas],pri)
        if verbose: print("[{}] Iter {} : {}".format(time.time()-start_time,t,lnZw));
        if (prev - lnZw)*direction < stop_tol: break
    return lnZw, thetas
|
20,437 | b9ebbfd7cd96ecefc0cda7751a11a7f77ed8cc56 | # -*- coding: utf-8 -*-
''' Problem 3
The prime factors of 13195 are 5, 7, 13 and 29.
What is the largest prime factor of the number 600851475143 ?
Find the largest prime factor of 600851475143.
'''
def largest_prime_factor(n):
    """Return the largest prime factor of n (n >= 2).

    Trial division up to sqrt(n): strip factor 2 first, then odd
    divisors only.  This fixes the original, which started at 3 (wrong
    for even n) and walked divisors all the way to n, i.e. O(n).
    """
    largest = 1
    divisor = 2
    while divisor * divisor <= n:
        if n % divisor == 0:
            largest = divisor
            n //= divisor  # divide the factor out; repeats handle multiplicity
        else:
            divisor += 1 if divisor == 2 else 2  # 2 -> 3, then odd numbers only
    # whatever remains above 1 is itself prime and is the largest factor
    return n if n > 1 else largest


if __name__ == '__main__':
    print('largest prime factor is %d' % largest_prime_factor(600851475143))
|
20,438 | 21ab7a0bddc9da29017efda0ecfb8c56074b186b | '''
author: ming
ming.song.cn@outlook.com
copyright@2020
'''
import os
from pdb import Pdb
import sys
import numpy as np
import torch
from torch.optim import *
from torch import nn, optim, cuda
from torch.utils.data import Dataset, DataLoader
# from torch.utils.data import
from sklearn import preprocessing
import copy
from random import sample
from math import isnan
import datetime
import pickle
from scgkit2.signal.signal_distort import signal_distort
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_absolute_error
from pdb import set_trace as st
import warnings
warnings.filterwarnings("ignore")
# batch_size = 1000
# test_only = False
# VISUAL_FLAG = False
# # test_only = bool(sys.argv[1])
# lr = 0.001
# dim_feature = 100
def get_size(input_shape, k_size, max_pool_k_size, layers):
    """Trace the feature length through a stack of conv stages.

    Each stage shrinks the length by (k_size - 1); every stage except a
    trailing non-first stage is followed by max-pooling, which divides
    the length by max_pool_k_size with truncation.  Mirrors CNN_Net's
    forward pass so the flatten size can be computed up front.
    """
    size = input_shape
    for stage in range(layers):
        size = size - k_size + 1
        # the first stage is always pooled; the final stage (when it is
        # not also the first) is a bare convolution
        if stage == 0 or stage < layers - 1:
            size = int(size / max_pool_k_size)
    return size
class Initial_Dataset(Dataset):
    """Minimal Dataset over pre-built feature/label arrays.

    X and Y are 2-D arrays with matching first dimensions; item i is
    the pair (X[i], Y[i]).
    """

    def __init__(self, X, Y):
        self.array_Tx = X  # feature rows
        self.array_Ty = Y  # label rows, aligned with array_Tx

    def __getitem__(self, index):
        sample = self.array_Tx[index, :]
        target = self.array_Ty[index, :]
        return sample, target

    def __len__(self):
        return self.array_Tx.shape[0]
#
# class CNN_LSTM_Net(nn.Module):
# """docstring for CNN_LSTM_Net."""
#
# def __init__(self, LOG=False):
# super(CNN_LSTM_Net, self).__init__()
# #### define layers
#
# ## CNN part
# self.conv1 = nn.Conv1d(in_channels=1, out_channels=128, kernel_size=3)
# self.conv2 = nn.Conv1d(in_channels=128, out_channels=256, kernel_size=7)
# self.conv3 = nn.Conv1d(in_channels=128, out_channels=64, kernel_size=5)
# self.batch_norm1d_1 = nn.BatchNorm1d(128)
# self.batch_norm1d_2 = nn.BatchNorm1d(256)
# self.batch_norm1d_3 = nn.BatchNorm1d(64)
#
# self.max_pool1d = nn.MaxPool1d(kernel_size=2)
# self.prelu = nn.PReLU()
# self.dropout = nn.Dropout(p=0.5)
#
# ## LSTM part
# self.lstm = nn.LSTM(input_size=746, hidden_size=128, batch_first=True, num_layers=1)
# self.decoding_layer = nn.Linear(128, 4)
#
#
#
# def forward(self, x):
# # import pdb; pdb.set_trace()
# conv1 = self.conv1(x)
# conv1 = self.batch_norm1d_1(conv1)
# conv1 = self.prelu(conv1)
# conv1 = self.dropout(conv1)
# conv1 = self.max_pool1d(conv1)
#
# conv2 = self.conv2(conv1)
# conv2 = self.batch_norm1d_2(conv2)
# conv2 = self.prelu(conv2)
# conv2 = self.dropout(conv2)
# conv2 = self.max_pool1d(conv2)
#
# out, (hid, c) = self.lstm(conv2)
# pred = self.decoding_layer(hid[0])
#
# return pred
class LSTM(nn.Module):
    """3-layer LSTM regressor: (batch, seq, 3000) -> (batch, 4)."""

    def __init__(self):
        super().__init__()
        self.lstm = nn.LSTM(input_size=3000, hidden_size=128, batch_first=True, num_layers=3)
        self.decoding_layer = nn.Linear(128, 4)

    def forward(self, input_seq):
        """Decode the final hidden state of the TOP LSTM layer.

        BUG FIX: the original used hid[0], i.e. the FIRST layer's final
        hidden state; with num_layers=3 that discards everything the two
        stacked layers compute (h_n is laid out (num_layers, batch,
        hidden_size), so the top layer is hid[-1]).
        """
        out, (hid, c) = self.lstm(input_seq)
        pred = self.decoding_layer(hid[-1])
        return pred
class LstmAttentionNet(nn.Module):
    """LSTM with additive self-attention pooling over time steps.

    Input is (batch, seq) of scalar samples; output is
    (batch, output_features).
    """

    def __init__(self, num_layers, hidden_size, output_features):
        super(LstmAttentionNet, self).__init__()
        attention_size = hidden_size
        # parameter creation order is significant for seeded initialization
        self.lstm = nn.LSTM(input_size=1, hidden_size=hidden_size, batch_first=True, num_layers=num_layers)
        self.w_omega = nn.Parameter(torch.randn(hidden_size, attention_size))
        self.b_omega = nn.Parameter(torch.randn(attention_size))
        self.u_omega = nn.Parameter(torch.randn(attention_size, 1))
        self.decoding_layer = nn.Linear(hidden_size, output_features)

    def forward(self, x):
        # (batch, seq) -> (batch, seq, 1): each scalar sample is one time step
        states, _ = self.lstm(x.unsqueeze(2))
        # additive attention: score each hidden state, softmax over time
        projected = torch.matmul(states, self.w_omega) + self.b_omega
        scores = torch.matmul(projected, self.u_omega)
        attn = nn.functional.softmax(scores, dim=1)
        # attention-weighted sum over the time axis
        pooled = torch.sum(states * attn, 1)
        return self.decoding_layer(pooled)
class CNN_Net(nn.Module):
    """Stack of Conv1d blocks with a two-layer linear decoder head.

    Every block except the last is Conv -> BatchNorm -> ReLU -> Dropout
    -> MaxPool(3); the final block is a bare convolution.  get_size()
    mirrors this layout to precompute the flattened feature length.
    """

    def __init__(self, input_shape, layers, output_features, out_channels, kernel_size):
        super(CNN_Net, self).__init__()
        assert len(out_channels) == layers
        self.layers = layers
        self.out_channels = out_channels
        self.net = nn.ModuleList()
        self.batch_norm = nn.ModuleList()
        # module creation order (conv, then its batch-norm, per stage) is
        # preserved so seeded initialization matches
        in_ch = 1
        for ch in out_channels:
            self.net.append(nn.Conv1d(in_channels=in_ch, out_channels=ch, kernel_size=kernel_size))
            self.batch_norm.append(nn.BatchNorm1d(ch))
            in_ch = ch
        self.max_pool1d = nn.MaxPool1d(kernel_size=3)
        self.prelu = nn.PReLU()  # registered but unused in forward (kept for state-dict compatibility)
        self.dropout = nn.Dropout(p=0.5)
        flatten_size = get_size(input_shape=input_shape, k_size=kernel_size, max_pool_k_size=3, layers=layers)
        self.decoding_layer1 = nn.Linear(flatten_size * out_channels[-1], 128)
        self.decoding_layer2 = nn.Linear(128, output_features)
        self.flatten = nn.Flatten()

    def forward(self, x):
        x = torch.unsqueeze(x, 1)  # (batch, len) -> (batch, 1, len)
        final = self.layers - 1
        for i, conv in enumerate(self.net):
            x = conv(x)
            if i != final:
                x = self.batch_norm[i](x)
                x = torch.relu(x)
                x = self.dropout(x)
                x = self.max_pool1d(x)
        hidden = self.decoding_layer1(self.flatten(x))
        return self.decoding_layer2(hidden)
class AE_Net(nn.Module):
    """Fully-connected autoencoder: input -> 128 -> 64 -> 128 -> input."""

    def __init__(self, input_shape):
        super(AE_Net, self).__init__()
        self.encoder_hidden_layer = nn.Linear(in_features=input_shape, out_features=128)
        self.encoder_output_layer = nn.Linear(in_features=128, out_features=64)
        self.decoder_hidden_layer = nn.Linear(in_features=64, out_features=128)
        self.decoder_output_layer = nn.Linear(in_features=128, out_features=input_shape)

    def forward(self, features):
        """Return (reconstruction, bottleneck_logits).

        The reconstruction is the raw (un-activated) decoder output;
        the second value is the 64-dim encoder output BEFORE its ReLU.
        """
        hidden = torch.relu(self.encoder_hidden_layer(features))
        state_logit = self.encoder_output_layer(hidden)
        code = torch.relu(state_logit)
        decoded = torch.relu(self.decoder_hidden_layer(code))
        reconstructed = self.decoder_output_layer(decoded)
        return reconstructed, state_logit
class FCN_Net(nn.Module):
    """Plain fully-connected regressor.

    `layers` is the total number of Linear layers: input->neurons,
    (layers-2) hidden neurons->neurons layers, then neurons->output.
    ReLU follows every layer except the last.

    BUG FIX: the original used two independent `if` statements in the
    construction loop, so iteration i == 0 appended TWO Linear modules
    (input->neurons plus a hidden/output layer), yielding layers+1
    modules instead of `layers`, and mis-sized the single layer when
    layers == 1.
    """

    def __init__(self, input_features, output_features, layers, neurons):
        super(FCN_Net, self).__init__()
        self.net = nn.ModuleList()
        for i in range(layers):
            in_f = input_features if i == 0 else neurons
            out_f = output_features if i == layers - 1 else neurons
            self.net.append(nn.Linear(in_features=in_f, out_features=out_f))
        # registered but unused in forward; kept for interface compatibility
        self.lrelu = nn.LeakyReLU()

    def forward(self, x):
        for ind, each_layer in enumerate(self.net):
            if ind == len(self.net) - 1:
                pred = each_layer(x)  # final layer: no activation
            else:
                x = torch.relu(each_layer(x))
        return pred
class FCN_Model():
    """Training/evaluation wrapper around FCN_Net.

    Handles device selection (CUDA when available), Adam optimization
    with a StepLR schedule, per-epoch checkpointing of the best model
    (by last-batch train MAE), prediction, and MAE/variance evaluation.
    """
    def __init__(self, input_features=1000, output_features=1, layers=6, neurons=20, learning_rate=0.001, batch_size=32, epoch_number=500):
        super(FCN_Model, self).__init__()
        ####
        # all tensors are explicitly moved to this device in fit/predict/evaluate
        self.device = torch.device('cuda' if cuda.is_available() else 'cpu')
        self.learning_rate = learning_rate
        self.batch_size = batch_size
        self.epoch_number = epoch_number
        # self.ae_Net = AE_Net(input_shape=input_shape)
        self.reg_Net = FCN_Net(input_features=input_features, output_features=output_features, layers=layers, neurons=neurons)
        # self.reg_Net = LstmAttentionNet()
        # self.ae_Net = self.ae_Net.to(device = self.device)
        self.reg_Net = self.reg_Net.to(device = self.device)
        print(f"Using device:{self.device}")
    # def fit(self, all_data, window_len, devide_factor, learning_rate=0.001, batch_size=32, epoch_number=500, CONTINUE_TRAINING = False):
    def fit(self, X, Y):
        """Train reg_Net on features X / targets Y with MSE loss.

        Saves parameters to ../models whenever the train MAE improves.
        NOTE(review): self.error is the MAE of the LAST batch of the
        epoch, not an epoch average -- confirm this is intended.
        """
        # self.data = all_data
        # self.window_len = X.shape[1]
        # normalization constants -- presumably physiological scales
        # (heart rate / resp / systolic / diastolic?); unused in this
        # method as written -- TODO confirm.
        self.h_norm = 90
        self.r_norm = 20
        self.s_norm = 200
        self.d_norm = 100
        # data_train, data_test = self.normalize_and_devide(all_data, window_len, devide_factor)
        train_dataset = Initial_Dataset(X, Y)
        # self.scaler_x, self.scaler_y = train_dataset.get_scalers()
        # test_dataset = Initial_Dataset(X, Y)
        # import pdb; pdb.set_trace()
        train_loader = DataLoader(train_dataset, self.batch_size, shuffle=True, num_workers=4)
        # test_loader = DataLoader(test_dataset, batch_size=1, shuffle=False, num_workers=4)
        ### training component
        loss_fn = torch.nn.MSELoss()
        optimizer_reg = optim.Adam(self.reg_Net.parameters(), lr=self.learning_rate)
        # decay LR by 5% every 5 epochs
        scheduler_reg = lr_scheduler.StepLR(optimizer_reg,step_size=5, gamma = 0.95)
        self.last_error = 1e5
        for e in range(self.epoch_number):
            for train_tensor_x, train_tensor_y in train_loader:
                optimizer_reg.zero_grad()
                # train_tensor_x_distorted = self.batch_scg_distorted(train_tensor_x, noise=0.3, sampling_rate=100, noise_frequency=[5, 10, 100])
                # cast loader output to float32 on the training device
                train_tensor_x = torch.tensor(train_tensor_x,dtype=torch.float32,device=self.device)
                train_tensor_y = torch.tensor(train_tensor_y,dtype=torch.float32,device=self.device)
                train_y_pred_reg = self.reg_Net(train_tensor_x)
                train_loss_tensor_reg = loss_fn(train_tensor_y, train_y_pred_reg)
                train_loss_reg = train_loss_tensor_reg.item()
                train_loss_tensor = train_loss_tensor_reg
                train_loss = train_loss_reg
                reg_pred_arr = train_y_pred_reg.cpu().detach().numpy().squeeze()
                reg_gt_arr = train_tensor_y.cpu().detach().numpy().squeeze()
                train_mae = mean_absolute_error(reg_gt_arr, reg_pred_arr)
                # st()
                train_loss_tensor.backward()
                optimizer_reg.step()
            print(f'Epoch {e} train MSE: {train_loss} ')
            print(f'          train REG  MAE: {train_mae}')
            # checkpoint whenever the (last-batch) MAE improves
            self.error = train_mae
            if self.error < self.last_error:
                self.save_model(model_path='../models')
                self.last_error = self.error
            # st()
            # if e % 5 == 0 or e == self.epoch_number-1:
            #     loss_test = []
            #     pred_list = []
            #     gt_list = []
            #     for test_tensor_x, test_tensor_y in test_loader:
            #         test_tensor_x = torch.tensor(test_tensor_x,dtype=torch.float32,device=self.device)
            #         test_tensor_y = torch.tensor(test_tensor_y,dtype=torch.float32,device=self.device)
            #         test_y_pred_reg = self.reg_Net(test_tensor_x)
            #         test_loss_tensor_reg = loss_fn(test_tensor_y,test_y_pred_reg)
            #         test_loss_tensor = test_loss_tensor_reg
            #         reg_pred_arr = test_y_pred_reg.cpu().detach().numpy().squeeze()
            #         reg_gt_arr = test_tensor_y.cpu().detach().numpy().squeeze()
            #         gt_list.append(reg_gt_arr)
            #         pred_list.append(reg_pred_arr)
            #         test_loss = test_loss_tensor.item()
            #         loss_test.append(test_loss)
            #     print(f'Epoch {e} test MSE: {np.mean(loss_test)} ')
            #     print(f'          test REG  MAE: {mean_absolute_error(gt_list, pred_list)*self.s_norm} ')
            #     self.error = np.mean(loss_test)
            #     if self.error < self.last_error:
            #         self.save_model(model_path='../models')
            #         self.last_error = self.error
            # learning rate decay
            scheduler_reg.step()
            print('--------------------------------------------------------------')
            # import pdb; pdb.set_trace()
        # import pdb; pdb.set_trace()
    def save_model(self, model_path='../models'):
        """Persist reg_Net's state dict to <model_path>/FCN_model_param.pk."""
        print('save model...')
        # with open(os.path.join(model_path,"scaler_param.pk"),"wb+") as f:
        #     pickle.dump([self.scaler_x,self.scaler_y,self.window_len],f)
        # torch.save(self.ae_Net.state_dict(), os.path.join(model_path,"AE_model_param.pk"))
        torch.save(self.reg_Net.state_dict(), os.path.join(model_path,"FCN_model_param.pk"))
        # with open(os.path.join(model_path,"error.pk"),"wb+") as f:
        #     pickle.dump(self.error,f)
        print('save done!')
        # test_error_0 = self.error
    def load_model(self, model_path):
        """Load reg_Net's state dict from <model_path>; exits the process if absent."""
        # if os.path.exists(os.path.join(model_path,"scaler_param.pk")):
        #     with open(os.path.join(model_path,"scaler_param.pk"),"rb+") as f:
        #         [self.scaler_x,self.scaler_y] = pickle.load(f)
        # else:
        #     print(f'scaler_param.pk not exist!')
        #     quit()
        if os.path.exists(os.path.join(model_path,"FCN_model_param.pk")):
            # self.ae_Net.load_state_dict(torch.load(os.path.join(model_path,"AE_model_param.pk"),map_location=torch.device(self.device)))
            self.reg_Net.load_state_dict(torch.load(os.path.join(model_path,"FCN_model_param.pk"),map_location=torch.device(self.device)))
        else:
            print(f'model_param.pk not exist!')
            quit()
        print('Model parameters loaded!')
        # if os.path.exists(os.path.join(model_path,"error.pk")):
        #     with open(os.path.join(model_path,"error.pk"),"rb+") as f:
        #         self.error = pickle.load(f)
        # else:
        #     print(f'error.pk not exist!')
        #     quit()
    def predict(self, pred_x):
        """Run reg_Net on each row of pred_x; returns a numpy array of outputs."""
        pred_result = []
        for each_input in pred_x:
            train_tensor_x = torch.tensor(each_input,dtype=torch.float32,device=self.device)
            train_y_pred_reg_tensor = self.reg_Net(train_tensor_x)
            train_y_pred_reg_array = train_y_pred_reg_tensor.cpu().detach().numpy().squeeze()
            pred_result.append(train_y_pred_reg_array)
        return np.array(pred_result)
        # return np.round(self.train_y_pred)[0]
    def evaluate(self, X,Y):
        """Print per-target MAE and error variance of reg_Net on (X, Y)."""
        # self.data = data
        test_dataset = Initial_Dataset(X, Y)
        test_loader = DataLoader(test_dataset, 1, shuffle=True, num_workers=4)
        gt_list = []
        pred_list = []
        for test_tensor_x, test_tensor_y in test_loader:
            # test_tensor_x_distorted = self.batch_scg_distorted(test_tensor_x, noise=0.3, sampling_rate=100, noise_frequency=[5, 10, 100])
            # test_arr_x_distorted = test_tensor_x_distorted.cpu().detach().numpy().squeeze()
            test_tensor_x = torch.tensor(test_tensor_x,dtype=torch.float32,device=self.device)
            test_tensor_y = torch.tensor(test_tensor_y,dtype=torch.float32,device=self.device)
            # test_y_pred_ae, test_state_logit = self.ae_Net(test_tensor_x_distorted)
            test_y_pred_reg_tensor = self.reg_Net(test_tensor_x)
            test_y_pred_reg_arr = test_y_pred_reg_tensor.cpu().detach().numpy().squeeze()
            test_y_arr = test_tensor_y.cpu().detach().numpy().squeeze()
            gt_list.append(test_y_arr)
            pred_list.append(test_y_pred_reg_arr)
            # st()
        gt_arr = np.array(gt_list)
        pred_arr = np.array(pred_list)
        for i in range(gt_arr.shape[1]):
            mae = mean_absolute_error(gt_arr[:,i], pred_arr[:,i])
            var = np.var(abs(gt_arr[:,i] - pred_arr[:,i] ))
            print(f'Target {i+1}: MAE: {mae}, VAR: {var}')
class CNN_Model():
    """CNN regression wrapper.

    Builds a CNN_Net, trains it with Adam + MSE, checkpoints whenever the
    training MAE improves, and offers predict/evaluate helpers.
    """
    def __init__(self, input_shape, out_channels, kernel_size, output_features=1, layers=6, learning_rate=0.001, batch_size=32, epoch_number=500):
        super(CNN_Model, self).__init__()
        ####
        # All tensors are created on this device (GPU when available).
        self.device = torch.device('cuda' if cuda.is_available() else 'cpu')
        self.learning_rate = learning_rate
        self.batch_size = batch_size
        self.epoch_number = epoch_number
        # self.ae_Net = AE_Net(input_shape=input_shape)
        self.reg_Net = CNN_Net(input_shape=input_shape, layers=layers, output_features=output_features, out_channels=out_channels, kernel_size=kernel_size)
        # self.reg_Net = LstmAttentionNet()
        # self.ae_Net = self.ae_Net.to(device = self.device)
        self.reg_Net = self.reg_Net.to(device = self.device)
        print(f"Using device:{self.device}")
    # def fit(self, all_data, window_len, devide_factor, learning_rate=0.001, batch_size=32, epoch_number=500, CONTINUE_TRAINING = False):
    def fit(self, X, Y):
        """Train reg_Net on (X, Y) for epoch_number epochs.

        Saves a checkpoint to ../models whenever the most recent batch's
        train MAE improves on the best seen so far.
        """
        train_dataset = Initial_Dataset(X, Y)
        train_loader = DataLoader(train_dataset, self.batch_size, shuffle=True, num_workers=4)
        # test_loader = DataLoader(test_dataset, batch_size=1, shuffle=False, num_workers=4)
        ### training component
        loss_fn = torch.nn.MSELoss()
        optimizer_reg = optim.Adam(self.reg_Net.parameters(), lr=self.learning_rate)
        scheduler_reg = lr_scheduler.StepLR(optimizer_reg,step_size=5, gamma = 0.95)
        self.last_error = 1e5  # best train MAE seen so far (checkpoint threshold)
        for e in range(self.epoch_number):
            for train_tensor_x, train_tensor_y in train_loader:
                optimizer_reg.zero_grad()
                train_tensor_x = torch.tensor(train_tensor_x,dtype=torch.float32,device=self.device)
                train_tensor_y = torch.tensor(train_tensor_y,dtype=torch.float32,device=self.device)
                train_y_pred_reg = self.reg_Net(train_tensor_x)
                train_loss_tensor_reg = loss_fn(train_tensor_y, train_y_pred_reg)
                train_loss_reg = train_loss_tensor_reg.item()
                train_loss_tensor = train_loss_tensor_reg
                train_loss = train_loss_reg
                reg_pred_arr = train_y_pred_reg.cpu().detach().numpy().squeeze()
                reg_gt_arr = train_tensor_y.cpu().detach().numpy().squeeze()
                train_mae = mean_absolute_error(reg_gt_arr, reg_pred_arr)
                # st()
                train_loss_tensor.backward()
                optimizer_reg.step()
            # NOTE(review): the loss/MAE printed below come from the *last*
            # batch of the epoch only, not an epoch-wide average.
            print(f'Epoch {e} train MSE: {train_loss} ')
            print(f'        train REG MAE: {train_mae}')
            self.error = train_mae
            if self.error < self.last_error:
                self.save_model(model_path='../models')
                self.last_error = self.error
            # learning rate decay
            scheduler_reg.step()
            print('--------------------------------------------------------------')
            # import pdb; pdb.set_trace()
        # import pdb; pdb.set_trace()
    def save_model(self, model_path='../models'):
        """Persist reg_Net's state dict to <model_path>/CNN_model_param.pk."""
        print('saving model...')
        torch.save(self.reg_Net.state_dict(), os.path.join(model_path,"CNN_model_param.pk"))
        print('save done!')
    def load_model(self, model_path):
        """Load reg_Net's state dict; exits the process if the file is absent."""
        if os.path.exists(os.path.join(model_path,"CNN_model_param.pk")):
            self.reg_Net.load_state_dict(torch.load(os.path.join(model_path,"CNN_model_param.pk"),map_location=torch.device(self.device)))
        else:
            print(f'model_param.pk not exist!')
            quit()
        print('Model parameters loaded!')
    def predict(self, pred_x):
        """Return stacked network outputs for each sample in pred_x."""
        pred_result = []
        for each_input in pred_x:
            train_tensor_x = torch.tensor(each_input,dtype=torch.float32,device=self.device)
            train_y_pred_reg_tensor = self.reg_Net(train_tensor_x)
            train_y_pred_reg_array = train_y_pred_reg_tensor.cpu().detach().numpy().squeeze()
            pred_result.append(train_y_pred_reg_array)
        return np.array(pred_result)
        # return np.round(self.train_y_pred)[0]
    def evaluate(self, X,Y):
        """Print per-target MAE and absolute-error variance on (X, Y)."""
        # self.data = data
        test_dataset = Initial_Dataset(X, Y)
        test_loader = DataLoader(test_dataset, 1, shuffle=True, num_workers=4)
        gt_list = []
        pred_list = []
        for test_tensor_x, test_tensor_y in test_loader:
            # test_tensor_x_distorted = self.batch_scg_distorted(test_tensor_x, noise=0.3, sampling_rate=100, noise_frequency=[5, 10, 100])
            # test_arr_x_distorted = test_tensor_x_distorted.cpu().detach().numpy().squeeze()
            test_tensor_x = torch.tensor(test_tensor_x,dtype=torch.float32,device=self.device)
            test_tensor_y = torch.tensor(test_tensor_y,dtype=torch.float32,device=self.device)
            # test_y_pred_ae, test_state_logit = self.ae_Net(test_tensor_x_distorted)
            test_y_pred_reg_tensor = self.reg_Net(test_tensor_x)
            test_y_pred_reg_arr = test_y_pred_reg_tensor.cpu().detach().numpy().squeeze()
            test_y_arr = test_tensor_y.cpu().detach().numpy().squeeze()
            gt_list.append(test_y_arr)
            pred_list.append(test_y_pred_reg_arr)
        # st()
        gt_arr = np.array(gt_list)
        pred_arr = np.array(pred_list)
        for i in range(gt_arr.shape[1]):
            mae = mean_absolute_error(gt_arr[:,i], pred_arr[:,i])
            var = np.var(abs(gt_arr[:,i] - pred_arr[:,i] ))
            print(f'Target {i+1}: MAE: {mae}, VAR: {var}')
class LSTM_Model():
    """LSTM-with-attention regression wrapper.

    Trains an LstmAttentionNet with Adam + MSE, checkpoints whenever the
    training MAE improves, and offers predict/evaluate helpers.
    """
    def __init__(self, num_layers=5, hidden_size=100, output_features=4, learning_rate=0.001, batch_size=32, epoch_number=500):
        super(LSTM_Model, self).__init__()
        ####
        # All tensors are created on this device (GPU when available).
        self.device = torch.device('cuda' if cuda.is_available() else 'cpu')
        self.learning_rate = learning_rate
        self.batch_size = batch_size
        self.epoch_number = epoch_number
        self.reg_Net = LstmAttentionNet(num_layers=num_layers, hidden_size=hidden_size, output_features=output_features)
        self.reg_Net = self.reg_Net.to(device = self.device)
        print(f"Using device:{self.device}")
    def fit(self, X, Y):
        """Train reg_Net on (X, Y); checkpoint to ../models on MAE improvement."""
        train_dataset = Initial_Dataset(X, Y)
        train_loader = DataLoader(train_dataset, self.batch_size, shuffle=True, num_workers=4)
        # test_loader = DataLoader(test_dataset, batch_size=1, shuffle=False, num_workers=4)
        ### training component
        loss_fn = torch.nn.MSELoss()
        optimizer_reg = optim.Adam(self.reg_Net.parameters(), lr=self.learning_rate)
        scheduler_reg = lr_scheduler.StepLR(optimizer_reg,step_size=5, gamma = 0.95)
        self.last_error = 1e5  # best train MAE seen so far (checkpoint threshold)
        for e in range(self.epoch_number):
            for train_tensor_x, train_tensor_y in train_loader:
                optimizer_reg.zero_grad()
                train_tensor_x = torch.tensor(train_tensor_x,dtype=torch.float32,device=self.device)
                train_tensor_y = torch.tensor(train_tensor_y,dtype=torch.float32,device=self.device)
                train_y_pred_reg = self.reg_Net(train_tensor_x)
                # st()
                train_loss_tensor_reg = loss_fn(train_tensor_y, train_y_pred_reg)
                train_loss_reg = train_loss_tensor_reg.item()
                train_loss_tensor = train_loss_tensor_reg
                train_loss = train_loss_reg
                reg_pred_arr = train_y_pred_reg.cpu().detach().numpy().squeeze()
                reg_gt_arr = train_tensor_y.cpu().detach().numpy().squeeze()
                train_mae = mean_absolute_error(reg_gt_arr, reg_pred_arr)
                # st()
                train_loss_tensor.backward()
                optimizer_reg.step()
            # NOTE(review): the loss/MAE printed below come from the *last*
            # batch of the epoch only, not an epoch-wide average.
            print(f'Epoch {e} train MSE: {train_loss} ')
            print(f'        train REG MAE: {train_mae}')
            self.error = train_mae
            if self.error < self.last_error:
                self.save_model(model_path='../models')
                self.last_error = self.error
            # learning rate decay
            scheduler_reg.step()
            print('--------------------------------------------------------------')
            # import pdb; pdb.set_trace()
        # import pdb; pdb.set_trace()
    def save_model(self, model_path='../models'):
        """Persist reg_Net's state dict to <model_path>/LSTM_model_param.pk."""
        print('saving model...')
        torch.save(self.reg_Net.state_dict(), os.path.join(model_path,"LSTM_model_param.pk"))
        print('save done!')
    def load_model(self, model_path='../models'):
        """Load reg_Net's state dict; exits the process if the file is absent."""
        if os.path.exists(os.path.join(model_path,"LSTM_model_param.pk")):
            self.reg_Net.load_state_dict(torch.load(os.path.join(model_path,"LSTM_model_param.pk"),map_location=torch.device(self.device)))
        else:
            print(f'model_param.pk not exist!')
            quit()
        print('Model parameters loaded!')
    def predict(self, pred_x):
        """Return stacked network outputs for each sample in pred_x."""
        pred_result = []
        for each_input in pred_x:
            train_tensor_x = torch.tensor(each_input,dtype=torch.float32,device=self.device)
            train_y_pred_reg_tensor = self.reg_Net(train_tensor_x)
            train_y_pred_reg_array = train_y_pred_reg_tensor.cpu().detach().numpy().squeeze()
            pred_result.append(train_y_pred_reg_array)
        return np.array(pred_result)
    def evaluate(self, X,Y):
        """Print per-target MAE and absolute-error variance on (X, Y)."""
        # self.data = data
        test_dataset = Initial_Dataset(X, Y)
        test_loader = DataLoader(test_dataset, 1, shuffle=True, num_workers=4)
        gt_list = []
        pred_list = []
        for test_tensor_x, test_tensor_y in test_loader:
            # test_tensor_x_distorted = self.batch_scg_distorted(test_tensor_x, noise=0.3, sampling_rate=100, noise_frequency=[5, 10, 100])
            # test_arr_x_distorted = test_tensor_x_distorted.cpu().detach().numpy().squeeze()
            test_tensor_x = torch.tensor(test_tensor_x,dtype=torch.float32,device=self.device)
            test_tensor_y = torch.tensor(test_tensor_y,dtype=torch.float32,device=self.device)
            # test_y_pred_ae, test_state_logit = self.ae_Net(test_tensor_x_distorted)
            test_y_pred_reg_tensor = self.reg_Net(test_tensor_x)
            test_y_pred_reg_arr = test_y_pred_reg_tensor.cpu().detach().numpy().squeeze()
            test_y_arr = test_tensor_y.cpu().detach().numpy().squeeze()
            gt_list.append(test_y_arr)
            pred_list.append(test_y_pred_reg_arr)
        # st()
        gt_arr = np.array(gt_list)
        pred_arr = np.array(pred_list)
        for i in range(gt_arr.shape[1]):
            mae = mean_absolute_error(gt_arr[:,i], pred_arr[:,i])
            var = np.var(abs(gt_arr[:,i] - pred_arr[:,i] ))
            print(f'Target {i+1}: MAE: {mae}, VAR: {var}')
def main():
    """Train an LSTM regressor on a small slice of the real dataset, reload
    the best checkpoint, and evaluate it on the held-out test split."""
    scaler = preprocessing.StandardScaler()
    # dataset = np.load('../../data/real_data/data_label_train.1000_6.6_6.npy')
    dataset = np.load('../../data/real_data/data_label_train.1000_6.npy')[:10,:]
    X = dataset[:,:-6]
    Y = dataset[:,-4:-2]
    # BUG FIX: X_test / Y_test were referenced by evaluate() below, but their
    # loading was commented out, so this function raised NameError. Restore
    # the test-split loading that the evaluation call depends on.
    dataset_test = np.load('../../data/real_data/data_label_test.1000_6.6_6.npy')
    X_test = dataset_test[:,:-6]
    Y_test = dataset_test[:,-4:-2]
    # X = scaler.fit_transform(X)
    # X_test = scaler.transform(X_test)
    # st()
    # dataset_time_sort = dataset[np.argsort( (dataset[:, -5]) )]
    # np.random.shuffle(dataset)
    # auto_encoder = FCN_Model(input_features=6, output_features=2, layers=30, neurons=128, learning_rate=0.0001, batch_size=32, epoch_number=500)
    # auto_encoder = CNN_Model(out_channels=[64,64,32], kernel_size=5, output_features=2, layers=3, learning_rate=0.001, batch_size=32, epoch_number=500)
    auto_encoder = LSTM_Model(num_layers=1, hidden_size=100, output_features=2, learning_rate=0.001, batch_size=32, epoch_number=500)
    auto_encoder.fit(X, Y)
    auto_encoder.load_model('../models')
    auto_encoder.evaluate(X_test, Y_test)
if __name__ == '__main__':
    main()
|
20,439 | 6713a59f64b4605e44073015da7ff9aa8f97fefb | import cv2 as cv
import numpy as np
import pytesseract
from random import shuffle
from imutils import contours
from skimage.segmentation import clear_border
# Preprocess given image
def preprocess():
    """Binarize the global input image, find the largest 4-vertex contour
    (assumed to be the sudoku board) and return its warped top-down view
    via perspective_transform()."""
    global src
    src_gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY)
    src_gray = cv.GaussianBlur(src_gray, (5, 5), 0)  # to remove noise
    src_gray = cv.adaptiveThreshold(src_gray, 255, cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY_INV, 13, 2)  # for binarization to make the board clear
    edge = cv.Canny(src_gray, 150, 250)  # to find edges
    contours, _ = cv.findContours(edge, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
    contours = sorted(contours, key=cv.contourArea, reverse=True)  # sort contours from the largest area(desc. order)
    #cv.drawContours(src, contours, 0, (0, 255, 0), 3)
    for i in range(len(contours)):
        approx = cv.approxPolyDP(contours[i], cv.arcLength(contours[i], True) * 0.02, True)
        # if the polygon has 4 vertices, that can be considered as a rectangle
        if len(approx) == 4:
            break  # first one must be the largest
    # NOTE(review): if no contour has 4 vertices, the last contour's approx is
    # used as-is — the code assumes a board is always found.
    approx = approx.reshape(len(approx), np.size(approx[0]))
    # Order the four corners as TL, TR, BR, BL via the x+y / x-y extremes.
    xSubY = np.subtract(approx[:, 0], approx[:, 1])
    xAddY = approx.sum(axis=1)
    src_pts = np.zeros((4, 2), dtype=np.float32)
    src_pts[0, :] = approx[np.where(xAddY == np.min(xAddY))].reshape(2)  # min(x+y)
    src_pts[1, :] = approx[np.where(xSubY == np.max(xSubY))].reshape(2)  # max(x-y)
    src_pts[2, :] = approx[np.where(xAddY == np.max(xAddY))].reshape(2)  # max(x+y)
    src_pts[3, :] = approx[np.where(xSubY == np.min(xSubY))].reshape(2)  # min(x-y)
    return perspective_transform(src_pts, src_gray)
# Extract the game board from given image
def perspective_transform(src_pts, src_gray):
    """Warp both the colour image and the binarized image so the board
    (corners TL, TR, BR, BL in *src_pts*) fills the whole frame.

    Returns (preprocessed, editted): the warped binary board used for
    digit extraction, and the warped colour board used for drawing.
    """
    global src
    # Output size: the longer of each pair of opposite board edges.
    w = int(max(abs(src_pts[1][0] - src_pts[0][0]), abs(src_pts[2][0] - src_pts[3][0])))
    h = int(max(abs(src_pts[1][1] - src_pts[2][1]), abs(src_pts[0][1] - src_pts[3][1])))
    corner_targets = np.array(
        [[0, 0], [w - 1, 0], [w - 1, h - 1], [0, h - 1]],
        dtype=np.float32)
    warp = cv.getPerspectiveTransform(src_pts, corner_targets)
    editted = cv.warpPerspective(src, warp, (w, h))            # board to draw the result on
    preprocessed = cv.warpPerspective(src_gray, warp, (w, h))  # board to read digits from
    return preprocessed, editted
# Divide the grid into 9*9=81 cells
def divide_grid(preprocessed):
    """Locate the 81 cells of the warped board, OCR each one into the global
    `board`, and return the per-cell contours (row-major) for later drawing."""
    global board
    cnts = cv.findContours(preprocessed, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
    # OpenCV 3 returns (img, contours, hierarchy); OpenCV 4 returns
    # (contours, hierarchy) — take the contour list either way.
    cnts = cnts[0] if len(cnts) == 2 else cnts[1]
    grid = preprocessed.copy()
    for c in cnts:
        area = cv.contourArea(c)
        if area < 800:
            # Erase small blobs (digits, noise) so only grid lines remain.
            cv.drawContours(grid, [c], -1, 0, -1)
    # get vertical, horizontal lines
    vertical_kernel = cv.getStructuringElement(cv.MORPH_RECT, (1, 5))
    grid = cv.morphologyEx(grid, cv.MORPH_CLOSE, vertical_kernel, iterations=9)
    horizontal_kernel = cv.getStructuringElement(cv.MORPH_RECT, (5, 1))
    grid = cv.morphologyEx(grid, cv.MORPH_CLOSE, horizontal_kernel, iterations=4)
    # sort cells from top-bottom & left-right and store them in an array
    grid = 255 - grid  # invert so each cell interior becomes its own contour
    cnts = cv.findContours(grid, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
    cnts = cnts[0] if len(cnts) == 2 else cnts[1]
    (cnts, _) = contours.sort_contours(cnts, method="top-to-bottom")
    sudoku_rows = []
    row = []
    for (i, c) in enumerate(cnts, 1):
        area = cv.contourArea(c)
        if area < 50000:
            # Keep cell-sized contours; oversized (whole-board) ones are skipped.
            row.append(c)
        if i % 9 == 0:
            # Every 9 contours form one row; order that row left to right.
            (cnts, _) = contours.sort_contours(row, method="left-to-right")
            sudoku_rows.append(cnts)
            row = []
    # Extract numbers from each cell and store them in a sudoku board
    for i in range(9):
        for j in range(9):
            (x, y, w, h) = cv.boundingRect(sudoku_rows[i][j])
            cell = preprocessed[y:y + h, x:x + w]
            board[i][j] = find_number(cell)
    return sudoku_rows
# Extract number from a cell
def find_number(cell):
    """OCR a single cell image and return its digit (1-9), or 0 if empty.

    The cell border is cleared and the image opened morphologically before
    OCR; only cells whose largest contour is big enough are sent to
    pytesseract, and only a leading character in '1'..'9' is accepted.
    """
    cell = clear_border(cell)
    kernel = cv.getStructuringElement(cv.MORPH_RECT, (3, 3))
    cell = cv.morphologyEx(cell, cv.MORPH_OPEN, kernel, iterations=1)
    contours, _ = cv.findContours(cell, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
    num = 0
    if len(contours) != 0:
        contour = max(contours, key=cv.contourArea)
        if cv.contourArea(contour) > 100:
            text = pytesseract.image_to_string(cell, lang="eng", config='--psm 6 --oem 3')
            # BUG FIX: `list(text)[0]` raised IndexError when OCR returned an
            # empty string; guard on `text` before indexing.
            if text and '1' <= text[0] <= '9':
                num = int(text[0])
    return num
# See if the number is possible
def possible(y, x, num):
    """Return True if placing *num* at row *y*, column *x* keeps the board valid."""
    global board
    # The candidate must not already appear in the same column or row.
    for i in range(9):
        if board[i][x] == num or board[y][i] == num:
            return False
    # Nor in the 3x3 box containing (y, x).
    box_x = x - x % 3
    box_y = y - y % 3
    for dy in range(3):
        for dx in range(3):
            if board[box_y + dy][box_x + dx] == num:
                return False
    return True
# Solve the puzzle
def solve():
    """Backtracking sudoku solver.

    Fills empty cells (zeros) in the global `board` recursively; whenever the
    grid is completely filled, the answer is recorded in the global
    `solution`. `board` itself is restored to its input state on return.
    """
    global board, solution
    # BUG FIX: the original `shuffle(list(numbers))` shuffled a throwaway
    # copy of a NumPy range, so candidates were always tried in order.
    # Use a plain list and shuffle it in place.
    numbers = list(range(1, 10))
    for y in range(9):
        for x in range(9):
            if board[y][x] == 0:
                shuffle(numbers)
                for num in numbers:
                    if possible(y, x, num):
                        board[y][x] = num  # tentative placement (solved)
                        solve()  # look for another empty element(recursive)
                        board[y][x] = 0  # if an empty element is not solvable, make the "already solved" states empty
                return  # no number is possible
    solution = board.copy()  # grid is full: record the completed answer
# Show result in game board
def show_result(locations, editted):
    """Overlay the solved digits onto the warped colour board and display it."""
    global solution
    for row in range(9):
        for col in range(9):
            (x, y, w, h) = cv.boundingRect(locations[row][col])
            # Offset the text into the cell interior, green, thickness 3.
            cv.putText(editted, str(solution[row][col]), (x+10, y+35), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 3)
    cv.imshow('solution', editted)
# Entry point: load the photo, extract and solve the board, show the result.
src = cv.imread("sudoku.jpg")
if src is None:
    print('Image load failed')
    exit()
#cv.imshow('src', src)
board = np.zeros((9, 9), dtype=int) # initialize the game board
solution = np.zeros((9, 9), dtype=int) # store solution
preprocessed, edited = preprocess() # preprocessed image
locations = divide_grid(preprocessed) # location of each cells
solve()
cv.imshow('game', src)
show_result(locations, edited)
cv.waitKey()
cv.destroyAllWindows()
|
20,440 | ebcb45881e69b640ded263c5d2656cff3d270630 | import scriptDeSaveELoadDeRede as sslr
import NetWorkApp as nwa
import treinadorDeRede as tr
# NOTE(review): comments below translated from Portuguese; label meanings are
# inferred from the one-hot sketches — confirm with the original author.
banco = 'E:\pacote1\WorkSpaceDeRede\processoDeTreinoDeRede\Bancos2'  # workspace/database path
numero = 0
# answer     [0, 0, 0, 1]
# cat        [1, 0, 0, 0]
# pretty     [0, 1, 0, 0]
# is-or-not  [0, 0, 1, 0]
rede1 = nwa.RedeRecorrente(banco, numero)  # recurrent network bound to the workspace
#rede1.criarRede([10, 10], 1, 1)
d = nwa.Data
# XOR-style truth table used as training data.
dados = [
    d([0, 0], [0]),
    d([1, 0], [1]),
    d([0, 1], [1]),
    d([1, 1], [0]),
]
try:
    #rede1.treinar(dados)
    pass
except:
    # NOTE(review): bare except around a no-op body — currently dead code, but
    # if training is re-enabled it will silently swallow every error.
    pass
print(rede1.processar([[1], [0], [1], [1], [1]]))
|
20,441 | 2efeb6a278c45c629c090378ce61a268fbdaafb2 | #https://projecteuler.net/problem=1
#If we list all the natural numbers below 10 that are multiples of 3 or 5,
#we get 3, 5, 6 and 9. The sum of these multiples is 23.
#Find the sum of all the multiples of 3 or 5 below 1000.
maxValue = 1000
sum = 0
i = 1
while i < maxValue:
if i % 3 == 0:
sum += i
elif i % 5 == 0:
sum += i
i += 1
print("Sum: %d" % sum)
|
20,442 | a5407f4c2a5ae03b07a09211278e3bd27e4129ea | from tkinter import *
from tkinter.ttk import *
class ScrollSelect(object):
    """A Listbox with an attached vertical scrollbar, packed into *master*.

    *list* is the listvariable backing the listbox (name kept for caller
    compatibility even though it shadows the builtin); *onselect*, when
    given, is called with the selected index on every selection event.
    """
    def __init__(self, master, list, onselect=None):
        self.master = master
        self.list = list
        # Cross-wire scrollbar and listbox: the bar drives yview while the
        # listbox reports its position back to the bar.
        self.scroll = Scrollbar(self.master, orient=VERTICAL)
        self.listbox = Listbox(self.master, listvariable=self.list, yscrollcommand=self.scroll.set, height=6)
        self.scroll.config(command=self.listbox.yview)
        self.scroll.pack(side=RIGHT, fill=Y)
        self.listbox.pack(side=LEFT, fill=BOTH, expand=1)
        def relay_selection(evt):
            chosen = int(evt.widget.curselection()[0])
            if onselect is not None:
                onselect(chosen)
        self.listbox.bind('<<ListboxSelect>>', relay_selection)
class DisplayDocument(object):
    """Placeholder widget wrapper; currently only records *master*.

    NOTE(review): the *doc* argument is accepted but never stored or used —
    presumably rendering is yet to be implemented.
    """
    def __init__(self, master, doc):
        self.master = master
class MergeDisplay(object):
    """Three-pane merge UI: merged output (left), source document (middle),
    and a scrollable selection list plus a button area (right)."""
    def __init__(self, master, merge_tuple, result):
        self.master = master
        self.merge_tuple = merge_tuple
        self.result = result  # NOTE(review): stored but not read in the visible code
        self.outer_frame = Frame(self.master, padding=(5, 5, 12, 0), relief='groove')
        self.outer_frame.grid(column=0, row=0, sticky=(N, W, E, S))
        # Let the outer frame absorb all resizing of the toplevel window.
        self.master.grid_columnconfigure(0, weight=1)
        self.master.grid_rowconfigure(0, weight=1)
        self.merge_frame = Frame(self.outer_frame)
        self.merge_frame.grid(column=0, row=0, rowspan=10, sticky=(N, W, E, S))
        self.from_frame = Frame(self.outer_frame)
        self.from_frame.grid(column=1, row=0, rowspan=10, sticky=(N, W, E, S))
        # Right column: selection list on top (7 rows), buttons below (3 rows).
        self.select_frame = Frame(self.outer_frame, padding=(5, 5, 0, 0))
        self.select_frame.grid(column=2, row=0, rowspan=7, sticky=(N, W, E, S))
        self.button_frame = Frame(self.outer_frame, padding=(5, 5, 0, 0))
        self.button_frame.grid(column=2, row=7, rowspan=3, sticky=(N, W, E, S))
        # Content columns stretch 3x as much as the selection column.
        self.outer_frame.grid_columnconfigure(0, weight=3)
        self.outer_frame.grid_columnconfigure(1, weight=3)
        self.outer_frame.grid_columnconfigure(2, weight=1)
        for i in range(10):
            self.outer_frame.grid_rowconfigure(i, weight=1)
        selection_list = StringVar(value=tuple((str(i) for i in self.merge_tuple)))
        self.select = ScrollSelect(self.select_frame, selection_list)
class TestDisplay(object):
    """Minimal iterable/indexable wrapper around a mapping, used for testing.

    The parameter names `str` and `dict` shadow builtins; they are kept
    unchanged for backward compatibility with existing callers.
    """
    def __init__(self, str, dict):
        self.str = str
        self.dict = dict
    def __iter__(self):
        return iter(self.dict)
    def __getitem__(self, i):
        # BUG FIX: previously returned `dict[i]` — subscripting the *builtin*
        # type, which raised TypeError. Index the stored mapping instead.
        return self.dict[i]
# Demo entry point: show a MergeDisplay over four dummy alternatives.
root = Tk()
merge = ("first", "second", "third", "fourth")
# NOTE(review): an empty dict is passed as `result`; MergeDisplay stores it
# but never reads it in the visible code.
mergedisplay = MergeDisplay(root, merge, dict())
root.mainloop()
|
20,443 | 9ff74c9fb3d8520b946b54490013ff0e402a6ffb | from django.shortcuts import get_object_or_404, render
from django.http import JsonResponse
from django.views import generic
from rest_framework.views import APIView
from fscohort.models import Student
from django.core.serializers import serialize
import json
from django.views.decorators.csrf import csrf_exempt
from rest_framework.response import Response
from rest_framework.decorators import api_view
from .serializers import StudentSerializer
from rest_framework import status, generics, mixins
def home_api(request):
    """Return a small hard-coded profile as JSON (demo endpoint)."""
    payload = {
        "name" : "Ramazan",
        "address" : "Clarusway",
        "skills" : ["python", "django"]
    }
    return JsonResponse(payload)
# def student_list_api(request):
# if request.method == "GET":
# students = Student.objects.all()
# students_count = Student.objects.count()
# student_list = []
# for student in students:
# student_list.append({
# "firstname" : student.first_name,
# "lastname" : student.last_name,
# "number" : student.number,
# })
# data = {
# "students" : student_list,
# "count" : students_count
# }
# return JsonResponse(data)
# def student_list_api(request):
# if request.method == "GET":
# students = Student.objects.all()
# students_count = Student.objects.count()
# student_data = serialize("python", students)
# data = {
# "students" : student_data,
# "count" : students_count
# }
# return JsonResponse(data)
# @csrf_exempt
# def student_create_api(request):
# if request.method == "POST":
# post_data = json.loads(request.body)
# print(post_data)
# name = post_data.get("first_name")
# lastname = post_data.get("last_name")
# number = post_data.get("number")
# student_data = {
# "first_name" : name,
# "last_name" : lastname,
# "number" : number,
# }
# student_obj = Student.objects.create(**student_data)
# data = {
# "message" : f"Student {student_obj.first_name} created successfully!"
# }
# return JsonResponse(data, status=201)
@api_view(["GET", "POST"])
def student_list_create_api(request):
    """List every student (GET) or create a new one (POST)."""
    if request.method == "GET":
        serializer = StudentSerializer(Student.objects.all(), many=True)
        return Response(serializer.data)
    elif request.method == "POST":
        serializer = StudentSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(
            {"message" : "Student created successfully!"},
            status=status.HTTP_201_CREATED,
        )
@api_view(["GET", "PUT", "DELETE"])
def student_get_update_delete(request, id):
    """Retrieve (GET), replace (PUT) or remove (DELETE) one student by id."""
    student = get_object_or_404(Student, id=id)
    method = request.method
    if method == "GET":
        return Response(StudentSerializer(student).data)
    if method == "PUT":
        serializer = StudentSerializer(student, data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response({"message" : "Student updated successfully!"})
    if method == "DELETE":
        student.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
class StudentList(APIView):
    """Class-based list (GET) / create (POST) endpoint for Student.

    NOTE(review): this name is re-declared further down the module with a
    generics-based implementation, which wins at import time.
    """
    def get(self, request):
        queryset = Student.objects.all()
        return Response(StudentSerializer(queryset, many=True).data)
    def post(self, request):
        serializer = StudentSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)
class StudentGetUpdateDelete(APIView):
    """Class-based retrieve/update/delete endpoint for a single Student."""
    def get_object(self, id):
        """Fetch the Student with the given id.

        NOTE(review): on a missing id this *returns* a Response instead of a
        model instance, so the calling handlers would pass that Response to
        the serializer; raising rest_framework.exceptions.NotFound would be
        the conventional fix, but behavior is left unchanged here.
        """
        try:
            return Student.objects.get(id=id)
        except Student.DoesNotExist:
            # BUG FIX: was `status.status.HTTP_404_NOT_FOUND`, which raised
            # AttributeError (the status module has no `status` attribute).
            return Response(status=status.HTTP_404_NOT_FOUND)
    def get(self, request, id):
        student = self.get_object(id)
        serializer = StudentSerializer(student)
        return Response(serializer.data)
    def put(self, request, id):
        student = self.get_object(id)
        serializer = StudentSerializer(student, data=request.data)
        if serializer.is_valid():
            serializer.save()
            data = {
                "message" : "Student updated"
            }
            return Response(data)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    def delete(self, request, id):
        student = self.get_object(id)
        student.delete()
        # BUG FIX: same `status.status.*` typo as in get_object.
        return Response(status=status.HTTP_204_NO_CONTENT)
# class StudentList(generics.ListAPIView):
# serializer_class = StudentSerializer
# queryset = Student.objects.all()
class StudentList(generics.ListCreateAPIView):
    """GET = list, POST = create, via DRF's generic list/create view."""
    queryset = Student.objects.all()
    serializer_class = StudentSerializer
class StudentGetUpdateDelete(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve/update/delete one student, looked up by the `id` URL kwarg."""
    queryset = Student.objects.all()
    serializer_class = StudentSerializer
    lookup_field = "id"
class Student(generics.GenericAPIView, mixins.ListModelMixin, mixins.CreateModelMixin, mixins.UpdateModelMixin, mixins.DestroyModelMixin, mixins.RetrieveModelMixin):
    # WARNING(review): this class re-binds the module-level name `Student`,
    # shadowing the model imported at the top of the file. `queryset` below is
    # still evaluated against the model (the class body runs before the name is
    # rebound), but any later runtime lookup of `Student` — e.g.
    # get_object_or_404(Student, ...) in student_get_update_delete above —
    # will now resolve to this view class. Consider renaming this class.
    serializer_class = StudentSerializer
    queryset = Student.objects.all()
    lookup_field = "id"
    def get(self, request, id=None):
        # Retrieve a single object when an id is supplied, otherwise list all.
        if id:
            return self.retrieve(request)
        else:
            return self.list(request)
    def post(self, request):
        return self.create(request)
    def put(self, request, id=None):
        return self.update(request, id)
    def delete(self, request, id):
        return self.destroy(request, id)
|
20,444 | 2c82163b3c89243f73c7db564c43c0bc6e031844 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-08-01 06:45
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated Django migration: introduces a Validation model keyed
    # one-to-one on Score, and replaces ValAssignment's per-VAC tracking
    # fields with score-based equivalents.

    dependencies = [
        ('vacs', '0021_auto_20170731_0113'),
    ]

    operations = [
        migrations.CreateModel(
            name='Validation',
            fields=[
                # The Score link doubles as the primary key (one-to-one).
                ('score', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='vacs.Score')),
                ('selected_lexicons', models.CharField(default='empty', max_length=100)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('last_updated', models.DateTimeField(auto_now=True)),
            ],
        ),
        migrations.RemoveField(
            model_name='valassignment',
            name='current_vac',
        ),
        migrations.RemoveField(
            model_name='valassignment',
            name='evaluated_vacs',
        ),
        migrations.AddField(
            model_name='valassignment',
            name='current_score',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='current_val_assigned', to='vacs.Score'),
        ),
        migrations.AddField(
            model_name='valassignment',
            name='evaluated_scores',
            field=models.ManyToManyField(blank=True, related_name='evaluated_val_assigned', to='vacs.Score'),
        ),
    ]
|
20,445 | f7f66f11e1cb558a772238073f753a5940365e41 | #!/usr/bin/env python
# Copyright (c) 2018 Intel Labs.
# authors: German Ros (german.ros@intel.com)
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
""" This module implements an agent that roams around a track following random
waypoints and avoiding other vehicles.
The agent also responds to traffic lights. """
import math
from typing import List
import carla
from carla.libcarla import ActorList, Actor
from agents.navigation.agent import Agent, AgentState
from agents.navigation.local_planner import LocalPlanner
from agents.navigation.global_route_planner import GlobalRoutePlanner
from agents.navigation.global_route_planner_dao import GlobalRoutePlannerDAO
from agents.tools.misc import get_nearest_traffic_light, get_speed
class BasicAgent(Agent):
"""
BasicAgent implements a basic agent that navigates scenes to reach a given
target destination. This agent respects traffic lights and other vehicles.
"""
def __init__(self, vehicle, target_speed=20):
"""
:param vehicle: actor to apply to local planner logic onto
"""
super(BasicAgent, self).__init__(vehicle)
self.stopping_for_traffic_light = False
self._proximity_threshold = 10.0 # meters
self._state = AgentState.NAVIGATING
args_lateral_dict = {
'K_P': 0.75,
'K_D': 0.001,
'K_I': 1,
'dt': 1.0 / 20.0}
self._local_planner = LocalPlanner(
self._vehicle, opt_dict={'target_speed': target_speed,
'lateral_control_dict': args_lateral_dict})
self._hop_resolution = 2.0
self._path_seperation_hop = 2
self._path_seperation_threshold = 0.5
self._target_speed = target_speed
self._grp = None
self.drawn_lights = False
self.is_affected_by_traffic_light = False
def set_destination(self, location):
"""
This method creates a list of waypoints from agent's position to destination location
based on the route returned by the global router
"""
start_waypoint = self._map.get_waypoint(self._vehicle.get_location())
end_waypoint = self._map.get_waypoint(
carla.Location(location[0], location[1], location[2]))
route_trace = self._trace_route(start_waypoint, end_waypoint)
assert route_trace
self._local_planner.set_global_plan(route_trace)
def _trace_route(self, start_waypoint, end_waypoint):
"""
This method sets up a global router and returns the optimal route
from start_waypoint to end_waypoint
"""
# Setting up global router
if self._grp is None:
dao = GlobalRoutePlannerDAO(self._vehicle.get_world().get_map(), self._hop_resolution)
grp = GlobalRoutePlanner(dao)
grp.setup()
self._grp = grp
# Obtain route plan
route = self._grp.trace_route(
start_waypoint.transform.location,
end_waypoint.transform.location)
return route
def run_step(self, debug=False):
"""
Execute one step of navigation.
:return: carla.VehicleControl
"""
# is there an obstacle in front of us?
hazard_detected = False
# retrieve relevant elements for safe navigation, i.e.: traffic lights
# and other vehicles
actor_list = self._world.get_actors() # type: ActorList
vehicle_list = actor_list.filter("*vehicle*") # type: List[Actor]
pedestrians_list = actor_list.filter("*walker.pedestrian*")
lights_list = actor_list.filter("*traffic_light*") # type: List[carla.TrafficLight]
if not self.drawn_lights and debug:
for light in lights_list:
self._world.debug.draw_box(
carla.BoundingBox(light.trigger_volume.location + light.get_transform().location,
light.trigger_volume.extent * 2),
carla.Rotation(0, 0, 0), 0.05, carla.Color(255, 128, 0, 0), 0)
self.drawn_lights = True
# check possible obstacles
vehicle_state, vehicle = self._is_vehicle_hazard(vehicle_list)
if vehicle_state:
if debug:
print('!!! VEHICLE BLOCKING AHEAD [{}])'.format(vehicle.id))
self._state = AgentState.BLOCKED_BY_VEHICLE
hazard_detected = True
# Check for pedestrians
pedestrian_state, pedestrian = self._is_pedestrian_hazard(pedestrians_list)
if pedestrian_state:
if debug:
print('!!! PEDESTRIAN BLOCKING AHEAD [{}])'.format(pedestrian.id))
self._state = AgentState.BLOCKED_BY_VEHICLE
hazard_detected = True
# check for the state of the traffic lights
light_state, traffic_light = self._is_light_red(lights_list)
if light_state:
if debug:
print('=== RED LIGHT AHEAD [{}])'.format(traffic_light.id))
self._state = AgentState.BLOCKED_RED_LIGHT
hazard_detected = True
new_target_speed = self._update_target_speed(hazard_detected, debug)
# if hazard_detected:
# control = self.emergency_stop()
# else:
# self._state = AgentState.NAVIGATING
# self.braking_intial_speed = None
# # standard local planner behavior
# control = self._local_planner.run_step(debug=debug)
# if self.stopping_for_traffic_light:
# control.steer = 0.0
self._state = AgentState.NAVIGATING
self.braking_intial_speed = None
# standard local planner behavior
control = self._local_planner.run_step(debug=debug)
if self.stopping_for_traffic_light:
control.steer = 0.0
# Prevent from steering randomly when stopped
if math.fabs(get_speed(self._vehicle)) < 0.1:
control.steer = 0
return control
def done(self):
"""
Check whether the agent has reached its destination.
:return bool
"""
return self._local_planner.done()
def _update_target_speed(self, hazard_detected, debug):
if hazard_detected:
self._set_target_speed(0)
return 0
MAX_PERCENTAGE_OF_SPEED_LIMIT = 0.75
speed_limit = self._vehicle.get_speed_limit() # km/h
current_speed = get_speed(self._vehicle)
new_target_speed = speed_limit * MAX_PERCENTAGE_OF_SPEED_LIMIT
use_custom_traffic_light_speed = False
if use_custom_traffic_light_speed:
TRAFFIC_LIGHT_SECONDS_AWAY = 3
METERS_TO_STOP_BEFORE_TRAFFIC_LIGHT = 8
get_traffic_light = self._vehicle.get_traffic_light() # type: carla.TrafficLight
nearest_traffic_light, distance = get_nearest_traffic_light(self._vehicle) # type: carla.TrafficLight, float
distance_to_light = distance
distance -= METERS_TO_STOP_BEFORE_TRAFFIC_LIGHT
if nearest_traffic_light is None:
nearest_traffic_light = get_traffic_light
# Draw debug info
if debug and nearest_traffic_light is not None:
self._world.debug.draw_point(
nearest_traffic_light.get_transform().location,
size=1,
life_time=0.1,
color=carla.Color(255, 15, 15))
"""
if get_traffic_light is not None:
print("get_traffic_light: ", get_traffic_light.get_location() if get_traffic_light is not None else "None", " ", get_traffic_light.state if get_traffic_light is not None else "None")
if nearest_traffic_light is not None:
print("nearest_traffic_light: ", nearest_traffic_light.get_location() if nearest_traffic_light is not None else "None", " ", nearest_traffic_light.state if nearest_traffic_light is not None else "None")
"""
ego_vehicle_location = self._vehicle.get_location()
ego_vehicle_waypoint = self._map.get_waypoint(ego_vehicle_location)
self.is_affected_by_traffic_light = False
self.stopping_for_traffic_light = False
if ego_vehicle_waypoint.is_junction:
# It is too late. Do not block the intersection! Keep going!
pass
# Check if we should start braking
elif distance_to_light <= TRAFFIC_LIGHT_SECONDS_AWAY * new_target_speed / 3.6 and nearest_traffic_light is not None and nearest_traffic_light.state != carla.TrafficLightState.Green:
self.is_affected_by_traffic_light = True
brake_distance = current_speed / 3.6 * TRAFFIC_LIGHT_SECONDS_AWAY
print("TL distance: ", distance_to_light, ", distance (to stop): ", distance, ", distance travel 4 secs: ", brake_distance)
new_target_speed = self._target_speed
if distance <= 0:
new_target_speed = 0
self.stopping_for_traffic_light = True
print("Stopping before traffic light, distance ", distance, "m")
elif brake_distance >= distance and brake_distance != 0:
percent_before_light = (brake_distance - distance) / brake_distance
new_target_speed = speed_limit - max(0, percent_before_light) * speed_limit
print("Slowing down before traffic light ", percent_before_light * 100, "% ", new_target_speed, " km/h")
self._set_target_speed(max(0, new_target_speed))
return new_target_speed
def _set_target_speed(self, target_speed: int):
        """Set a new target speed and propagate it to the local planner.

        Keeps the agent's cached ``_target_speed`` and the local planner's
        commanded speed in sync so both always agree.

        :param target_speed: desired speed (unit as expected by the local
            planner; appears to be km/h in this codebase -- TODO confirm)
        """
        self._target_speed = target_speed
        self._local_planner.set_speed(target_speed)
|
20,446 | 0ec56645977ee47e93b98107f456535a510d6cab | # -*- coding: utf-8 -*-
########
# Copyright (c) 2017-2020 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Local imports
from __future__ import unicode_literals
# Third-party imports
import mock
from mock import patch
from ...tests import TestGCP
from cloudify_gcp.iam import policy_binding
# Fixture policies: POLICY_A is the "new" binding set under test, POLICY_B
# plays the remote (already-applied) policy, and MERGED_POLICY is the
# expected result of merging A into B.
POLICY_A = {'bindings': [{'foo': 'bar'}]}
POLICY_B = {'bindings': [{'baz': 'taco'}]}
MERGED_POLICY = {'bindings': [{'baz': 'taco'}, {'foo': 'bar'}]}


@patch('cloudify_gcp.utils.assure_resource_id_correct', return_value=True)
@patch('cloudify_gcp.gcp.service_account.Credentials.'
       'from_service_account_info')
@patch('cloudify_gcp.iam.policy_binding.build')
class TestGCPPolicyBinding(TestGCP):
    """Unit tests for cloudify_gcp.iam.policy_binding.

    The class decorators stub out credential loading and the API client
    factory; the patched mocks are injected into each test method
    (innermost decorator first) after ``self``.
    """

    def test_create(self, mock_build, *_):
        # create() must first fetch the current IAM policy (requesting
        # policy version 3) and then set the merged policy exactly once.
        policy_binding.create(
            resource='foo', policy=POLICY_A)
        mock_build().projects().getIamPolicy.assert_any_call(
            resource='foo', body={'options': {'requestedPolicyVersion': 3}})
        mock_build().projects().setIamPolicy.assert_called_once()

    def test_delete(self, mock_build, *_):
        # delete() follows the same get-then-set call sequence as create().
        policy_binding.delete(
            resource='foo', policy=POLICY_A)
        mock_build().projects().getIamPolicy.assert_any_call(
            resource='foo', body={'options': {'requestedPolicyVersion': 3}})
        mock_build().projects().setIamPolicy.assert_called_once()

    @patch('cloudify_gcp.iam.policy_binding.PolicyBinding.get',
           return_value=POLICY_B)
    def test_add_new_policies_to_current_policy(self, *_):
        # Merging the new bindings (POLICY_A) into the remote policy
        # (POLICY_B, via the patched get()) must yield MERGED_POLICY.
        pb = policy_binding.PolicyBinding(
            mock.MagicMock(),
            mock.MagicMock(),
            'foo',
            POLICY_A
        )
        output = pb.add_new_policies_to_current_policy()
        self.assertEqual(output, MERGED_POLICY)

    @patch('cloudify_gcp.iam.policy_binding.PolicyBinding.get',
           return_value=POLICY_B)
    def test_remove_new_policies_from_current_policy(self, *_):
        # Removing POLICY_A's bindings must leave only POLICY_B's bindings.
        pb = policy_binding.PolicyBinding(
            mock.MagicMock(),
            mock.MagicMock(),
            'foo',
            POLICY_A
        )
        output = pb.remove_new_policies_from_current_policy()
        self.assertEqual(output, POLICY_B)
|
20,447 | d1f756682a094dc5b7d6a4eb69a2373d6b79fc3a | import logging
from smlcs.helper.read_data import ReadData
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
class DataVisualize:
    """Plotting helpers for SMLCS dataset exploration.

    The methods use no instance state; they were originally declared
    without ``self`` (so calling them on an instance raised TypeError)
    and are now explicit ``@staticmethod``s. Existing class-level calls
    (``DataVisualize.plot_x(...)``) are unaffected.
    """

    @staticmethod
    def plot_class_counts(classes, counts, *args):
        """Bar chart of sample counts per class label.

        :param classes: sequence of class names (x tick labels)
        :param counts: sequence of counts, same length as *classes*
        :param args: ignored (callers historically pass a logger here)
        """
        try:
            y_pos = np.arange(len(classes))
            f, ax = plt.subplots(figsize=(18, 10))
            plt.bar(y_pos, counts, color=['green', 'black', 'red', 'blue', 'cyan', 'grey'])
            plt.xticks(y_pos, classes)
            # plt.savefig('total_count_second_phase.png')
            plt.show()
        except Exception as e:
            # BUG FIX: the %s placeholder was handed to print() as a second
            # argument and never interpolated.
            print('Exception occurred in plot_class_counts: %s' % str(e))

    @staticmethod
    def plot_runtime_distribution(runtime, *args):
        """Kernel-density plot of solver runtimes.

        NOTE: seaborn.distplot is deprecated upstream (kdeplot is the
        successor); kept to avoid changing output on the pinned seaborn.
        """
        try:
            sns.distplot(runtime, hist=False, rug=False)
        except Exception as e:
            print('Exception occurred in plot_runtime_distribution: %s' % str(e))

    @staticmethod
    def plot_heat_map(dataset, *args):
        """Heat map of pairwise feature correlations (label columns excluded)."""
        try:
            f, ax = plt.subplots(figsize=(30, 35))
            # The last 10 columns are dropped before correlating.
            corr = dataset.iloc[:, :-10].corr()
            # BUG FIX: np.bool was removed in NumPy 1.24; builtin bool is the
            # documented replacement.
            sns.heatmap(corr, mask=np.zeros_like(corr, dtype=bool),
                        cmap=sns.diverging_palette(220, 10, as_cmap=True),
                        square=True, ax=ax)
            plt.show()
            #plt.savefig('plots/feature_correlation_heatmap_dropped_features.png')
        except Exception as e:
            print('Exception occurred in plot_heat_map: %s' % str(e))
if __name__ == '__main__':
    # Standalone entry point: load both dataframes and render the
    # exploratory plots, logging failures to a file.
    try:
        logging.basicConfig(filename='../../logs/data_visualization.log', filemode='w',
                            format='%(name)s - %(levelname)s - %(message)s', level=logging.INFO)
        logger = logging.getLogger('Data_visualization')
        clf_df, reg_df = ReadData('local', logger).read_dataframe(logger)
        # Column 54 holds the target: runtime (regression) / class label.
        runtime = reg_df.iloc[:, 54].tolist()
        classes = clf_df.iloc[:, 54].tolist()
        print(clf_df.iloc[:, 54].shape)
        print(clf_df['class'].value_counts())
        # BUG FIX: these helpers live on DataVisualize, not at module level;
        # the bare calls raised NameError before any plot was drawn.
        DataVisualize.plot_runtime_distribution(runtime, logger)
        DataVisualize.plot_heat_map(clf_df, logger)
        #counts = [13843, 2620, 8150, 11053, 2798, 10436]
        counts = [13843, 35057]
        classes = ('Correct', 'Noresult')
        DataVisualize.plot_class_counts(classes, counts, logger)
    except Exception as e:
        logger.error('Failed in the main of data_visualize.py: ' + str(e))
|
20,448 | 4aec8a449ec4ce71002d5395e1cf5a33631efb71 | from utils.AST_interpreter import *
from utils.arrays_utils import compute_real_register_of_array_element, \
generate_code_for_computing_index_of_array_element_by_variable
from utils.compilation_exceptions import *
from utils.value_utils import generate_code_for_loading_value
from structures.ast.AST import ReadCommand, WriteCommand, VariableIdentifier, \
ArrayElementByIntNumberIdentifier, ArrayElementByVariableIdentifier
# Generates code for the READ command.


def generate_code_for_read_command(
        read_command: ReadCommand,
        visitor: 'ASTInterpreter',
) -> str:
    """Emit GET + STORE assembly reading input into the command's target.

    Dispatches on the identifier kind (plain variable, array element by
    constant index, array element by variable index) and validates that
    every referenced name has been declared before emitting any code.
    """
    identifier = read_command.identifier
    if isinstance(identifier, VariableIdentifier):
        name = identifier.identifier_name
        if name not in visitor.declared_variables:
            raise UndeclaredVariableException(f"Undeclared variable '{name}'.",
                                              occurrence_place=identifier.start_position)
        if name in visitor.local_variables:
            # Loop counters are read-only inside their loop body.
            raise AnAttemptToModifyCounterException(
                f"An attempt to modify iterator: '{name}'.",
                occurrence_place=read_command.start_position)
        return 'GET\n' + f'STORE {visitor.declared_variables[name]}\n'
    if isinstance(identifier, ArrayElementByIntNumberIdentifier):
        array_name = identifier.array_identifier
        if array_name not in visitor.declared_arrays:
            raise UndeclaredArrayException(
                f"An array '{array_name}' is not declared.", occurrence_place=identifier.start_position)
        # Constant index: the target register is known at compile time.
        cell_register = compute_real_register_of_array_element(visitor.declared_arrays, identifier)
        return 'GET\n' + f'STORE {cell_register}\n'
    if isinstance(identifier, ArrayElementByVariableIdentifier):
        array_name = identifier.array_identifier
        index_name = identifier.index_identifier
        if array_name not in visitor.declared_arrays:
            raise UndeclaredArrayException(
                f"An array '{array_name}' is not declared.", occurrence_place=identifier.start_position)
        if index_name not in visitor.declared_variables:
            raise UndeclaredVariableException(
                f"An index variable for accessing element of array '{array_name}'('{index_name}') is not declared."
                f" '{index_name}' is not declared",
                occurrence_place=identifier.start_position)
        # Variable index: compute the address at runtime, park it in
        # register 1, then store the read value indirectly through it.
        index_code = generate_code_for_computing_index_of_array_element_by_variable(
            identifier, visitor)
        return index_code + 'STORE 1\nGET\nSTOREI 1\n'
    raise ValueError('Unknown instance of Identifier occurred.')
# Generates code for the WRITE command.


def generate_code_for_write_command(
        write_command: WriteCommand,
        visitor: 'ASTInterpreter'
) -> str:
    """Emit code that loads the command's value and PUTs it to the output."""
    value_code = generate_code_for_loading_value(write_command.value, visitor)
    return '## BEGIN write command\n' + value_code + 'PUT\n' + '## END write command\n'
|
def carryover(signals_in, n=4):
    """Return a copy of *signals_in* where every non-zero value is
    forward-filled into at most ``n`` immediately following zeros.

    The input list is never modified; zeros beyond the ``n``-slot window
    stay zero until the next non-zero signal.
    """
    filled = signals_in[:]
    length = len(filled)
    idx = 0
    while idx < length:
        if filled[idx] == 0:
            idx += 1
            continue
        # Non-zero entry found: propagate it into the trailing run of zeros.
        idx += 1
        copied = 0
        while idx < length and filled[idx] == 0 and copied < n:
            filled[idx] = filled[idx - 1]
            copied += 1
            idx += 1
    return filled
# Demo: propagate each non-zero signal into at most 2 following zeros.
signals = [0, 0, 1, 0, 0, 0, -1, 0, 0, 0, 0, 1, 0, -1, 0, 1, -1, 0]
a = carryover(signals, 2)
# carryover() copies its input, so the original list prints unchanged.
print (signals)
print (a)
# Prompt for basic employee details and echo them back with old-style
# %-formatting. NOTE: int()/float() raise ValueError on non-numeric
# input -- no validation is performed here.
Ename=input('Enter Employee Name')
EID=int(input('Enter Employee ID'))
ESal=float(input('Enter Employee Salary'))
print('Employee Name:%s'%Ename)
print('Employee ID:%i'%EID)
print('Employee Salary:%f'%ESal)
|
20,451 | d30500389b46cd947a6133e7ab47691d4ad40031 | import adafruit_bus_device.i2c_device as i2c_device
class MotoBitMotor:
    """One motor channel of a moto:bit-style I2C motor controller.

    Instances are created via MotoBit.left_motor()/right_motor() and share
    the parent's I2C device handle.
    """

    # Bit 7 of the speed byte selects the forward direction.
    FORWARD_FLAG = 0x80

    def __init__(self, device, cmd_speed, invert):
        # device: shared I2CDevice handle owned by the parent MotoBit.
        self._device = device
        # cmd_speed: command byte addressing this channel's speed register.
        self.cmd_speed = cmd_speed
        # invert: flip this channel's notion of "forward" (e.g. for a motor
        # mounted mirror-imaged on the chassis).
        self.invert = invert

    def __drive(self, speed):
        """Send one speed command; *speed* is a percentage in [-100, 100].

        The percentage is scaled to a 7-bit magnitude; clamping happens
        after scaling, so out-of-range percentages are capped at +/-127.
        """
        flags = 0
        if self.invert:
            speed = -speed
        if speed >= 0:
            flags |= MotoBitMotor.FORWARD_FLAG
        speed = int(speed / 100 * 127)
        if speed < -127:
            speed = -127
        if speed > 127:
            speed = 127
        # Negative values end up two's-complement-masked into the low 7
        # bits here -- presumably this matches the controller's wire format
        # for reverse; TODO confirm against the moto:bit protocol docs.
        speed = (speed & 0x7f) | flags
        with self._device:
            self._device.write(bytes([self.cmd_speed, speed]))

    def forward(self, speed):
        # Positive percentages drive the channel forward.
        self.__drive(speed)

    def reverse(self, speed):
        # Same as forward() with the sign flipped.
        self.__drive(-speed)
class MotoBit:
    """Minimal driver for a SparkFun moto:bit style I2C motor controller."""

    # 7-bit I2C address of the controller.
    I2C_ADDR = 0x59
    # Command bytes used by the controller firmware.
    CMD_ENABLE = 0x70
    CMD_SPEED_LEFT = 0x21
    CMD_SPEED_RIGHT = 0x20

    def __init__(self, i2c, address=I2C_ADDR):
        # Wrap the bus in an I2CDevice so each write runs in a locked
        # transaction.
        self._device = i2c_device.I2CDevice(i2c, address)

    def enable(self):
        """Enable the motor drivers (CMD_ENABLE <- 0x01)."""
        with self._device:
            self._device.write(bytes([MotoBit.CMD_ENABLE, 0x01]))

    def disable(self):
        """Disable the motor drivers (CMD_ENABLE <- 0x00)."""
        with self._device:
            self._device.write(bytes([MotoBit.CMD_ENABLE, 0x00]))

    def left_motor(self, invert=False):
        """Return a MotoBitMotor bound to the left speed register."""
        return MotoBitMotor(self._device, MotoBit.CMD_SPEED_LEFT, invert)

    def right_motor(self, invert=False):
        """Return a MotoBitMotor bound to the right speed register."""
        return MotoBitMotor(self._device, MotoBit.CMD_SPEED_RIGHT, invert)
20,452 | 91a13ba0d04697deaf694d358f81822868b4c651 | import settings
import vozbase
import voz
import styhelper
import logging
import csv
import re
import quotedspeechhelper
import os
import networkcachemanager
logger = logging.getLogger(__name__)
def main():
    # Entry point: load the annotated corpus and print "normal" sentences.
    load_sentence_annotations()
def load_sentence_annotations():
    # Loads .sty story files plus the quoted-speech TSV annotations and
    # prints the text of every sentence whose annotation is "normal".
    logging.basicConfig(level=logging.DEBUG)
    file_path = settings.STY_FILE_PATH
    # NOTE(review): collected but never used below -- dead variable?
    mentions = []
    # Only the third configured story is processed ([2:3]) -- presumably a
    # debugging restriction; widen the slice to cover the full corpus.
    for sty_file in settings.STY_FILES[2:3]:
        logger.info("Processing %s" % sty_file)
        quoted_speech_file = "all_sentences.tsv"
        doc = styhelper.create_document_from_sty_file(file_path+sty_file)
        quotedspeechhelper.annotate_sentences(doc, file_path + quoted_speech_file, format='tsv',single_sentences_file_story_id=doc.id)
        for sentence in doc.sentences:
            assert(isinstance (sentence,voz.Sentence))
            if sentence.annotations.is_normal():
                # Python 2 print statement -- this module predates Python 3.
                print sentence.get_text()


if __name__=='__main__':
    main()
20,453 | aceed16263f96642ed9a50a170bf3303a50a83d9 | """ A View to inform user that shutdown is in progress.
"""
from ubuntui.utils import Padding
from urwid import Filler, LineBox, Pile, Text, WidgetWrap
class ShutdownView(WidgetWrap):
    """Centered dialog telling the user that shutdown is in progress."""

    def __init__(self):
        notice = Text("Conjure-up is shutting down, please wait.",
                      align="center")
        stacked = Pile([
            Padding.line_break(""),
            notice,
            Padding.line_break(""),
        ])
        framed = Padding.center_45(LineBox(stacked))
        super().__init__(Filler(framed, valign="middle"))
|
20,454 | e05f07500056c71868cce92f763ea78c48c5de3e | import tensorflow as tf
import keras
from keras import Sequential, regularizers
from keras.backend import categorical_crossentropy
from keras.layers import ConvLSTM2D, Flatten, Dense, BatchNormalization, MaxPool2D, MaxPool3D
from keras.constraints import Constraint
from keras.constraints import max_norm
from keras.callbacks import ModelCheckpoint
from keras.layers import LeakyReLU, Dropout
from keras.models import load_model
from keras.models import Model
from keras.optimizers import Adadelta
import cv2
import pdb
import numpy as np
import os
import random
from sklearn import metrics
import createmodel
import sys
from utils import switch
# 5x5 high-pass residual kernel applied to every frame before training.
# The coefficients match the SRM-style noise-residual filter used in image
# forensics (suppresses content, keeps manipulation artifacts) -- presumably
# chosen for that reason; TODO confirm intent.
kernel = [[-1, 2, -2, 2, -1],
          [2, -6, 8, -6, 2],
          [-2, 8, -12, 8, -2],
          [2, -6, 8, -6, 2],
          [-1, 2, -2, 2, -1]]
kernel = np.array((kernel), dtype="float32")
# CLI: argv[1] = fake:real oversampling ratio, argv[2] = number of real
# samples to load, argv[3] = model architecture id, argv[4] = weights file.
rrate = int(sys.argv[1])
#rrate = 3
# NOTE(review): data/y are never used below -- dead variables?
data = []
y = []
trainx = []
trainy = []
testx = []
testy = []
# Directories of pre-extracted per-frame crops (real vs. synthesized faces).
real_path = '/mnt/celeb-real-lstm/'
#fake_path = '/mnt/celeb-synthesis-lstm/'
#real_path = '/mnt/celeb-real-eye/'
fake_path = '/mnt/celeb-synthesis-eye/'
#pdb.set_trace()
# lstmnum consecutive frames form one ConvLSTM sample; capnum frames were
# captured per video, so valid start frames satisfy num < capnum - lstmnum.
lstmnum = 4
capnum = 7
alltotal = int(sys.argv[2])
#alltotal = 800
total = alltotal
# ttname/vdname track the video id of the previous sample so the first test
# clip of an already-seen video can be skipped (train/test leakage guard).
ttname = ''
vdname = ''
fflag = 1
# Load real-frame sequences. File names look like "<id1>_<id2>_<frame>.jpg";
# lstmnum consecutive high-pass-filtered frames are stacked per sample.
for name in os.listdir(real_path):
    dd = []
    if total <= 0:
        break
    #print(name)
    vdname = '_'.join(name.split('_')[:2])
    num = int(name.split('.')[0].split('_')[2])
    nn = '_'.join(name.split('.')[0].split('_')[:2])
    if num < capnum-lstmnum:
        try:
            for i in range(lstmnum):
                imgname = nn+'_'+str(num+i)+'.jpg'
                img = cv2.imread(real_path+imgname)
                img = cv2.resize(img, (128, 100))
                img = cv2.filter2D(img, -1, kernel)
                dd.append(img)
            dd = np.array(dd)
        # BUG FIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit.
        except Exception:
            # Missing/corrupt frame: skip this sample entirely.
            print(name)
            continue
        # First 80% of the budget is training data, the remainder test data.
        if total > alltotal * 0.2:
            trainx.append(dd)
            trainy.append([1, 0])
        else:
            # BUG FIX: compared against the literal string 'ttname' instead
            # of the tracking variable, so the "skip first test clip of an
            # already-seen video" guard never fired (the parallel fake-data
            # loop below does this correctly with `ttname == vdname`).
            if vdname == ttname and fflag:
                continue
            fflag = 0
            testx.append(dd)
            testy.append([1, 0])
        total -= 1
        ttname = vdname
#pdb.set_trace()
print(len(trainx), len(testx))
# Number of positive (real) test samples collected so far.
podata = len(testy)
# The fake set budget is rrate times the real one.
total = alltotal*rrate
ftotal = alltotal*rrate
fflag = 1
# Load fake (synthesized) sequences; fake file names carry three id parts
# before the frame number. The fake set is oversampled rrate:1.
for name in os.listdir(fake_path):
    #if np.random.randint(2) == 1:
    # continue
    dd = []
    if ftotal <= 0:
        break
    vdname = '_'.join(name.split('_')[:3])
    num = int(name.split('.')[0].split('_')[3])
    nn = '_'.join(name.split('.')[0].split('_')[:3])
    if num < capnum-lstmnum:
        try:
            # Stack lstmnum consecutive filtered frames into one sample.
            for i in range(lstmnum):
                imgname = nn + '_' + str(num + i) + '.jpg'
                img = cv2.imread(fake_path + imgname)
                img = cv2.resize(img, (128, 100))
                img = cv2.filter2D(img, -1, kernel)
                dd.append(img)
            dd = np.array(dd)
        except:
            # Missing/corrupt frame: skip this sample entirely.
            print(name)
            continue
        # First 80% of the fake budget is training, the rest is test data.
        if ftotal > total * 0.2:
            trainx.append(dd)
            trainy.append([0, 1])
        else:
            # Skip the first test clip of a video already seen in training.
            if ttname == vdname and fflag:
                continue
            fflag = 0
            testx.append(dd)
            testy.append([0, 1])
        ftotal -= 1
        ttname = vdname


# Number of negative (fake) test samples.
negdata = len(testy) - podata
#pdb.set_trace()
# Re-seeding with the same seed before each shuffle keeps every x list
# aligned with its y list (train and test shuffled with identical pairing).
seed = random.randint(0, 100)
random.seed(seed)
random.shuffle(trainx)
random.seed(seed)
random.shuffle(trainy)
random.seed(seed)
random.shuffle(testx)
random.seed(seed)
random.shuffle(testy)
#pdb.set_trace()
# Pack the lists into float32 tensors of shape (N, frames, H=100, W=128, 3).
trainx = np.array(trainx)
trainy = np.array(trainy)
trainx = trainx.reshape(-1, lstmnum, 100, 128, 3)
trainx = trainx.astype('float32')
testx = np.array(testx)
testx = testx.reshape(-1, lstmnum, 100, 128, 3)
testx = testx.astype('float32')
testy = np.array(testy)
#pdb.set_trace()
print(len(trainx))
print(len(testx))
print(podata, negdata)
# argv[3] selects which architecture createmodel builds.
modelname = sys.argv[3]
# NOTE(review): active breakpoint -- halts any non-interactive run; remove
# for unattended training.
pdb.set_trace()
# Poor-man's switch (utils.switch helper) dispatching on the architecture id.
for case in switch(modelname):
    if case('m0'):
        model = createmodel.create_m0()
        break
    if case('m1'):
        model = createmodel.create_m1()
        break
    if case('m2'):
        model = createmodel.create_m2()
        break
    if case('m3'):
        model = createmodel.create_m3()
        break
    if case('m4'):
        model = createmodel.create_m4()
        break
#model = createmodel.create_m0()


def auroc(y_true, y_pred):
    # Wrap sklearn's ROC-AUC computation as a Keras metric.
    # NOTE(review): tf.py_func is TF1-only (TF2 renamed it tf.py_function);
    # fine only while this project stays on TF1.
    return tf.py_func(metrics.roc_auc_score, (y_true, y_pred), tf.double)
# Compile the Keras model (used below only as a fixed feature extractor).
model.compile(loss=categorical_crossentropy,
              optimizer=Adadelta(),
              metrics=['accuracy', auroc])
# NOTE(review): batch_size/epochs and the checkpoint callback are defined
# but no Keras fit() call remains -- presumably leftovers of a training run.
batch_size = 32
epochs = 60
# pdb.set_trace()
filepath = "weights-improvement-{epoch:02d}-{val_acc:.2f}.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True,
                             mode='max')
callbacks_list = [checkpoint]
# argv[4]: pretrained weights for the selected architecture.
modelweights = sys.argv[4]
#model = load_model('weights-improvement-27-0.79.hdf5')
model.load_weights(modelweights)
# Use the flatten_1 activations as fixed features for a boosted-tree head.
flat_layer = Model(model.input, outputs=model.get_layer('flatten_1').output)
test_out = flat_layer.predict(testx)
train_out = flat_layer.predict(trainx)
print(len(test_out))
# NOTE(review): active breakpoint -- halts non-interactive runs.
pdb.set_trace()
# One-hot labels -> scalar target (1 = real, 0 = fake).
trainyy = trainy[:, 0]
testyy = testy[:, 0]
import catboost as ctb
from catboost import CatBoostClassifier, CatBoostRegressor
metricname = 'AUC'
lossfunc = 'RMSE'
#model = CatBoostClassifier(iterations=10000, depth=3, bagging_temperature=0.2, l2_leaf_reg=50,
#                           custom_metric=metricname, learning_rate=0.5, eval_metric=metricname, loss_function=lossfunc,
#                           logging_level='Verbose')
# `model` is rebound here from the Keras model to the CatBoost head.
model = CatBoostRegressor(iterations=10000, depth=3, bagging_temperature=0.2, l2_leaf_reg=50,
                          custom_metric=metricname, learning_rate=0.5, eval_metric=metricname, loss_function=lossfunc,
                          logging_level='Verbose')
model.fit(train_out, trainyy,eval_set=(test_out, testyy), plot=False)
pdb.set_trace()
# NOTE(review): CatBoostRegressor exposes no predict_proba -- this line
# presumably only worked with the commented-out CatBoostClassifier above;
# for the regressor it should be model.predict(test_out). Verify intent.
predict = model.predict_proba(test_out)
np.save('predictcat', predict)
np.save('testcat', testyy)
|
20,455 | dd805f30d3dfc49536a31f72634fc94c7d7a7e69 | import pytest
from .pages.main_page import MainPage
from .pages.login_page import LoginPage
from time import time
def test_register_new_user(browser, link):
    """Register a fresh user and verify the success indicators appear."""
    main_page = MainPage(browser, link)
    main_page.open()
    main_page.go_to_login_page()
    login_page = LoginPage(browser, browser.current_url)
    # Timestamp keeps the e-mail unique across repeated test runs.
    unique_email = str(time()) + '@testmail.org'
    login_page.register_new_user(unique_email, 'truepassw')
    login_page.should_be_login_icon()
    login_page.should_be_success_register_message()
|
20,456 | 555ea66368006115bdd04c33ccfa669beb603a4e | from typing import *
class DatetimeProperties:
    """Auto-generated API-usage stub for datetime accessor properties.

    Each comment records how many times downstream projects (dask, koalas,
    xarray, prophet) were observed using the attribute; method bodies are
    intentionally ``...`` placeholders.
    """

    # usage.dask: 1
    # usage.koalas: 3
    date: object

    # usage.koalas: 1
    day: object

    # usage.koalas: 1
    dayofweek: object

    # usage.koalas: 2
    dayofyear: object

    # usage.koalas: 1
    days_in_month: object

    # usage.koalas: 1
    daysinmonth: object

    # usage.koalas: 1
    hour: object

    # usage.koalas: 1
    is_leap_year: object

    # usage.koalas: 1
    is_month_end: object

    # usage.koalas: 1
    is_month_start: object

    # usage.koalas: 1
    is_quarter_end: object

    # usage.koalas: 1
    is_quarter_start: object

    # usage.koalas: 1
    is_year_end: object

    # usage.koalas: 1
    is_year_start: object

    # usage.koalas: 1
    microsecond: object

    # usage.koalas: 1
    minute: object

    # usage.koalas: 1
    # usage.xarray: 1
    month: object

    # usage.koalas: 1
    second: object

    # usage.prophet: 1
    tz: object

    # usage.koalas: 1
    week: object

    # usage.koalas: 1
    weekday: object

    # usage.koalas: 1
    weekofyear: object

    # usage.koalas: 1
    year: object

    def floor(self, /):
        """
        usage.koalas: 4
        """
        ...

    def round(self, /):
        """
        usage.koalas: 2
        """
        ...

    def to_pydatetime(self, /):
        """
        usage.dask: 1
        """
        ...
class TimedeltaProperties:
    """Auto-generated API-usage stub for timedelta accessor properties."""

    # usage.koalas: 3
    days: object
|
20,457 | f5a29ea316a3fe41bc6773829c8443fbdf836aab | import pandas as pd
import numpy as np
import xgboost as xgb
from xgboost import XGBClassifier
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import accuracy_score,roc_auc_score
from data_process import preprocess_dataset
# Load the raw Kaggle Titanic data and apply the shared preprocessing step
# (feature engineering/encoding) to train and test alike.
train_data=pd.read_csv("./data/train.csv")
test_data=pd.read_csv("./data/test.csv")
datasets=[train_data,test_data]
preprocess_dataset(datasets)
# Identifier-ish columns that carry no predictive signal for the model.
drop_elements = ['PassengerId', 'Name', 'Ticket', 'Cabin', 'SibSp','Parch']
# BUG FIX: DataFrame.as_matrix() was deprecated in pandas 0.23 and removed
# in 1.0; .values is the drop-in replacement and works on old pandas too.
pID=test_data['PassengerId'].values
train_data = train_data.drop(drop_elements, axis = 1)
test_data = test_data.drop(drop_elements, axis = 1)
x_train=train_data.drop(['Survived'],axis=1).values
y_train=train_data['Survived'].values
x_test=test_data.values
# Gradient-boosted tree classifier; fixed seed for reproducibility.
clf=XGBClassifier(
    booster='gbtree',nthread=4,
    learning_rate=0.1,min_child_weight=1,
    max_depth=5,
    subsample=0.8,
    colsample_bytree=0.8,
    scale_pos_weight=1,
    objective='binary:logistic',
    seed=24
    )
# NOTE(review): early stopping is evaluated on the training set itself,
# so it will rarely trigger -- consider a held-out validation split.
eval_set=[(x_train,y_train)]
clf.fit(x_train,y_train,early_stopping_rounds=100,
        eval_metric='logloss',eval_set=eval_set,verbose=True)
clf.save_model("./model/xgboost.model")
predictions=clf.predict(x_test)
# Kaggle submission format: PassengerId,Survived.
result = pd.DataFrame({'PassengerId':pID, 'Survived':predictions.astype(np.int32)})
result.to_csv("./submission/Titanic_xgboost.csv",index=False)
print("succeed to save submission file!")
20,458 | 2ac224bf0e180ea3d57a926bf079cc37e1b1fee8 | from django.contrib import admin
from .models import Category
@admin.register(Category)
class CategoryAdmin(admin.ModelAdmin):
    """Django admin configuration for the Category model."""

    # Columns shown on the changelist page.
    list_display = ('id', 'name', 'is_active', 'created', 'updated')
    # Free-text search over the category name.
    search_fields = ('name',)
    # Sidebar filter on the active flag.
    list_filter = ('is_active',)
    # Auto-fill the slug from the name while typing in the admin form.
    prepopulated_fields = {'slug': ('name',)}
|
20,459 | 2cc5639322236618a6fb0a8b60d70ba2aadef382 | from typing import List
import ghidra.graph.viewer.layout
import java.lang
import java.util
class LayoutLocationMap(object):
    """
    A class that holds row and column data for each vertex and edge.

    This class will take in a GridLocationMap, which is comprised of grid index
    values, not layout space points. Then, the grid values will be used to calculate
    offsets and size for each row and column. Each row has a y location and a height; each
    column has an x location and a width. The height and width are uniform in size across
    all rows and columns, based upon the tallest and widest vertex in the graph.

    NOTE: this is a machine-generated Ghidra type stub intended for IDE
    support only; it is not meant to be imported at runtime -- several
    annotations reference names (``com``, ``long``, ``unicode``,
    ``overload``) that this module never defines.
    """

    def __init__(self, gridLocations: ghidra.graph.viewer.layout.GridLocationMap, transformer: com.google.common.base.Function, isCondensed: bool, monitor: ghidra.util.task.TaskMonitor): ...

    def articulations(self, __a0: object) -> List[object]: ...

    @overload
    def col(self, gridX: int) -> ghidra.graph.viewer.layout.Column: ...

    @overload
    def col(self, __a0: object) -> ghidra.graph.viewer.layout.Column: ...

    def columns(self) -> java.util.Collection:
        """
        Returns the columns in this location map, sorted from lowest index to highest
        @return the columns in this location map, sorted from lowest index to highest
        """
        ...

    def copy(self) -> ghidra.graph.viewer.layout.LayoutLocationMap: ...

    def dispose(self) -> None: ...

    def equals(self, __a0: object) -> bool: ...

    def getClass(self) -> java.lang.Class: ...

    def getColOffsets(self) -> List[int]: ...

    def getColumnContaining(self, x: int) -> ghidra.graph.viewer.layout.Column: ...

    def getColumnCount(self) -> int: ...

    def getRowCount(self) -> int: ...

    def getRowOffsets(self) -> List[int]: ...

    def gridX(self, col: ghidra.graph.viewer.layout.Column) -> int: ...

    def gridY(self, row: ghidra.graph.viewer.layout.Row) -> int: ...

    def hashCode(self) -> int: ...

    def isCondensed(self) -> bool: ...

    def lastColumn(self) -> ghidra.graph.viewer.layout.Column: ...

    def lastRow(self) -> ghidra.graph.viewer.layout.Row: ...

    def nextColumn(self, column: ghidra.graph.viewer.layout.Column) -> ghidra.graph.viewer.layout.Column: ...

    def notify(self) -> None: ...

    def notifyAll(self) -> None: ...

    @overload
    def row(self, gridY: int) -> ghidra.graph.viewer.layout.Row: ...

    @overload
    def row(self, __a0: object) -> ghidra.graph.viewer.layout.Row: ...

    def rows(self) -> java.util.Collection:
        """
        Returns the rows in this location map, sorted from lowest index to highest
        @return the rows in this location map, sorted from lowest index to highest
        """
        ...

    def toString(self) -> unicode: ...

    @overload
    def wait(self) -> None: ...

    @overload
    def wait(self, __a0: long) -> None: ...

    @overload
    def wait(self, __a0: long, __a1: int) -> None: ...

    @property
    def colOffsets(self) -> List[object]: ...

    @property
    def columnCount(self) -> int: ...

    @property
    def condensed(self) -> bool: ...

    @property
    def rowCount(self) -> int: ...

    @property
    def rowOffsets(self) -> List[object]: ...
|
20,460 | fe7bf1e5c94c26279cfc03b93d3d2b1f2767a3f6 | # ir_remote.py - simple ir library
from machine import Pin, Timer
from time import sleep_us
import micropython
# Lookup tables mapping 16-bit pulse patterns (as '0'/'1' strings) to names.
# The first 16 received bits identify the transmitting device, the next 16
# the pressed button -- presumably NEC-style address/command fields; TODO
# confirm against the remotes in use.
Device_dict = {
    "0000000011111111":"DO",
    "0000010000010000":"UPC"}

Buttom_dict = {
    "0111001110001100":"OK" ,
    "0100100010110111": "UP",
    "0100101110110100":"DOWN",
    "1001100101100110":"LEFT",
    "1000001101111100":"RIGHT",
    "0011000011001111":"1",
    "0001100011100111":"2",
    "0111101010000101":"3",
    "0001000011101111":"4",
    "0011100011000111":"5",
    "0101101010100101":"6",
    "0100001010111101":"7",
    "0100101010110101":"8",
    "0101001010101101":"9",
    "0110100010010111":"0" }
def send_id(out_signal, pin_name):
    """Transmit *out_signal* (a string of '0'/'1' bits) as a 38 kHz IR burst.

    Frame timing: 9 ms leader mark + 4.5 ms space, then per bit a 560 us
    mark followed by a 565 us ('0') or 1690 us ('1') space, closed by a
    final 560 us mark. These timings match the NEC IR protocol --
    presumably the intended format; TODO confirm.
    """
    # 38 kHz carrier PWM on the given pin, keyed on/off via the duty cycle
    # (50% = carrier on, 0% = carrier off).
    L2 = Pin(pin_name, mode=Pin.AF_PP, af=Pin.AF1_TIM2)
    timer = Timer(2,freq = 38000)
    # Leader: 9 ms mark, 4.5 ms space.
    ch = timer.channel(1, Timer.PWM, pin =L2, pulse_width_percent = 0.5)
    sleep_us(9000)
    ch = timer.channel(1, Timer.PWM, pin =L2, pulse_width_percent = 0)
    sleep_us(4500)
    for i in out_signal:
        if i == "0":
            # '0': 560 us mark + short (565 us) space.
            ch = timer.channel(1, Timer.PWM, pin =L2, pulse_width_percent = 0.5)
            sleep_us(560)
            ch = timer.channel(1, Timer.PWM, pin =L2, pulse_width_percent = 0)
            sleep_us(565)
        else:
            # '1': 560 us mark + long (1690 us) space.
            ch = timer.channel(1, Timer.PWM, pin =L2, pulse_width_percent = 0.5)
            sleep_us(560)
            ch = timer.channel(1, Timer.PWM, pin =L2, pulse_width_percent = 0)
            sleep_us(1690)
    # Trailing stop mark terminates the frame; carrier is left disabled.
    ch = timer.channel(1, Timer.PWM, pin =L2, pulse_width_percent = 0.5)
    sleep_us(560)
    ch = timer.channel(1, Timer.PWM, pin =L2, pulse_width_percent = 0)
# send_id("11001101001100100111001110001100",'PA5')
def read_id(pin_name):
    """Sample one IR frame on *pin_name* and decode device/button fields.

    Returns ``(device_bits, button_bits, device_name, button_name)``;
    the name fields are '' when the bit patterns are not found in the
    lookup tables above.
    """
    key = ""
    device = ""
    L1 = Pin(pin_name, Pin.IN, Pin.PULL_UP)
    a = []
    #while L1.value() == 1:
    # pass
    # Bounded busy-wait for the line to go low (start of a burst).
    for j in range(30000):
        if L1.value() == 0: break
    sleep_us(13560) # this for initial time
    # Sample the line every ~56 us; 1000 samples cover the whole frame.
    for i in range(1000):
        v = L1.value()
        a.append(v)
        sleep_us(56)
    # print (a, len(a))
    # Run-length encode the high (space) periods between marks.
    a_c = []
    count = 0
    for i in a:
        if i == 1:
            count += 1
        elif i == 0:
            if count > 0 :
                a_c.append(count)
                count =0
    # Long spaces (>10 samples, i.e. > ~560 us) decode as '1', short as '0'.
    for i in range(len(a_c)):
        if a_c[i] > 10:
            a_c[i] = "1"
        else:
            a_c[i] = "0"
    # print (a_c)
    B1 = "".join(a_c) #print (B1)
    # First 16 bits: device field; next 16 bits: button field.
    Data_device = B1[0:16]
    #if len(Data_device) > 0: print ("device: " + Data_device)
    Data_buttom = str(B1[16:32])
    #if len(Data_buttom) > 0: print ("button: " + Data_buttom)
    for key_d in Device_dict.keys():
        if str(Data_device) == str(key_d):
            device = Device_dict[key_d]
            #print (Device_dict[key_d], end = ' ')
    if Data_buttom in Buttom_dict.keys():
        key = Buttom_dict[Data_buttom]
        #print(key)
    return Data_device, Data_buttom, device, key
#while True:
# f = read_id(33)
# print("key: " + f[3])
# # send_id(f[1]) |
20,461 | 90cc22bb05c03a70d2f06b37ffe299a1581674d9 | """ONVIF util."""
from __future__ import annotations
import contextlib
import datetime as dt
from functools import lru_cache, partial
import os
import ssl
from typing import Any
from urllib.parse import ParseResultBytes, urlparse, urlunparse
from zeep.exceptions import Fault
# Timezone-aware "now" shortcut (UTC).
utcnow: partial[dt.datetime] = partial(dt.datetime.now, dt.timezone.utc)

# This does blocking I/O (stat) so we cache the result
# to minimize the impact of the blocking I/O.
path_isfile = lru_cache(maxsize=128)(os.path.isfile)
def normalize_url(url: bytes | str | None) -> str | None:
"""Normalize URL.
Some cameras respond with <wsa5:Address>http://192.168.1.106:8106:8106/onvif/Subscription?Idx=43</wsa5:Address>
https://github.com/home-assistant/core/issues/92603#issuecomment-1537213126
"""
if url is None:
return None
parsed = urlparse(url)
# If the URL is not a string, return None
if isinstance(parsed, ParseResultBytes):
return None
if "[" not in parsed.netloc and parsed.netloc.count(":") > 1:
net_location = parsed.netloc.split(":", 3)
net_location.pop()
return urlunparse(parsed._replace(netloc=":".join(net_location)))
return url
def extract_subcodes_as_strings(subcodes: Any) -> list[str]:
    """Stringify ONVIF subcodes."""
    if not isinstance(subcodes, list):
        # A single subcode (or anything else) becomes a one-element list.
        return [str(subcodes)]
    # Qualified-name objects expose .text; fall back to str() otherwise.
    return [item.text if hasattr(item, "text") else str(item) for item in subcodes]
def stringify_onvif_error(error: Exception) -> str:
    """Stringify ONVIF error."""
    if not isinstance(error, Fault):
        text = str(error)
        return text or f"Device sent empty error with type {type(error)}"
    message = error.message
    if error.detail is not None:  # checking true is deprecated
        # Detail may be a bytes object, so we need to convert it to string
        raw = error.detail
        detail = raw.decode("utf-8", "replace") if isinstance(raw, bytes) else str(raw)
        message += ": " + detail
    if error.code is not None:  # checking true is deprecated
        message += f" (code:{error.code})"
    if error.subcodes is not None:  # checking true is deprecated
        subcode_text = ",".join(extract_subcodes_as_strings(error.subcodes))
        message += f" (subcodes:{subcode_text})"
    if error.actor:
        message += f" (actor:{error.actor})"
    return message or f"Device sent empty error with type {type(error)}"
def is_auth_error(error: Exception) -> bool:
    """Return True if error is an authentication error.

    Most of the tested cameras do not return a proper error code when
    authentication fails, so we need to check the error message as well.
    """
    if not isinstance(error, Fault):
        return False
    subcode_hit = any(
        "NotAuthorized" in code
        for code in extract_subcodes_as_strings(error.subcodes)
    )
    return subcode_hit or "auth" in stringify_onvif_error(error).lower()
def create_no_verify_ssl_context() -> ssl.SSLContext:
    """Return an SSL context that does not verify the server certificate.

    This is a copy of aiohttp's create_default_context() function, with the
    ssl verify turned off and old SSL versions enabled.

    https://github.com/aio-libs/aiohttp/blob/33953f110e97eecc707e1402daa8d543f38a189b/aiohttp/connector.py#L911
    """
    context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    context.check_hostname = False
    context.verify_mode = ssl.CERT_NONE
    # Allow all ciphers rather than only Python 3.10 default
    context.set_ciphers("DEFAULT")
    with contextlib.suppress(AttributeError):
        # This only works for OpenSSL >= 1.0.0
        context.options |= ssl.OP_NO_COMPRESSION
    context.set_default_verify_paths()
    # ssl.OP_LEGACY_SERVER_CONNECT is only available in Python 3.12a4+
    context.options |= getattr(ssl, "OP_LEGACY_SERVER_CONNECT", 0x4)
    return context
|
20,462 | d63bf5f78efb72f3ec36f1aeeff32ccd873e0994 | # a package is collection of modules that can be imported under one name. A package is a directory containing module files. This directory should also contain `__init__.py` which is usually empty to make this directory as python package
# python package name should be all lowercase letters although the use of underscores is discouraged. (https://www.python.org/dev/peps/pep-0008/#package-and-module-names)
import basicmath
# like modules, python load package only once. It also compiles the package inside __pycache__ directory inside the package directory itself. This directory should not be commited as bytecode is tailored to the OS.
import basicmath
# like __name__ variable inside a file tells if the file is imported as a module or executed as main. Likewise, __package__ tells how a module is accessed. If a file is executed as main, __package__ is `None`. If a file is accessed as module but does not contains inside a package, __package__ is empty. If a file is accessed as a module from within a package, __package__ is the name of the package it contains inside of access as. For __init__.py, __name__ and __package__ is the same, which is name of the package.
# python package can also be reloaded using `importlib` package
import importlib
importlib.reload(basicmath)
# to see all exported members, we need to use `dir` function
print( "dir(basicmath) => ", dir(basicmath) )
# `__init__.py` file can also contain export members which will be accessible on package name
print( "basicmath.info() => ", basicmath.info() )
# import `calculator` module from a package
import basicmath.calculator
print( 'basicmath.calculator.add(1, 2) => ', basicmath.calculator.add(1, 2) )
# another module import style
from basicmath import calculator
print( 'calculator.add(1, 2) => ', calculator.add(1, 2) )
# python packages can have nested subppackages. A subpackage is directory inside package directory containing __init__.py file.
# from basicmath.numbers package, import power module
from basicmath.numbers import power
print( 'power.square(2) => ', power.square(2) )
# python package can also be conditionally loaded
if True:
# from basicmath.numbers.power module, import square function
from basicmath.numbers.power import square
print( 'square(2) => ', square(2) )
# similar to the module search path, python looks for packages in the same order.
# intra-package reference using absolute name is valid. Check basicmath/calculator.py
# but importing anything inside a module also exports it (like `power` is available on calculator)
print( 'calculator.square(3)', calculator.square(3) )
# we can use relative imports as well (https://docs.python.org/3/tutorial/modules.html#intra-package-references)
# inside a package, if a script is executed as package, where `__name__` is `packagename`, `.` also represent as a package. Hence, we imported everything from `.utils`modules (same as basicmath.calculator but empty package name) and dumped inside package namespace to export them. We also renamed it to utility and exported it.
print( "basicmath.utility.version() => ", basicmath.utility.version() )
print( "basicmath.version() => ", basicmath.version() ) |
20,463 | 65fb03c601dd3914aa7c858d4535afb59ff784b7 | # cu_1336.py
# BvK force constants
element = "Cu"
lattice_type = "fcc"
temperature = 1336 # Units: K
reference = "Larose, A., Brokchouse, B.N.: Can. J. Phys. 54 (1976) 1990"
details = "General force for next neighbour, otherwise axially symmetric model"
a = 3.61 # lattice parameters in angstroms
# Units: N m^-1
force_constants = { "110": { "xx": 11.718,
"zz": -1.787,
"xy": 13.653 },
"200": { "xx": 0.238,
"yy": -0.279 },
"211": { "xx": 0.325,
"yy": 0.095,
"yz": 0.076,
"xz": 0.153 },
"220": { "xx": -0.246,
"zz": 0.093,
"xy": -0.339 },
"310": { "xx": -0.074,
"yy": -0.092,
"zz": -0.095,
"xy": 0.007 } }
|
20,464 | 3fd0d076aaebb50f0f0d8875a922db9d4fa6446f | adapters = []
# Read the puzzle input: one adapter joltage rating per line.
f = open("10.txt")
for line in f.readlines():
    adapters.append(int(line.replace("\n", "")))
f.close()
adapters.sort()
adapters.insert(0, 0) #insert charging port
adapters.append(max(adapters) + 3) #insert device
# Part one: count the 1-jolt and 3-jolt gaps along the sorted chain.
last = adapters[0]
ones = 0
threes = 0
for a in adapters:
    diff = a - last
    if diff == 1:
        ones += 1
    elif diff == 3:
        threes += 1
    last = a
print(ones * threes)
#Part two, helped by https://github.com/tudorpavel/advent-of-code-2020/tree/master/day10
# Split the chain into runs of consecutive (difference-1) adapters; a 3-jolt
# gap is a fixed point, so each run's arrangements can be counted independently.
slices = []
s = []
for i in range(0, len(adapters) - 1):
    if adapters[i + 1] - adapters[i] == 1:
        s.append(adapters[i])
    elif adapters[i + 1] - adapters[i] == 3:
        s.append(adapters[i])
        slices.append(s)
        s = []
# Multiply the number of orderings of each run; counts are hard-coded for run
# lengths up to 5, the maximum occurring in this input (else branch warns).
prod = 1
for s in slices:
    if len(s) == 1:
        prod *= 1
    elif len(s) == 2:
        prod *= 1
    elif len(s) == 3:
        prod *= 2
    elif len(s) == 4:
        prod *= 4
    elif len(s) == 5:
        prod *= 7
    else:
        print("Ahhhh")
print(prod) |
20,465 | 3580d8fd00e4f97020aaa12b4a8b302b9a1f47df | #!/usr/bin/env python
import datetime
import json
import numpy
import pandas
import scipy.sparse
import scipy.optimize
import urllib
import pandasdmx
def atlas_api_call(model="hs07", export=True, year=datetime.date.today().year - 2, origin=None, destination=None, product=None):
    """Load export/import data from the OEC atlas JSON API.

    The Observatory of Economic Complexity atlas has a JSON API documented on
    http://atlas.media.mit.edu/api/. Query it for some data.

    model: classification model, e.g. "hs07".
    export: query exports when True, imports when False.
    year: data year; the default (two years back) is evaluated once at module
        import time, not per call.
    origin/destination/product: optional filters; None means "all"/"show".
    Returns the decoded JSON payload.
    """
    # Bug fix: the file only does "import urllib", which does not import the
    # "urllib.request" submodule; import it explicitly so this call cannot
    # fail with AttributeError depending on what other modules imported.
    import urllib.request
    return json.load(
        urllib.request.urlopen(
            "http://atlas.media.mit.edu/{:}/{:}/{:}/{:}/{:}/{:}/".format(
                model, "export" if export else "import", year,
                "all" if origin is None else origin,
                "all" if destination is None else destination,
                "show" if product is None else product)))
# Some interesting data set URLs:
# worldbank_gdp_table: "http://databank.worldbank.org/data/download/GDP.csv"
# un statistics division, Worldbank world development indicator SDMX API: http://data.un.org/WS/rest/data/DF_UNDATA_WDI/A.AG_LND_AGRI_ZS.AFG+ALB/?lastNObservations=1'
# Many IO-Tables, some with high level of detail: https://www.bea.gov/industry/io_annual.htm and https://www.bea.gov/industry/io_benchmark.htm
# IO-Tables for the OECD countries. {:} can be one of the IDs below: "http://stats.oecd.org/SDMX-JSON/data/IOTS/{:}.NLD../all?detail=Full&dimensionAtObservation=AllDimensions&startPeriod=2008&endPeriod=2011"
# OECD inter-country IO tables: http://www.oecd.org/sti/ind/inter-country-input-output-tables.htm
[{'id': 'LEONTFD', 'name': 'Leontief inverse matrix (domestic)'},
{'id': 'LEONTFT', 'name': 'Leontief inverse matrix (total)'},
{'id': 'TTL', 'name': 'Total'},
{'id': 'VAL', 'name': 'Value added'},
{'id': 'DOMIMP', 'name': 'Domestic output and imports'},
{'id': 'ICESHR', 'name': 'Imports content of exports, as % of exports'}]
# Obtain some country data relevant for ecological footprint calculations
unstats = pandasdmx.Request("UNSD")
for name, description in [
("AG_LND_AGRI_ZS", "Agricultural land (% of land area)"),
("AG_LND_FRST_K2", "Forest area (sq. km)"),
("EN_ATM_CO2E_PC", "CO2 emissions (metric tons per capita)"),
("NY_GDP_MKTP_CD", "GDP (current US$)"),
("NY_GNP_PCAP_CD", "GNI per capita, Atlas method (current US$)"),
("NY_GNP_PCAP_PP_CD", "GNI per capita, PPP (current international $)"),
("NY_GNP_ATLS_CD", "GNI, Atlas method (current US$)"),
("NY_GNP_MKTP_PP_CD", "GNI, PPP (current international $)"),
("AG_SRF_TOTL_K2", "Surface area (sq. km)")]:
data = unstats.get(resource_type='data', resource_id="DF_UNDATA_WDI",
key={"FREQ": "A", "SERIES": name, "REF_AREA": []},
params={"lastNObservations": 1})
sequence = data.write().max(axis=0)
sequence.index = sequence.index.droplevel().droplevel()
try:
alldata[description] = sequence
except NameError:
alldata = pandas.DataFrame({description: sequence})
alldata["PPP"] = alldata["GNI per capita, Atlas method (current US$)"]/alldata["GNI per capita, PPP (current international $)"]
# Unit: US$/int$
# Read the ICIO (load it from server if unavailable) and generate a list of
# countries (with their PPPs and areas) and sectors
try:
all_matrix = pandas.read_csv("./ICIO2016_2011.csv").set_index("Unnamed: 0")
except FileNotFoundError:
from io import BytesIO
import zipfile
zipped = zipfile.ZipFile(BytesIO(
urllib.request.urlopen(
"http://www.oecd.org/sti/ind/ICIO2016_2011.zip").read()))
zipped.extract("ICIO2016_2011.csv")
all_matrix = pandas.read_csv("./ICIO2016_2011.csv").set_index("Unnamed: 0")
all_output = all_matrix.loc["OUT"]
sectors = [sect[4:] for sect in all_matrix.columns if sect.startswith("AUS_")]
countries = alldata.index
countryarea = alldata["Surface area (sq. km)"].values
# Unit: km²
# Now, I assume that the amount of land to create 1 int$ of goods/services is a
# meaningful number. I assume that it does depend on the sector, but not on the
# country. I assume that every (OECD) country essentially uses its entire land
# area, and none of its sea etc. area, in these industries, in weighted
# proportion to the sum of output they produce.
#
# That is, if O is the output matrix of the sectors by country, given in int$,
# and A is the vector of country land areas in km², then the minimum w of
# |O×w-A|² with 0≤w – with units km²/int$ – is a starting point for estimating
# the environmental footprint of goods and services known by industry sector,
# but not by production chain.
output_ppp = numpy.zeros((len(countries), len(sectors)))
for c, country in enumerate(countries):
for s, sector in enumerate(sectors):
try:
output_ppp[c, s] = (all_output[country+"_"+sector] /
alldata["PPP"][country])
# Unit: int$ = US$ / (US$/int$)
except KeyError:
print(country+"_"+sector)
output_ppp[c, s] = numpy.nan
# Remove lines with missing data
output_ppp = output_ppp[~numpy.isnan(output_ppp).any(1)]
optimized = scipy.optimize.lsq_linear(output_ppp[:,:-1], output_ppp[:,-1], (0, numpy.inf))
# Unit: km²/int$
print(dict(zip(sectors, optimized.x)))
|
20,466 | 13c00aaeaacd1b87c23ce7d67645267d88ec6b1a | import json
import random
import re
import requests
url = "http://guba.eastmoney.com/"
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) \
AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36 Edge/17.17134",}
proxies = [{'http':'120.78.94.212:8866'},
{'https':'27.152.91.157:9999'},
{'http':'111.29.3.220:8080'},
{'https':'113.124.92.35:9999'},
{'http':'111.29.3.195:80'},
]
proxy = random.choice(proxies)
print(proxy)
content = requests.get(url=url, headers=headers, proxies=proxy).content.decode('utf-8')
ul_re = re.compile(r'<ul class="newlist" tracker-eventcode="gb_xgbsy_ lbqy_rmlbdj">[\d\D]*?</ul>')
ul = ul_re.findall(content)[0]
with open('guba.html', 'w', encoding='utf-8') as fp:
fp.write(ul)
li_re = re.compile(r'<li>\r\n([\d\D]*?</li>)')
li = li_re.findall(ul)
gu_list = []
for values in li:
gu_dict = {}
# 阅读数和评论数
nums_re = re.compile(r'<cite>\s+(\d*?)\s+</cite>')
nums = nums_re.findall(values)
gu_dict['read_nums'] = nums[0]
gu_dict['comment_nums'] = nums[1]
# 详细页面地址和标题
detail_re = re.compile(r'</em>\s+<a href="([\d\D]*?)"\s+title="([\d\D]*?)"\s+class="note">')
detail = detail_re.findall(values)
if detail:
gu_dict['detail_url'] = 'http://guba.eastmoney.com'+ detail[0][0]
gu_dict['title'] = detail[0][1]
else:
detail_re = re.compile(r']\s+<a href="([\d\D]*?)"\s+title="([\d\D]*?)"\s+class="note">')
detail = detail_re.findall(values)
if detail:
gu_dict['detail_url'] = 'http://guba.eastmoney.com' + detail[0][0]
gu_dict['title'] = detail[0][1]
else:
gu_dict['detail_url'] = ''
gu_dict['title'] = ''
# 发布时间
realtime_re = re.compile(r'<cite class="last">([\d\D]*?)</cite>')
realtime = realtime_re.findall(values)
gu_dict['realtime'] = realtime[0]
# 作者
author_re = re.compile(r'target="_blank"><font>([\d\D]*?)</font>')
author = author_re.findall(values)
gu_dict['author'] = author[0]
gu_list.append(gu_dict)
for url in gu_list:
detail_url = url.get('detail_url')
content = requests.get(url=detail_url, headers=headers, proxies=proxy).content.decode('utf-8')
comment_re = re.compile(r'id="zwcontent">[\d\D]*?</div>')
|
20,467 | ecda6c2be95a2f0dcb53a4addbb6de3fd241a182 | def demo1():
return int(input("请输入整数:"))
def demo2():
    # Call the demo1 method, which reads an integer from standard input.
    return demo1()
# Any exception raised while reading/parsing the input (e.g. ValueError for
# non-numeric text) is caught below and reported with its message.
try:
    print(demo2())
except Exception as e:
print("未知错误 -- %s" % e) |
20,468 | 8e38b0772dd4b15e2034130bda3d9ec823e57f3c | from typing import Tuple
import bpy
import os
import sys
import re
from pathlib import Path
import traceback
import subprocess
from . import fn
def get_last_traceback(to_clipboad=False) -> Tuple[int, str]:
    '''Build a human-readable summary of the last unhandled traceback.

    Reads sys.last_traceback / sys.last_type / sys.last_value (set by the
    interpreter after an uncaught exception) and sums them up in a string.

    :param to_clipboad: when True, also copy the message to the clipboard.
    :return: (error_code, message); error_code is 0 on success, 1 when no
        traceback info is available (message then holds the reason).
    '''
    import sys
    message = ''
    linum = ''
    if hasattr(sys, "last_traceback") and sys.last_traceback:
        # Walk to the innermost frame of the traceback chain.
        i = 0
        last=sys.last_traceback.tb_next
        tbo = None
        while last:
            i+=1
            tbo = last
            last = last.tb_next
            if i>100:
                # Safety net against pathological/looping traceback chains.
                print()
                return (1, "bad recursion")
        if not tbo: tbo = sys.last_traceback
        linum = sys.last_traceback.tb_lineno# first linum
        message += f'from line {str(linum)}\n'
        frame = str(tbo.tb_frame)
        if frame:
            if 'file ' in frame:
                # frame = 'file: ' + frame.split('file ')[1]
                # Keep only the file and line fragments of the frame repr.
                frame = '\n'.join(frame.split(', ')[1:3])
            message += f'{frame}\n'
    else:
        print()
        return (1, 'No error traceback found by sys module')
    if hasattr(sys, "last_type") and sys.last_type:
        # Prepend the exception class name, stripped of "<class '...'>".
        error_type = str(sys.last_type)
        error_type = error_type.replace("<class '", "").replace("'>","")
        message = f'type {error_type}\n{message}'
    if hasattr(sys, "last_value") and sys.last_value:
        message += f'error : {str(sys.last_value)}\n'
        if not linum and hasattr(sys.last_value, "lineno"):# maybe not usefull
            print('use "last_value" line num')
            message += f'line {str(sys.last_value.lineno)}\n'
    if not message :
        print()
        return (1, 'No message to display')
    if message and to_clipboad:
        bpy.context.window_manager.clipboard = message
    return (0, message)
def get_traceback_stack(tb=None):
    """Return [(frame, lineno), ...] for every level of traceback *tb*.

    Defaults to sys.last_traceback when no traceback is given; a leading
    entry without a frame is skipped.
    """
    current = sys.last_traceback if tb is None else tb
    if current and current.tb_frame is None:
        current = current.tb_next
    frames = []
    while current is not None:
        frames.append((current.tb_frame, current.tb_lineno))
        current = current.tb_next
    return frames
class DEV_OT_copy_last_traceback(bpy.types.Operator):
    # Operator: copy the last traceback summary to the system clipboard.
    bl_idname = "devtools.copy_last_traceback"
    bl_label = "Copy Last Traceback"
    bl_description = "Copy last traceback error in clipboard"
    bl_options = {"REGISTER"}
    def execute(self, context):
        # get_last_traceback copies to the clipboard itself; a non-zero
        # error code means there was nothing to copy.
        error, content = get_last_traceback(to_clipboad=True)
        if error:
            self.report({'ERROR'}, content)
            return {"CANCELLED"}
        return {"FINISHED"}
class DEV_OT_artificial_error(bpy.types.Operator):
    # Debug helper: deliberately raise an exception to test the error UI.
    bl_idname = "devtools.artificial_error"
    bl_label = "Artificial Error"
    bl_description = "Generate an artificial Error"
    bl_options = {"REGISTER"}
    def execute(self, context):
        ## Trigger zero Division Error
        provoked_error = 2/0
        return {"FINISHED"}
class DEV_OT_clear_last_traceback(bpy.types.Operator):
    # Operator: forget the interpreter's record of the last uncaught
    # exception (sys.last_traceback / last_value / last_type).
    bl_idname = "devtools.clear_last_traceback"
    bl_label = "Clear Last Traceback"
    bl_description = "Clear last traceback infos (deleting sys.last_traceback, etc)"
    bl_options = {"REGISTER", "INTERNAL"}
    def execute(self, context):
        # hasattr guards: these attributes only exist after an uncaught error.
        if hasattr(sys, 'last_traceback') and sys.last_traceback is not None:
            del sys.last_traceback
        if hasattr(sys, 'last_value') and sys.last_value is not None:
            del sys.last_value
        if hasattr(sys, 'last_type') and sys.last_type is not None:
            del sys.last_type
        return {"FINISHED"}
class DEV_OT_open_error_file(bpy.types.Operator):
bl_idname = "devtools.open_error_file"
bl_label = "Open Traceback Errors"
bl_description = "Open the file where there as been a traceback error"
bl_options = {"REGISTER"}
path_line : bpy.props.StringProperty(options={'SKIP_SAVE'})
use_external : bpy.props.BoolProperty(default=False, options={'SKIP_SAVE'})
from_clipboard : bpy.props.BoolProperty(default=False, options={'SKIP_SAVE'})
def invoke(self, context, event):
if self.path_line:
# print('self.path_line: ', self.path_line)#Dbg
if self.use_external:
editor = fn.get_addon_prefs().external_editor
if not editor:
mess = fn.missing_external_editor()
self.report({'WARNING'}, mess)
return {"CANCELLED"}
## Use passed line direcly when recalling operator
cmd = [editor, '--goto', self.path_line]
print('cmd: ', cmd)
## Note: Never get what happen with the shell argument
## True on windows and False on linux seem to work empirically...
subprocess.Popen(cmd, shell=sys.platform.startswith('win'))
else:
# Open file in blender
path, linum = self.path_line.rsplit(':', 1)
linum = int(linum)
fn.set_file_in_text_editor(path, linum=linum, context=context)
return {"FINISHED"}
pattern = r'[Ff]ile [\'\"](.*?)[\'\"], line (\d+),'
self.error_desc = None
self.error_list = []
if self.from_clipboard:
clip = context.window_manager.clipboard
try:
self.error_list = re.findall(pattern, clip)
except:
self.report({'ERROR'}, 'Failed to parse clipboard for filepath and line number')
return {"CANCELLED"}
else:
if not hasattr(sys, "last_traceback"):
self.report({'ERROR'}, 'No last traceback found with sys"')
return {"CANCELLED"}
'''## old method
stack = get_traceback_stack()
if stack is None:
self.report({'ERROR'}, 'No last traceback found using "sys.last_traceback"')
return {"CANCELLED"}
# first result of findall with pattern of first element of error (traceback frame)
self.error_list = [re.findall(pattern, str(error[0]))[0] for error in stack]
'''
tb_list = traceback.extract_tb(sys.last_traceback)
if not tb_list:
self.report({'ERROR'}, 'No last traceback found using "sys.last_traceback"')
return {"CANCELLED"}
## TODO: Handle case when started from Blender and have a script
## sometimes resolve() give a too long -not needed- url.
## Always resolve with list comprehension
# self.error_list = [(str(Path(t.filename).resolve()), t.lineno, t.line, t.name) for t in tb_list]
always_resolve = False # Only resolve on symlink
for t in tb_list:
# if bpy.data.filepath and t.filename.startswith(bpy.data.filepath):
file_path = Path(t.filename)
current_blend = Path(bpy.data.filepath).name
# Case when script executed from blend and is loaded externally
if file_path.parent.name == current_blend:
txt = bpy.data.texts.get(file_path.name)
if txt:
if txt.filepath:
file_path = Path(os.path.abspath(bpy.path.abspath(txt.filepath)))
if always_resolve or (file_path.exists() and file_path.is_symlink()):
file_path = file_path.resolve() # resolve symlink
self.error_list.append((str(file_path), t.lineno, t.line, t.name))
## add error type and description
error_type = str(sys.last_type)
error_type = error_type if error_type else "Error"
error_type = error_type.replace("<class '", "").replace("'>","")
error_value = sys.last_value
if error_value:
self.error_desc = f'{error_type} : {str(error_value)}\n'
if not self.error_list:
self.report({'ERROR'}, 'No filepath and line number found in clipboard')
return {"CANCELLED"}
return context.window_manager.invoke_props_dialog(self, width=500)
def draw(self, context):
layout = self.layout
col = layout.column()
for item in self.error_list:
path, line = item[0], item[1]
# print(path, ' ', line)
goto_line = f'{path}:{line}'
box = col.box()
boxcol = box.column()
boxcol.alignment = 'LEFT'
button_row = boxcol.row(align=True)
op = button_row.operator('devtools.open_error_file', text=f'{Path(path).name} : {line}', icon='MENU_PANEL')
op.path_line = goto_line
op.use_external = False
op = button_row.operator('devtools.open_error_file', text='', icon='TEXT')
op.path_line = goto_line
op.use_external = True
boxcol.label(text=path)
if len(item) > 3 and item[3]:
boxcol.label(text=f'in: {item[3]}')
if len(item) > 2 and item[2]:
boxcol.label(text=item[2])
col.separator()
row = layout.row()
row.alignment = 'LEFT'
row.operator('devtools.clear_last_traceback', text='Clear Traceback', icon='CANCEL')
if self.error_desc:
for l in self.error_desc.split('\n'):
row = col.row()
row.alignment = 'LEFT'
row.label(text=l)
def execute(self, context):
if self.path_line:
return {"FINISHED"}
return {"FINISHED"}
def help_error_top_bar(self, context):
    # Draw callback for the top bar: show an error-icon button (in the
    # right-aligned region only) while a last traceback is available.
    layout = self.layout
    if hasattr(sys, 'last_traceback') and sys.last_traceback:
        region = context.region
        if region.alignment == 'RIGHT':
            layout.operator("devtools.open_error_file", text = "", icon = 'ERROR')
def help_error_menu(self, context):
    # Draw callback appended to the Help menu: entries to open the last
    # traceback's files, either from sys state or from clipboard text.
    layout = self.layout
    layout.separator()
    ## titles:
    # Open Last Errors /or/ Open Last Traceback File
    # Open Errors From Clipboard /or/ Open File From Copied Error
    layout.operator('devtools.open_error_file', text='Open Traceback Errors', icon='FILE')
    layout.operator('devtools.open_error_file', text='Open Errors From Clipboard', icon='PASTEDOWN').from_clipboard = True
    # Copy is beta
    # layout.operator('devtools.copy_last_traceback', text='Copy Last Traceback', icon='COPYDOWN')
classes = (
DEV_OT_open_error_file,
DEV_OT_artificial_error,
DEV_OT_copy_last_traceback,
DEV_OT_clear_last_traceback,
)
def register():
    # Register all operator classes, then hook the draw callbacks into the
    # top bar (error icon) and the Help menu (error entries).
    for cls in classes:
        bpy.utils.register_class(cls)
    bpy.types.TOPBAR_HT_upper_bar.append(help_error_top_bar)
    bpy.types.TOPBAR_MT_help.append(help_error_menu)
def unregister():
    """Undo register(): detach draw callbacks, then unregister classes."""
    bpy.types.TOPBAR_MT_help.remove(help_error_menu)
    # Bug fix: help_error_top_bar was appended to TOPBAR_HT_upper_bar in
    # register(), so it must be removed from that same type — removing it
    # from TOPBAR_MT_help silently did nothing and leaked the top-bar
    # draw callback on addon disable.
    bpy.types.TOPBAR_HT_upper_bar.remove(help_error_top_bar)
    for cls in classes:
        bpy.utils.unregister_class(cls)
|
20,469 | ae16ad0bbe8e170334550edb091bdd3291bb4593 | '''
@Author : now more
@Contact : lin.honghui@qq.com
@LastEditors : now more
@Description :
@LastEditTime: 2020-07-29 09:52:06
'''
'''
@Author : now more
@Contact : lin.honghui@qq.com
LastEditors: Please set LastEditors
@Description :
LastEditTime: 2020-07-28 21:22:17
'''
import torch.nn as nn
from utils import weights_init_kaiming,weights_init_classifier
import model.layers as layers
class ReductionHead(nn.Module):
    """Feature-reduction head: 1x1 conv + BN + LeakyReLU, then a BNNeck.

    Reduces ``in_feat`` channels down to ``reduction_dim`` and returns the
    batch-normalized feature. The BNNeck's bias is frozen ("no shift"),
    as in the re-ID BNNeck trick.
    """
    def __init__(self, in_feat,reduction_dim):
        # in_feat: number of input channels; reduction_dim: output channels.
        super(ReductionHead,self).__init__()
        self.reduction_dim = reduction_dim
        self.bottleneck = nn.Sequential(
            nn.Conv2d(in_feat, reduction_dim, 1, 1, bias=False),
            nn.BatchNorm2d(reduction_dim),
            nn.LeakyReLU(0.1, inplace=True),
        )
        self.bnneck = nn.BatchNorm2d(reduction_dim)
        self.bnneck.bias.requires_grad_(False) # no shift
        # Kaiming initialization via project helpers.
        self.bottleneck.apply(weights_init_kaiming)
        self.bnneck.apply(weights_init_kaiming)
    def forward(self, features):
        # NCHW input; the [..., 0, 0] indexing keeps only the top-left
        # spatial position — assumes upstream pooling yields 1x1 maps,
        # TODO confirm against callers.
        global_feat = self.bottleneck(features)
        bn_feat = self.bnneck(global_feat)
        bn_feat = bn_feat[..., 0, 0]
        # if not self.training:
        #     return bn_feat,None
        return bn_feat
|
20,470 | 22412b3ae332cdb49ca5f6ac241af9591efd2e0f | # Copyright 2019 ICON Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import copy
from typing import Tuple, Any, Union
def int_to_bytes(v: int) -> bytes:
    """Encode *v* as the shortest big-endian signed two's-complement bytes."""
    magnitude = v + (v < 0)  # maps -1 -> 0 etc. so bit_length() sizes correctly
    length = magnitude.bit_length() // 8 + 1
    return v.to_bytes(length, byteorder="big", signed=True)
def bytes_to_int(v: bytes) -> int:
    """Decode big-endian signed two's-complement bytes into an int."""
    return int.from_bytes(v, byteorder="big", signed=True)
class TypeTag(object):
    """Wire-format type tags used by the encode/decode helpers below."""
    NIL = 0
    DICT = 1
    LIST = 2
    BYTES = 3
    STRING = 4
    BOOL = 5
    CUSTOM = 10  # base value for application-defined tags
    INT = CUSTOM + 1
    ADDRESS = CUSTOM
class Address(object):
    """ICON address: 1 prefix byte (0x00 = "hx" EOA, 0x01 = "cx" contract)
    followed by a 20-byte body.

    Accepts either the 21-byte binary form or the 42-character text form
    ("hx"/"cx" followed by 40 hex digits).
    """

    def __init__(self, obj):
        if isinstance(obj, bytes):
            if len(obj) != 21:
                raise Exception("IllegalFormat")
            self.__bytes = copy(obj)
            self.__check_prefix()
        elif isinstance(obj, str):
            self.__bytes = Address.__parse_str(obj)
        else:
            raise Exception(f"IllegalFormat: type={type(obj)}")

    @staticmethod
    def __parse_str(s: str) -> bytes:
        # Shared text parser (previously duplicated between __init__ and
        # from_str): "cx" maps to prefix 0x01, anything else to 0x00.
        if len(s) != 42:
            raise Exception("IllegalFormat")
        prefix = bytes([s[:2] == "cx"])
        body = bytes.fromhex(s[2:])
        return prefix + body

    @staticmethod
    def from_str(s: str) -> 'Address':
        """Alternate constructor from the 42-character text form."""
        return Address(Address.__parse_str(s))

    def to_bytes(self):
        """Return a copy of the 21-byte binary representation."""
        return copy(self.__bytes)

    def __repr__(self):
        body = self.__bytes[1:].hex()
        if self.__bytes[0] == 0:
            return "hx" + body
        else:
            return "cx" + body

    def __check_prefix(self):
        # Binary input can carry any leading byte; only 0 and 1 are legal.
        prefix = self.__bytes[0]
        if prefix != 0 and prefix != 1:
            raise Exception(f"IllegalFormat: prefix={hex(prefix)}")
def encode_any(o: Any) -> Tuple[int, Any]:
    """Recursively encode a Python value into a (TypeTag, payload) pair.

    Containers are encoded element-wise; primitives become their byte form.
    Raises for any unsupported type.
    """
    if o is None:
        return TypeTag.NIL, None
    if isinstance(o, dict):
        return TypeTag.DICT, {key: encode_any(value) for key, value in o.items()}
    if isinstance(o, (list, tuple)):
        return TypeTag.LIST, [encode_any(item) for item in o]
    if isinstance(o, bytes):
        return TypeTag.BYTES, o
    if isinstance(o, str):
        return TypeTag.STRING, o.encode('utf-8')
    # bool must be tested before int: bool is a subclass of int.
    if isinstance(o, bool):
        return TypeTag.BOOL, b'\x01' if o else b'\x00'
    if isinstance(o, int):
        return TypeTag.INT, int_to_bytes(o)
    if isinstance(o, Address):
        return TypeTag.ADDRESS, o.to_bytes()
    raise Exception(f"UnknownType: {type(o)}")
def decode(tag: int, val: bytes) -> 'Any':
    """Decode a primitive payload *val* according to its TypeTag *tag*.

    Raises for an unknown tag or a malformed boolean payload.
    """
    if tag == TypeTag.BYTES:
        return val
    elif tag == TypeTag.STRING:
        return val.decode('utf-8')
    elif tag == TypeTag.INT:
        return bytes_to_int(val)
    elif tag == TypeTag.BOOL:
        # Booleans are strictly b'\x00' / b'\x01'; anything else is rejected.
        if val == b'\x00':
            return False
        elif val == b'\x01':
            return True
        else:
            # NOTE(review): message contains a stray ')' — cosmetic only.
            raise Exception(f'IllegalBoolBytes{val.hex()})')
    elif tag == TypeTag.ADDRESS:
        return Address(val)
    else:
        raise Exception(f"UnknownType: {tag}")
def decode_any(to: list) -> Any:
    """Recursively decode a (tag, value) pair produced by encode_any.

    Containers (DICT/LIST) are walked recursively; everything else is
    delegated to decode().
    """
    tag: int = to[0]
    val: Union[bytes, dict, list] = to[1]
    if tag == TypeTag.NIL:
        return None
    elif tag == TypeTag.DICT:
        obj = {}
        for k, v in val.items():
            if isinstance(k, bytes):
                # Keys may arrive as raw bytes off the wire; normalize to str.
                k = k.decode('utf-8')
            obj[k] = decode_any(v)
        return obj
    elif tag == TypeTag.LIST:
        obj = []
        for v in val:
            obj.append(decode_any(v))
        return obj
    else:
        return decode(tag, val)
def decode_param(typ: str, val: bytes) -> Any:
    """Decode a parameter *val* given its declared type name *typ*.

    Only 'Address', 'int' and 'bytes' are handled; any other type name
    falls through and returns None — presumably unreachable for valid
    inputs, TODO confirm against callers.
    """
    # print(f'  ** typ={typ} val={val} len={len(val)}')
    if typ == 'Address':
        return decode(TypeTag.ADDRESS, val)
    elif typ == 'int':
        return decode(TypeTag.INT, val)
    elif typ == 'bytes':
        return decode(TypeTag.BYTES, val)
|
20,471 | 75b039d868d1115dc878e3da9e7ff8102f3d787b | from wave_file_manager import wave_file_read_samples, wave_file_write_samples
from exclude_silence_processing import get_samples_without_silences
# 1 - Read the wav file and collect its samples
#input_filename = "test1.wav"
#input_filename = "test_glitch.wav"
#input_filename = "test_phrase_informatique.wav"
input_filename = "test_anglais.wav"
wav_samples = wave_file_read_samples(input_filename)
# Bug fix: compare against None with "is" (PEP 8), and exit with a non-zero
# status on failure so calling scripts can detect the error.
if wav_samples is None:
    print("ERREUR: Aucun sample à la lecture du fichier wav")
    exit(1)
# 2 - Processing: silence-removal algorithm
wav_samples_without_silences = get_samples_without_silences(wav_samples)
# 3 - Write the output wav file ("<name>_OUT.wav")
output_filename = input_filename[:-4] + "_OUT" + input_filename[-4:]
print("OUTPUT FILENAME", output_filename)
wave_file_write_samples(output_filename, wav_samples_without_silences)
|
20,472 | a2aa19ad8be61d2a2c0ef7bf1de2177bd1a63b58 | from django.views import generic
from django.http import HttpResponse
from django.views.generic import View
from django.views.generic.base import TemplateView
class IndexView(TemplateView):
    """Render the static landing page (index.html)."""
    template_name = 'index.html'
|
20,473 | 5b912aa507129c3e5bee56954e419de84301851f | from src.classes.compte.Courant import Courant
from src.classes.compte.Epargne import Epargne
from src.classes.client.Client import Client
if __name__ == "__main__":
cl = Client(id=1, nom="Amine")
c = Courant(numCompte=12, solde=100)
cl.setCompte(c)
c.debiter(20)
c.crediter(500)
e = Epargne(numCompte=13, solde=1000, taux=7)
cl.setCompte(e)
e.GetNewSolde()
comptes = cl.getCompte()
print("Compte du client: ", cl.getNom())
print("mes comptes: ", comptes)
print("Compte Courant: n°{} - solde: {}".format(c.getNumCompte(), c.getSolde()))
print("Compte Epargne n°{} - solde: {}".format(e.getNumCompte(), e.getSolde()))
|
20,474 | 70bfed44ab6007159aeabfedd22b0d5ea8cf8f26 | from django.shortcuts import render
from . next_day import w,t,nday,h,p
from django.contrib.auth.models import User
from . yahoo import climate #import climate.time,temp,wind_dir,wind_speed,sunraise,humidity,presure,visibility
from datetime import datetime
from django.shortcuts import render
from rest_framework.views import APIView
from django.views.generic.edit import FormView
from rest_framework.response import Response
from django.http import HttpResponse
from rest_framework.renderers import JSONRenderer
from rest_framework.parsers import JSONParser
from .form import Signupform, Signinform, Resetform, UserDetail
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from . models import weather_data,pre
def data(request):
return render(request, 'water\\index.html', {'username': request.user.username})
def partners(request):
return render(request, 'analy\patners.html', {'username': request.user.username})
def about(request):
return render(request, 'analy\\about.html', {'username': request.user.username})
def graph(request):
data = weather_data.objects.all()
return render(request, 'analy\chart1.html',{'temp': data})
def pregraph(request):
data = weather_data.objects.all()
return render(request, 'analy\chart4.html',{'temp': data})
def humgraph(request):
data = weather_data.objects.all()
return render(request, 'analy\charth.html',{'temp': data})
def visigraph(request):
data = weather_data.objects.all()
return render(request, 'analy\chartv.html',{'temp': data})
def sig(request):
context = {'form': Signinform(), 'pad': '30', 'val': 'Login', 'nam': 'signin', 'new': True}
form = Signinform(request.POST)
if request.user.is_authenticated():
print("login")
if request.method == 'POST' and form.is_valid():
if 'signin' in request.POST:
username1 = form.cleaned_data['user_name']
password1 = form.cleaned_data['password']
user = authenticate(request, username=username1, password=password1)
if user is not None:
login(request, user)
return HttpResponse('login sucess')
else:
return HttpResponse('login no possible')
else:
return render(request, 'analy\sign.html',context)
def graphwind(request):
data = weather_data.objects.all()
return render(request, 'analy\chart2.html',{'temp': data})
def post(request):
form = Signupform(request.POST)
if request.method == 'POST' and form.is_valid():
if 'signup' in request.POST:
username = form.cleaned_data['user_name']
password = form.cleaned_data['password']
firstname = form.cleaned_data['first_name']
lastname = form.cleaned_data['last_name']
email = form.cleaned_data['email']
user = User.objects.create_user(username, email, password)
user.last_name = lastname
user.first_name = firstname
data1 = user.save()
print(data1)
return render(request, 'vv/index.html', {'val': 'singup', 'nam': 'singup'})
if 'signin' in request.POST:
username = form.cleaned_data['user_name']
password = form.cleaned_data['last_name']
return render(request, 'vv/templates/water/form.html')
if 'change' in request.POST:
username = form.cleaned_data['user_name']
password = form.cleaned_data['last_name']
return render(request, 'vv/templates/water/form.html')
else:
query1 = ['signup', 'signin', 'change']
return render(request, 'vv/templates/water/form.html', {'form': Signupform(), 'val': 'signup', 'nam': 'signup'})
def log_in(request):
context = {'form': Signinform(), 'pad': '30', 'val': 'Login', 'nam': 'signin', 'new': True}
form = Signinform(request.POST)
if request.method == 'POST' and form.is_valid():
if 'signin' in request.POST:
username1 = form.cleaned_data['user_name']
password1 = form.cleaned_data['password']
user = authenticate(request, username=username1, password=password1)
if user is not None:
login(request, user)
return HttpResponse('login sucess')
else:
return HttpResponse('login no possible')
else:
return render(request, 'analy/sign.html', context)
def log_out(request):
logout(request)
return HttpResponse('logout sucess')
def signup(request):
context = {'form': Signupform(), 'pad': '7', 'val': 'signup', 'nam': 'signup'}
form = Signupform(request.POST)
if request.method == 'POST' and form.is_valid():
if 'signup' in request.POST:
username = form.cleaned_data['user_name']
password = form.cleaned_data['password']
firstname = form.cleaned_data['first_name']
lastname = form.cleaned_data['last_name']
email = form.cleaned_data['email']
user = User.objects.create_user(username, email, password)
user.last_name = lastname
user.first_name = firstname
data1 = user.save()
return HttpResponse('signup sucess')
else:
return render(request, 'analy/form.html', context)
def change(request):
context = {'form': Resetform(), 'val': 'Reset password', 'nam': 'change'}
form = Resetform(request.POST)
if request.method == 'POST' and form.is_valid():
if 'change' in request.POST:
umail = form.cleaned_data['email']
return HttpResponse('reset your password success fully')
else:
return render(request, 'vv/templates/water/form.html', context)
def map(request):
data = weather_data.objects.all()[::-1]
# Reserved.objects.filter(client=client_id).order_by('-check_in')
return render(request,'analy/index.html',{'username':request.user.username,'temp':data})
def home(request):
data = weather_data.objects.all()
return render(request,'analy/home.html',{'username':request.user.username,'temp':data})
def anal(request):
data = weather_data.objects.all()
return render(request,'analy/anal.html',{'username':request.user.username,'temp':data})
# Create your views here.
def dash(request):
    """Dashboard view: render the home template with all weather records.

    Bug fix: the final render referenced an undefined name ``data``
    (NameError on every request); fetch the weather_data queryset the same
    way the other views (home, anal) do.
    """
    citis=['nagercoil','thirunelveli','chennai','coimbatore','madurai','pollachi','trichy','tuticorin','salem','erode','theni']
    t = str(datetime.now())
    #z = weather_data.objects.latest('time')
    #w = weather_data(place='nagercoil',wind_speed=climate.wind_speed,wind_direction=wind_dir,time=t[11:16],temp=temp,astronomy=sunraise,date=date,presure=presure,humidity=humidity,visibility=visibility)
    #w.save()
    #climate()
    # return render(request,'analy/dash.html',{'time': datetime.now(),'temp': weather_data.objects.all(),'user':request.user.username})
    data = weather_data.objects.all()
    return render(request, 'analy/home.html', {'username': request.user.username, 'temp': data})
def test1(request,data):
return HttpResponse(data)
def choose(request):
pre.objects.all().delete()
for k in range(len(w)):
k = int(k)
d = pre(temp = t[k],date=nday[k],wind =w[k],presure =p[k],humidity=h[k])
d.save()
place = ''
citis =['Nagercoil', 'Tirunelveli', 'Chennai', 'Coimbatore', 'Madurai','Tiruchirappalli','Tuticorin','Salem', 'Erode', 'Theni']
if 'city' in request.GET:
place = request.GET.get('place')
data = weather_data.objects.filter(place=place)
dataf = pre.objects.all();
return render(request, 'analy/weather.html', {'username': request.user.username, 'temp': data,'cities':citis,'place': place,'f':dataf})
return render(request,'analy/weather.html',{'cities':citis,'place': place}) |
20,475 | 506e3b1e0765c98ab252df53836c072e9e9eaa98 | # -*- coding: utf-8 -*-
"""
Created on 5:56 PM, 11/4/15
@author: wt
"""
import sys
sys.path.append('..')
from networkx import *
import math
import matplotlib.pyplot as plt
import numpy as np
import csv
from sklearn.metrics import mean_squared_error
import os
# load a network from file (directed weighted network)
def load_network():
    """Build a weighted DiGraph from the edge CSV; repeated edges add weight."""
    DG = DiGraph()
    # /data/reference/sample_reply_mention.csv
    # /data/echelon/mrredges-no-tweet-no-retweet-poi-counted.csv
    # file_path = os.sep.join(os.path.dirname(__file__).split(os.sep)[:-1])+'/data/reference/sample_reply_mention.csv'
    file_path = '../data/ed/ed_net.csv'
    print file_path
    with open(file_path, 'rt') as fo:
        reader = csv.reader(fo)
        first_row = next(reader)  # skip header row
        for row in reader:
            # NOTE(review): edge direction is column1 -> column0 — confirm the
            # CSV really stores (target, source) in that order.
            n1 = (row[1])
            n2 = (row[0])
            # b_type = row[2]
            weightv = 1
            # reply-to mentioned
            if (DG.has_node(n1)) and (DG.has_node(n2)) and (DG.has_edge(n1, n2)):
                DG[n1][n2]['weight'] += weightv
            else:
                DG.add_edge(n1, n2, weight=weightv)
    return DG
def get_adjacency_matrix(DG):
    """Return the binary (0/1) adjacency matrix of the graph."""
    # weight=None yields presence/absence; passing a weight key would give
    # edge strengths instead.
    return adjacency_matrix(DG, weight=None)
def out_adjacency_matrix(DG):
    """Write the 0/1 adjacency matrix to degree_adjacency_matrix.csv."""
    A = adjacency_matrix(DG, weight=None) # degree matrix, i.e. 1 or 0
    #A = adjacency_matrix(DG) # strength matrix, i.e., the number of edge
    Ade = A.todense()
    # Node ids are assumed numeric strings (Python 2: map returns a list).
    Nlist = map(int, DG.nodes())
    print len(Nlist)
    with open('degree_adjacency_matrix.csv', 'wb') as csvfile:
        spamwriter = csv.writer(csvfile, delimiter=',',
                                quotechar='|', quoting=csv.QUOTE_MINIMAL)
        # First row/column carry node ids; cell (i, j) marks edge i -> j.
        spamwriter.writerow([0]+Nlist)
        for index in xrange(len(Nlist)):
            spamwriter.writerow([Nlist[index]] + Ade[index].getA1().tolist())
def load_poi():
    """Load user profiles from the poi CSV.

    Returns (header_row, {user_id: row}); newlines in the description column
    (index 3) are flattened to spaces.
    """
    # Get profiles of all users
    poi = {}
    file_path = os.sep.join(os.path.dirname(__file__).split(os.sep)[:-1])+'/data/ed/ed_poi.csv'
    f = open(file_path, 'rb')  # NOTE(review): file handle is never closed
    reader = csv.reader(f, lineterminator='\n')
    first_row = next(reader)
    for row in reader:
        des = row[3]
        row[3] = des.replace('\n', ' ').replace('\r', ' ').replace('\r\n', ' ').replace('\n\r', ' ')
        # print '-------------'
        # print row[3]
        poi[row[0]] = row
    # return the description in the FIRST row and contents
    return (first_row, poi)
def out_targted_poi(DG):
    """Write the profiles of the nodes present in DG to targeted-poi.csv."""
    # print 'Output poi in DG network'
    Nlist = map(int, DG.nodes())
    print len(Nlist)
    first_row, poi = load_poi()
    csvfile = open('targeted-poi.csv', 'wb')  # NOTE(review): never closed
    spamwriter = csv.writer(csvfile)
    spamwriter.writerow(first_row)
    for index in xrange(len(Nlist)):
        # print poi.get(str(Nlist[index]))
        # Missing profiles write a literal None row rather than failing.
        spamwriter.writerow(poi.get(str(Nlist[index]), None))
def plot_whole_network(DG):
# pos = random_layout(DG)
# pos = shell_layout(DG)
pos = spring_layout(DG)
# pos = spectral_layout(DG)
# plt.title('Plot of Network')
draw(DG, pos)
plt.show()
def pdf(data, xmin=None, xmax=None, linear_bins=False, **kwargs):
    """Empirical probability density of *data*.

    Unit-width bins when linear_bins is True, otherwise ~10 logarithmically
    spaced bins per decade. Empty bins are dropped.
    Returns (bin_centers, densities).
    """
    if not xmax:
        xmax = max(data)
    if not xmin:
        xmin = min(data)
    if linear_bins:
        bins = range(int(xmin), int(xmax))
    else:
        log_min_size = np.log10(xmin)
        log_max_size = np.log10(xmax)
        number_of_bins = np.ceil((log_max_size-log_min_size)*10)
        # NOTE(review): number_of_bins is a float; newer NumPy requires an
        # int for `num` — confirm the pinned NumPy version accepts this.
        bins = np.unique(
            np.floor(
                np.logspace(
                    log_min_size, log_max_size, num=number_of_bins)))
    hist, edges = np.histogram(data, bins, density=True)
    bin_centers = (edges[1:]+edges[:-1])/2.0
    # Keep only non-empty bins so log-log plots have no zero points.
    new_x, new_y = [], []
    for index in xrange(len(hist)):
        if hist[index] != 0:
            new_x.append(bin_centers[index])
            new_y.append(hist[index])
    return new_x, new_y
def pearson(x, y):
    """Pearson correlation coefficient of two equal-length lists."""
    # calculate the pearson correlation of two list
    n = len(x)
    avg_x = float(sum(x))/n
    avg_y = float(sum(y))/n
    print 'The means of two lists:', avg_x, avg_y
    diffprod = 0.0
    xdiff2 = 0.0
    ydiff2 = 0.0
    for idx in range(n):
        xdiff = x[idx] - avg_x
        ydiff = y[idx] - avg_y
        diffprod += xdiff*ydiff
        xdiff2 += xdiff*xdiff
        ydiff2 += ydiff*ydiff
    # NOTE(review): raises ZeroDivisionError when either list is constant.
    return diffprod/math.sqrt(xdiff2*ydiff2)
def drop_zeros(list_a):
    """Return a copy of *list_a* with all non-positive entries removed."""
    kept = []
    for value in list_a:
        if value > 0:
            kept.append(value)
    return kept
def rmse(predict, truth):
    """Root-mean-squared error of a prediction against ground truth."""
    return math.sqrt(mean_squared_error(truth, predict))
def mean_bin(list_x, list_y, linear_bins=False):
    """Bin list_x (linearly or log-spaced) and average x and y per bin.

    Returns (mean_x_per_bin, mean_y_per_bin); empty bins are skipped.
    """
    # the returned values are raw values, not logarithmic values
    size = len(list_x)
    xmin = min(list_x)
    xmax = max(list_x)
    if linear_bins:
        bins = range(int(xmin), int(xmax+1))
    else:
        log_min_size = np.log10(xmin)
        log_max_size = np.log10(xmax+1)
        number_of_bins = np.ceil((log_max_size-log_min_size)*10)
        # NOTE(review): float `num` — see the same note on pdf().
        bins = np.unique(
            np.floor(
                np.logspace(
                    log_min_size, log_max_size, num=number_of_bins)))
    new_bin_meanx_x, new_bin_means_y = [], []
    hist_x = np.histogram(list_x, bins)[0]
    hist_x_w = np.histogram(list_x, bins, weights=list_x)[0].astype(float)
    for index in xrange(len(bins)-1):
        if hist_x[index] != 0:
            # mean x of the bin = weighted sum / raw count
            new_bin_meanx_x.append(hist_x_w[index]/hist_x[index])
            range_min, range_max = bins[index], bins[index+1]
            # mean y over the x values falling inside [range_min, range_max)
            sum_y = 0.0
            for i in xrange(size):
                key = list_x[i]
                if (key >= range_min) and (key < range_max):
                    sum_y += list_y[i]
            new_bin_means_y.append(sum_y/hist_x[index])
    return new_bin_meanx_x, new_bin_means_y
def cut_lists(list_x, list_y, fit_start=-1, fit_end=-1):
    """Restrict paired lists to x values in [fit_start, fit_end).

    A bound of -1 means "no bound on that side"; y values follow their
    paired x. Returns (list_x, list_y).
    """
    if fit_start != -1:
        kept = [(x, y) for x, y in zip(list_x, list_y) if x >= fit_start]
        list_x = [x for x, _ in kept]
        list_y = [y for _, y in kept]
    if fit_end != -1:
        kept = [(x, y) for x, y in zip(list_x, list_y) if x < fit_end]
        list_x = [x for x, _ in kept]
        list_y = [y for _, y in kept]
    return (list_x, list_y)
def lr_ls(list_x, list_y, fit_start=-1, fit_end=-1):
    """Least-squares power-law fit (linear fit in log10-log10 space).

    Returns (x values kept for the fit, fitted y values on the raw scale).
    """
    list_x, list_y = cut_lists(list_x, list_y, fit_start, fit_end)
    X = np.asarray(list_x, dtype=float)
    Y = np.asarray(list_y, dtype=float)
    logX = np.log10(X)
    logY = np.log10(Y)
    coefficients = np.polyfit(logX, logY, 1)
    polynomial = np.poly1d(coefficients)
    print 'Polynomial: (', fit_start, fit_end, ')', polynomial
    logY_fit = polynomial(logX)
    print 'Fitting RMSE(log)', rmse(logY, logY_fit)
    print 'Fitting RMSE(raw)', rmse(Y, np.power(10, logY_fit))
    # print Y
    return (list_x, np.power(10, logY_fit))
    # return logX, logY_fit
def lr_ml(list_x, list_y, fit_start=-1, fit_end=-1):
    """Maximum-likelihood fit — unimplemented stub; returns None.

    The log arrays are computed but currently discarded.
    """
    # TODO
    list_x, list_y = cut_lists(list_x, list_y, fit_start, fit_end)
    X = np.asarray(list_x, dtype=float)
    Y = np.asarray(list_y, dtype=float)
    logX = np.log10(X)
    logY = np.log10(Y)
def lr_ks(list_x, list_y, fit_start=-1, fit_end=-1):
    """KS-based fit — unimplemented stub; returns None.

    The log arrays are computed but currently discarded.
    """
    # TODO
    list_x, list_y = cut_lists(list_x, list_y, fit_start, fit_end)
    X = np.asarray(list_x, dtype=float)
    Y = np.asarray(list_y, dtype=float)
    logX = np.log10(X)
    logY = np.log10(Y)
def neibors_static(DG, node, neib='pre', direct='in', weight=False):
    """Average degree (or strength) over a node's neighbours.

    neib:   'suc' -> successors, anything else -> predecessors.
    direct: 'out' -> out-degree of each neighbour, otherwise in-degree.
    weight: True -> weighted degree (strength) instead of edge count.
    Returns 0.0 when there are no neighbours of the requested kind.
    """
    neighbours = DG.successors(node) if neib == 'suc' else DG.predecessors(node)
    w = 'weight' if weight else None
    if direct == 'out':
        degrees = [DG.out_degree(nb, weight=w) for nb in neighbours]
    else:
        degrees = [DG.in_degree(nb, weight=w) for nb in neighbours]
    if len(neighbours):
        return float(sum(degrees)) / len(neighbours)
    else:
        return 0.0
def dependence(listx, listy, l, xlabel, ylabel, start=1, end=1000):
    """Scatter listy vs listx on log-log axes with binned means and a
    power-law fit over [start, end); *l* labels the series."""
    plt.clf()
    plt.scatter(listx, listy, s=20, c='#fee8c8', marker='+', label='raw '+l)
    ax = plt.gca()
    xmeans, ymeans = mean_bin(listx, listy)
    ax.scatter(xmeans, ymeans, s=50, c='#fdbb84', marker='o', label='binned '+l)
    xfit, yfit = lr_ls(xmeans, ymeans, start, end)
    ax.plot(xfit, yfit, c='#e34a33', linewidth=2, linestyle='--', label='Fitted '+l)
    ax.set_xscale("log")
    ax.set_yscale("log")
    ax.set_ylabel(ylabel)
    ax.set_xlabel(xlabel)
    ax.set_xlim(xmin=1)
    ax.set_ylim(ymin=1)
    handles, labels = ax.get_legend_handles_labels()
    leg = ax.legend(handles, labels, loc=4)
    leg.draw_frame(True)
    plt.show()
'''Plot PDF'''
def pdf_plot(data, name, fit_start, fit_end):
    """Plot the PDF of *data* (raw, log-binned, and power-law fit) on
    log-log axes; the fit is restricted to [fit_start, fit_end)."""
    # plt.gcf()
    # data = outstrength
    list_x, list_y = pdf(data, linear_bins=True)
    plt.plot(list_x, list_y, 'r+', label='Raw '+name)
    ax = plt.gca()
    list_x, list_y = pdf(data)
    ax.plot(list_x, list_y, 'ro', label='Binned '+name)
    # list_fit_x, list_fit_y = lr_ls(list_x, list_y, 1, 100)
    # ax.plot(list_fit_x, list_fit_y, 'b--', label='Fitted outstrength')
    list_fit_x, list_fit_y = lr_ls(list_x, list_y, fit_start, fit_end)
    ax.plot(list_fit_x, list_fit_y, 'b--', label='Fitted '+name)
    # data = outstrength
    # list_x, list_y = pdf(data, linear_bins=True)
    # ax.plot(list_x, list_y, 'b+', label='Raw outstrength')
    # ax = plt.gca()
    # list_x, list_y = pdf(data)
    # ax.plot(list_x, list_y, 'bo', label='Binned outstrength')
    # list_fit_x, list_fit_y = lr_ls(list_x, list_y, 50)
    # ax.plot(list_fit_x, list_fit_y, 'b--', label='Fitted outstrength')
    ax.set_xscale("log")
    ax.set_yscale("log")
    ax.set_xlabel('k')
    ax.set_ylabel('p(k)')
    ax.set_xlim(xmin=1)
    ax.set_ylim(ymax=1)
    handles, labels = ax.get_legend_handles_labels()
    leg = ax.legend(handles, labels, loc=0)
    leg.draw_frame(True)
    plt.show()
# network analysis
# Driver: load the graph, print basic stats, then collect per-node degree,
# strength and neighbour statistics for the plots below.
DG = load_network()
print 'The number of nodes: %d' %(DG.order())
print 'The number of nodes: %d' %(DG.__len__())
print 'The number of nodes: %d' %(DG.number_of_nodes())
print 'The number of edges: %d' %(DG.size())
print 'The number of self-loop: %d' %(DG.number_of_selfloops())
# plot_whole_network(DG)
# plot_whole_network(DG)
# G = DG.to_undirected()
# print 'Network is connected:', (nx.is_connected(G))
# print 'The number of connected components:', (nx.number_connected_components(G))
# largest_cc = max(nx.connected_components(G), key=len)
#
#
# for node in DG.nodes():
#     if node not in largest_cc:
#         DG.remove_node(node)
print 'The plot of in-degree and out-degree of nodes'
print 'Node \t In \t Out \t In+Out'
indegree, outdegree, instrength, outstrength = [],[],[],[]
suc_in_d, suc_out_d, pre_in_d, pre_out_d = [], [], [], []
suc_in_s, suc_out_s, pre_in_s, pre_out_s = [], [], [], []
for node in DG.nodes():
    # print 'Degree: %s \t %d \t %d \t %d' %(node, DG.in_degree(node), DG.out_degree(node), DG.degree(node))
    # print 'Strength: %s \t %d \t %d \t %d' %(node, DG.in_degree(node, weight='weight'), DG.out_degree(node, weight='weight'), DG.degree(node, weight='weight'))
    in_d, out_d, in_s, out_s = DG.in_degree(node), DG.out_degree(node), DG.in_degree(node, weight='weight'), DG.out_degree(node, weight='weight')
    # Only nodes with BOTH incoming and outgoing edges are kept.
    if in_d and out_d:
        indegree.append(in_d)
        outdegree.append(out_d)
        instrength.append(in_s)
        outstrength.append(out_s)
        # print 'node',node,'indegree', in_d, 'outdegree', out_d
        suc_in_d.append(neibors_static(DG, node, 'suc', 'in', False))
        suc_out_d.append(neibors_static(DG, node, 'suc', 'out', False))
        pre_in_d.append(neibors_static(DG, node, 'pre', 'in', False))
        pre_out_d.append(neibors_static(DG, node, 'pre', 'out', False))
        suc_in_s.append(neibors_static(DG, node, 'suc', 'in', True))
        suc_out_s.append(neibors_static(DG, node, 'suc', 'out', True))
        pre_in_s.append(neibors_static(DG, node, 'pre', 'in', True))
        pre_out_s.append(neibors_static(DG, node, 'pre', 'out', True))
# pdf_plot(indegree, 'indegree', 100, 1000)
# dependence(indegree, outdegree, '$k_o(k_i)$', 'indegree', 'outdegree', 1, 300)
# dependence(outdegree, indegree, '$k_i(k_o)$', 'outdegree', 'indegree')
# dependence(instrength, outstrength, '$s_o(s_i)$', 'instrength', 'outstrength', 50, 1700)
# dependence(outstrength, instrength, '$s_i(s_o)$', 'outstrength', 'instrength')
# dependence(indegree, pre_in_d, '$k_{i}^{pre}(k_i)$', 'indegree', 'Avg. Indegree of predecessors', 50)
# dependence(indegree, pre_out_d, '$k_{o}^{pre}(k_i)$', 'indegree', 'Avg. Outdegree of predecessors', 50)
# dependence(indegree, suc_in_d, '$k_{i}^{suc}(k_i)$', 'indegree', 'Avg. Indegree of successors', 50)
# dependence(indegree, suc_out_d, '$k_{o}^{suc}(k_i)$', 'indegree', 'Avg. Outdegree of successors', 50)
# dependence(outdegree, pre_in_d, '$k_{i}^{pre}(k_o)$', 'outdegree', 'Avg. Indegree of predecessors', 50)
# dependence(outdegree, pre_out_d, '$k_{o}^{pre}(k_o)$', 'outdegree', 'Avg. Outdegree of predecessors', 50)
# dependence(outdegree, suc_in_d, '$k_{i}^{suc}(k_o)$', 'outdegree', 'Avg. Indegree of successors', 50)
# dependence(outdegree, suc_out_d, '$k_{o}^{suc}(k_o)$', 'outdegree', 'Avg. Outdegree of successors', 50)
# dependence(instrength, pre_in_s, '$s_{i}^{pre}(s_i)$', 'Instrength', 'Avg. instrength of predecessors', 50)
# dependence(instrength, pre_out_s, '$s_{o}^{pre}(s_i)$', 'Instrength', 'Avg. outstrength of predecessors', 50)
# dependence(instrength, suc_in_s, '$s_{i}^{suc}(s_i)$', 'Instrength', 'Avg. instrength of successors', 50)
# dependence(instrength, suc_out_s, '$s_{o}^{suc}(s_i)$', 'Instrength', 'Avg. outstrength of successors', 50)
# dependence(outstrength, pre_in_d, '$s_{i}^{pre}(s_o)$', 'Outstrength', 'Avg. instrength of predecessors', 50)
# dependence(outstrength, pre_out_d, '$s_{o}^{pre}(s_o)$', 'Outstrength', 'Avg. outstrength of predecessors', 50)
# dependence(outstrength, suc_in_d, '$s_{i}^{suc}(s_o)$', 'Outstrength', 'Avg. instrength of successors', 50)
# dependence(outstrength, suc_out_d, '$s_{o}^{suc}(s_o)$', 'Outstrength', 'Avg. outstrength of successors', 50)
# print 'pearson correlation of indegree and outdegree: %f' %(pearson(indegree, instrength))
# print 'pearson correlation of instrength and outstrength: %f' %(pearson(outdegree, outstrength))
#
# print 'radius: %d' %(radius(DG))
# print 'diameter: %d' %(diameter(DG))
# print 'eccentricity: %s' %(eccentricity(DG))
# print 'center: %s' %(center(DG))
# print 'periphery: %s' %(periphery(DG))
# print 'density: %s' %(density(DG))
# klist, plist = pmd(instrength)
# fit = powerlaw.Fit(instrength)
# print 'powerlaw lib fit'
# print fit.alpha
# figPDF = powerlaw.plot_pdf(instrength, color='b')
# powerlaw.plot_pdf(instrength, linear_bins=True, color='r', ax=figPDF)
# figPDF.scatter(klist, plist, c='k', s=50, alpha=0.4,marker='+', label='Raw')
# plt.show() |
20,476 | 3bd55e4fa338d4e2fdb395ef5fc927a60ff2f0fa | import json
import re
from lxml import etree
from db.models import User
from logger import crawler
def num_str_to_int(num_str):
    """Parse a comma-grouped count like '1,234' into an int; falsy -> 0."""
    if num_str:
        return int(num_str.replace(",", ""))
    return 0
def get_detail(user_name, html):
    """Parse a Zhihu profile page into a User record.

    Fields that cannot be found are simply left unset; any parsing failure
    is logged and the partially-filled User is returned.
    """
    user = User()
    user.name = user_name
    root = etree.HTML(html)
    headline_xpath = "//span[@class='ztext ProfileHeader-headline']/text()"
    avatar_xpath = "//img[@class='Avatar Avatar--large UserAvatar-inner']/@src"
    career_xpath = "//div[@class='ProfileHeader-infoItem'][1]/text()"
    education_xpath = "//div[@class='ProfileHeader-infoItem'][2]/text()"
    follow_xpath = "//strong[@class='NumberBoard-itemValue']"
    img_xpath = "//img[@class='Avatar Avatar--large UserAvatar-inner']/@src"
    try:
        headline_item = root.xpath(headline_xpath)
        if headline_item:
            user.headline = headline_item[0]
        avatar_item = root.xpath(avatar_xpath)
        if avatar_item:
            user.avatar = avatar_item[0]
        career_item = root.xpath(career_xpath)
        if career_item:
            user.career = career_item[0]
        user.education = " ".join(root.xpath(education_xpath))
        # The two NumberBoard values are follower then following counts.
        follow_item = root.xpath(follow_xpath)
        if follow_item:
            user.follower = num_str_to_int(follow_item[0].text)
            user.following = num_str_to_int(follow_item[1].text)
        # Bug fix: the previous pattern r"获得 (\d+(?:,\d+)) 次赞同" required at
        # least one comma group, so plain counts like "123" never matched.
        approve_item = re.search(r"获得 (\d+(?:,\d+)*) 次赞同", html)
        if approve_item:
            user.approve = num_str_to_int(approve_item.group(1))
        thanks_and_collect = re.search(r"获得 (\d+(?:,*\d+)) 次感谢,(\d+(?:,*\d+)) 次收", html)
        if thanks_and_collect:
            thanks_str, collect_str = thanks_and_collect.groups()
            user.thanks = num_str_to_int(thanks_str)
            user.collect = num_str_to_int(collect_str)
        else:
            # -1 marks "not found" so it is distinguishable from a real 0.
            user.thanks, user.collect = -1, -1
        img_item = root.xpath(img_xpath)
        if img_item:
            user.image_url = img_item[0]
    except Exception:
        crawler.exception(f"error!user_name = {user_name}")
    return user
def get_fans_or_follows(html, user_name):
    """Parse one page of a follower/followee API response.

    Returns (url_token list, is_end flag for pagination); *user_name* is
    accepted for interface parity but not used.
    """
    payload = json.loads(html)
    tokens = [entry.get("url_token") for entry in payload["data"]]
    return tokens, payload["paging"]["is_end"]
|
20,477 | c6ea99dca9c294ba0ea6eac6876022bcc5fc6af7 | films = {
"Тренер Картер": "Фильм про нищебродскую баскетбольную команду,которая \
почти стала лучшей в стране, после прихода в нее нового тренера,проделавшего \
с командой большую работу",
"Гарри Поттер и Тайная комната": "Второй фильм про Гарри .Да присутствует \
феникс, василиск, меч Грифиндора и молодой Том Редл",
"Перси Джексон и Похититель молний": "Не только в школе волшебства и \
чародейства 'Хогвартс' происходят загадочные истрашные события. Перси Джексон \
- американский школьник (и по совместительству сын Посейдона) ,едва не \
становится жертвой учительницы математики ( оказавшейся злой фурией)",
"Игра Эндера": "Фильм, рассказывающий об удивительном мальчике по имени\
Эндер. Он живет в такое время ,когда весь мир готовится к повторному нашествию\
их врагов.И Эндер единственный , кто может их остановить, пусть ему всего 12",
"Мстители": "Мстители впервые собираются для угрозы , которую нельзя убрать\
по одиночке - вторжение на Землю во главе с Локи (скандинавский бог, а также\
брат Тора). В этом фильме показывается сбор мстителей и их становление как\
лучшей команды на Земле",
"Человек-паук": "В главной роли лучший из паучков - Тоби Магуайр!!\
Рассказывается о получении Питером способностей и становлением его как героя",
"Капитан Америка": "Фильм о первом супергерое Америки. Вы увидите путь от \
дрыща до супергероя",
"Назад в будущее 1-3": "Трилогия расскажет вам про Дока и Марти и их \
приключения по таймлайне",
5: "Фильм про 5 подвигов Тесея",
("Monday","Tuesday","Wednesday"): "Фильм про дни недели",
30: "Документальный фильм про Стефана Карри",
(" Майкл Джордан","Коби Брайнт","Леброн Джеймс","Стефан Карри"): "Документ\
альный фильм про лучших баскетболистов в истории"
}
# Dump the catalogue: the raw dict first, then one formatted line per film.
print(films)
print("-" * 100)
for title_key, summary in films.items():
    print("Фильм: " + str(title_key) + " Описание :" + str(summary))
|
20,478 | e7b20d5861266c0ec5d041ba20e41c286be8cb64 | import os
import random
import string
def createUniqueWARC():
lines = []
warcInFilename = 'frogTest.warc'
warcInPath = os.path.join(os.path.dirname(__file__) + '/samples/warcs/' +
warcInFilename)
stringToChange = 'abcdefghijklmnopqrstuvwxz'
randomString = getRandomString(len(stringToChange))
with open(warcInPath, 'r') as warcFile:
newContent = warcFile.read().replace(stringToChange, randomString)
warcOutFilename = warcInFilename.replace('.warc', '_' +
randomString + '.warc')
warcOutPath = os.path.join(os.path.dirname(__file__) +
'/samples/warcs/' + warcOutFilename)
with open(warcOutPath, 'w') as warcFile:
warcFile.write(newContent)
return warcOutPath
def getRandomString(n):
    """Return *n* OS-entropy random characters drawn from [a-z0-9]."""
    alphabet = string.ascii_lowercase + string.digits
    rng = random.SystemRandom()
    return ''.join(rng.choice(alphabet) for _ in range(n))
|
20,479 | 53937f85e9fa029196cc05e8407694ddf3958451 | from sklearn import datasets
from sklearn.preprocessing import LabelEncoder, StandardScaler, MinMaxScaler
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as skLDA
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer
from scipy import stats
import numpy as np
import impyute as impy
from fancyimpute import IterativeSVD, SoftImpute, NuclearNormMinimization
import pandas as pd
import time
"""### LDA and nan function"""
'''
function that create data list that contain missing values
The input X is a numpy array, y is the label
the function return a list where the ith element of
the list belongs to the ith class
'''
def make_nan_list(X, y, G, n, p):
    """Split X by class label and blank out rectangular blocks with NaN.

    For class g and each feature cut-point p[k], rows n[g, k+1]:n[g, k] get
    NaN in columns p[k]: . Labels must run 0..G-1. Returns one array per
    class (boolean indexing copies, so X itself is untouched).
    """
    per_class = []
    for g in range(G):
        Xg = X[y == g, :]
        for k in range(len(p) - 1):
            Xg[n[g, k + 1]:n[g, k], p[k]:] = np.nan
        per_class.append(Xg)
    return per_class
"""### compute_err function"""
def missing_rate(Xtrain, ytrain, n, p, G):
    """Fraction of NaN entries produced by make_nan_list's synthetic masking."""
    masked = make_nan_list(Xtrain, ytrain, G, n, p)
    # Stack the per-class blocks back into one matrix before measuring.
    stacked = masked[0]
    for g in np.arange(1, G):
        stacked = np.vstack((stacked, masked[g]))
    return np.mean(np.isnan(stacked))
def compute_err_Nuclear(Xtrain, ytrain, Xtest, ytest, n, p, G):
    """Mask the training data, impute via nuclear-norm minimisation, fit LDA.

    Returns (test error rate, seconds spent on imputation + fitting).
    """
    Xtr_nan_list = make_nan_list(Xtrain,ytrain,G, n, p)
    # make NA data
    # since making function changes the order of observation
    # we need to generate new ytr from Xtr_nan
    Xtr_nan, ytr = Xtr_nan_list[0], np.repeat(0, len(Xtr_nan_list[0]))
    for g in np.arange(1,G):
        Xtr_nan = np.vstack((Xtr_nan, Xtr_nan_list[g]))
        ytr = np.hstack((ytr, np.repeat(g, len(Xtr_nan_list[g]))))
    # percentage of missing values
    per_missing = np.mean(np.isnan(Xtr_nan))  # NOTE(review): computed but unused
    # Scale train (with NaNs) and test with the same min-max transform.
    scaler = MinMaxScaler()
    scaler.fit(Xtr_nan)
    Xtr_nan = scaler.transform(Xtr_nan)
    Xtest = scaler.transform(Xtest)
    Xtr_nan_list2 = []  # NOTE(review): built but never used afterwards
    for g in range(G):
        Xtr_nan_list2.append(scaler.transform(Xtr_nan_list[g]))
    #impute,classify and get the error rates for imputation approaches
    start = time.time()
    Xtr_nuclear = NuclearNormMinimization(max_iters=10).fit_transform(Xtr_nan)
    clf_nuclear = skLDA().fit(Xtr_nuclear, ytr)
    nuclear_err = np.mean(clf_nuclear.predict(Xtest).flatten() != ytest)
    nuclear_time = time.time()-start
    return nuclear_err, nuclear_time
"""## Import Fashion MNIST"""
import tensorflow as tf
fashion_mnist = tf.keras.datasets.fashion_mnist
(Xtrain, ytrain), (Xtest, ytest) = fashion_mnist.load_data()
Xtrain.shape, Xtest.shape, ytrain.shape, ytest.shape
Xtrain = Xtrain.astype(float).reshape((60000,784))
# set random seed and shuffle the data
np.random.seed(1)
idx = np.arange(len(ytrain))
np.random.shuffle(idx)
Xtrain, ytrain = Xtrain[idx,:], ytrain[idx]
Xtrain.shape, ytrain.shape
# convert the test set to NumPy arrays and flatten the data
Xtest = Xtest.astype(float).reshape((10000,784))
# number of sample per class in training data
ng = np.asarray([sum(ytrain==i) for i in np.arange(10)])
"""## 20%"""
n = np.hstack((ng.reshape((-1,1)), np.tile([4500,4200,4000, 3800],
10).reshape((10,-1))))
p = np.array([310,400,480, 520,784])
missing_rate(Xtrain, ytrain, n, p, 10)
nuclear20 = compute_err_Nuclear(Xtrain, ytrain, Xtest, ytest, n, p, 10)
"""## 30%"""
n = np.hstack((ng.reshape((-1,1)), np.tile([4400,4000,3400, 3000],
10).reshape((10,-1))))
p = np.array([250,310,400, 450,784])
missing_rate(Xtrain, ytrain, n, p, 10)
nuclear30 = compute_err_Nuclear(Xtrain, ytrain, Xtest, ytest, n, p, 10)
"""## 40%"""
n = np.hstack((ng.reshape((-1,1)), np.tile([3600,3400,3000, 2500],
10).reshape((10,-1))))
p = np.array([200,220,300, 400,784])
missing_rate(Xtrain, ytrain, n, p, 10)
nuclear40 = compute_err_Nuclear(Xtrain, ytrain, Xtest, ytest, n, p, 10)
"""## 50%"""
n = np.hstack((ng.reshape((-1,1)), np.tile([3000,2900,2700, 2500],
10).reshape((10,-1))))
p = np.array([100,150,220, 250,784])
missing_rate(Xtrain, ytrain, n, p, 10)
result = np.vstack((nuclear20, nuclear30, nuclear40))
np.savetxt("fashion_nuclear.csv", result, delimiter=",")
|
20,480 | 18accd11da258081a788e1dce7f0fd0ccb7c9491 | # -*- coding: utf-8 -*-
"""
Created on Thu Jan 8 00:14:10 2015
@author: ericwu
"""
import matplotlib.pyplot as plt
import sys
import pickle
sys.path.append("./tools/")
from feature_format import featureFormat
from feature_format import targetFeatureSplit
import numpy as np
import myTools
##############################################################################
### features_list is a list of strings, each of which is a feature name #
### first feature must be "poi", as this will be singled out as the label #
##############################################################################
# Feature selection setup: 'poi' is the label and must stay first; the rest
# split into e-mail-based and financial features.
features_list = []
email_features_list = ['to_messages', 'from_poi_to_this_person',
                       'from_messages', 'from_this_person_to_poi',
                       'shared_receipt_with_poi']
financial_features_list = ['salary', 'deferral_payments', 'total_payments',
                           'loan_advances', 'bonus', 'restricted_stock_deferred',
                           'deferred_income', 'total_stock_value', 'expenses',
                           'exercised_stock_options', 'other', 'long_term_incentive',
                           'restricted_stock', 'director_fees']
target_label = ['poi']
# total features list: The first one should be 'poi' (target label)
total_features_list = target_label + email_features_list + financial_features_list
# financial features list with target label
financial_features_list = target_label + financial_features_list
# email features list with target label
email_features_list = target_label + email_features_list
### load the dictionary containing the dataset
data_dict = pickle.load(open("final_project_dataset.pkl", "r") )
### we suggest removing any outliers before proceeding further
### if you are creating any new features, you might want to do that here
### store to my_dataset for easy export below
my_dataset = data_dict
# Keep only features with enough non-missing entries.
# NOTE(review): the original comment said "> 82" but the call passes
# threshold = 70 — confirm which value is intended.
selected_features_list = myTools.select_features_by_num(my_dataset, total_features_list, threshold = 70)
features_list = selected_features_list
# Remove the "TOTAL" and "THE TRAVEL AGENCY IN THE PARK" data point (outliers)
my_dataset.pop('TOTAL', 0)
my_dataset.pop('THE TRAVEL AGENCY IN THE PARK', 0)
# Import AddingFeature class to add new feature
#from AddingFeature import AddingFeature
#addFeature = AddingFeature(my_dataset, features_list)
#addFeature.duplicate_feature("exercised_stock_options", "exercised_stock_options_1")
#
#features_list = addFeature.get_current_features_list()
#my_dataset = addFeature.get_current_data_dict()
### these two lines extract the features specified in features_list
### and extract them from data_dict, returning a numpy array
data = featureFormat(my_dataset, features_list)
#label_data = data[:,0]
#to_email_fraction = np.nan_to_num(data[:,4]/data[:,1])
#from_email_fraction = np.nan_to_num(data[:,2]/data[:,3])
#
#data = np.column_stack((label_data, to_email_fraction, from_email_fraction))
### if you are creating new features, could also do that here
### split into labels and features; the first column is the 'poi' label,
### which is why "poi" must always be first in features_list.
labels, features = targetFeatureSplit(data)
# Preprocessing the data
#from sklearn.preprocessing import MinMaxScaler
#scaler = MinMaxScaler()
#features = scaler.fit_transform(features)
## Using feature selection to select the feature
#from sklearn.feature_selection import SelectKBest
#from sklearn.feature_selection import f_classif
#k = 6
#selectKB = SelectKBest(f_classif, k = k)
#features = selectKB.fit_transform(features, labels)
#index = selectKB.get_support().tolist()
#
#new_features_list = []
#for i in range(len(index)):
# if index[i]:
# new_features_list.append(features_list[i+1])
#
## Insert poi to the first element
#new_features_list.insert(0, "poi")
# Dimensionality reduction + 3-fold stratified CV over a grid-searched
# logistic regression.
# NOTE(review): RandomizedPCA, sklearn.cross_validation and
# sklearn.grid_search are all removed in modern scikit-learn — this script
# targets an old (pre-0.20) release.
from sklearn.decomposition import RandomizedPCA
n_components = 9
pca = RandomizedPCA(n_components = n_components, whiten = True)
pca.fit(features)
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.cross_validation import StratifiedKFold
from sklearn.grid_search import GridSearchCV
skf = StratifiedKFold( labels, n_folds=3 )
accuracies = []
precisions = []
recalls = []
from sklearn.ensemble import AdaBoostClassifier
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression
for train_idx, test_idx in skf:
    features_train = []
    features_test = []
    labels_train = []
    labels_test = []
    for ii in train_idx:
        features_train.append( features[ii] )
        labels_train.append( labels[ii] )
    for jj in test_idx:
        features_test.append( features[jj] )
        labels_test.append( labels[jj] )
    # NOTE(review): PCA was fitted on ALL samples above, so each fold's
    # transform leaks information from its test split — confirm intended.
    features_train = pca.transform(features_train)
    features_test = pca.transform(features_test)
    ### fit the classifier using training set, and test on test set
#    parameter = {'base_estimator':[None, DecisionTreeClassifier(),
#                                   RandomForestClassifier()],
#                 'n_estimators':[20, 50]}
    # Here comes weird part
#    parameter = {'base_estimator':[None, RandomForestClassifier(), LogisticRegression(),
#                 DecisionTreeClassifier()], 'n_estimators':[20, 50]}
#
#    adaBoost = AdaBoostClassifier(learning_rate = 1, random_state = 0, algorithm='SAMME.R')
#    clf = GridSearchCV(adaBoost, parameter)
    logisticR = LogisticRegression()
    parameter = {'penalty': ['l1', 'l2'], 'C': [0.1, 0.5, 1],
                 'class_weight': [None, 'auto']}
    clf = GridSearchCV(logisticR, parameter)
#    base_estimator = RandomForestClassifier()
#    clf = AdaBoostClassifier(base_estimator = None ,n_estimators = 50, learning_rate = 1, random_state = 0, algorithm='SAMME.R')
#    clf = RandomForestClassifier(n_estimators = 2, random_state = 0)
    clf.fit(features_train, labels_train)
    pred = clf.predict(features_test)
    accuracy = clf.score(features_test, labels_test)
    ### for each fold, print some metrics
    print
    print "Accuracy: %f " %accuracy
    print "precision score: ", precision_score( labels_test, pred )
    print "recall score: ", recall_score( labels_test, pred )
    accuracies.append(accuracy)
    precisions.append( precision_score(labels_test, pred) )
    recalls.append( recall_score(labels_test, pred) )
### aggregate precision and recall over all folds
print "average accuracy: ", sum(accuracies)/3.
print "average precision: ", sum(precisions)/3.
print "average recall: ", sum(recalls)/3.
#features_list = new_features_list
#data_dict = my_dataset
####################################################################
### dump your classifier, dataset and features_list so #
### anyone can run/check your results #
####################################################################
# Persist the fitted classifier, dataset, and feature list for the grader.
pickle.dump(clf, open("my_classifier.pkl", "w") )
pickle.dump(data_dict, open("my_dataset.pkl", "w") )
pickle.dump(features_list, open("my_feature_list.pkl", "w") )
|
20,481 | 9e256c594ea3a7a25b67a1da61db0951f6baaa7a | ##GCD Algorithm##
#Nick Bellinger
#Mod Cryptography
#2/7/19
#Ask for input for a and b
# Python 3 input() returns str, hence the explicit int conversions below.
a = input("Enter first number: ")
b = input("Enter second number: ")
#Convert a and b to integers
a = int(a)
b = int(b)
#GCD function
def GCD(a, b):
    """Greatest common divisor via the iterative Euclidean algorithm."""
    while b != 0:
        a, b = b, a % b
    return a
# Print the GCD of the two user-supplied numbers.
print("GCD: " + str(GCD(a,b)))
|
20,482 | 6f67d6403160bfb48cc6848bad907071c1dbaa31 | from crawlers.activities_crawler import activities_crawler
from crawlers.calendar_crawler import calendar_crawler
from crawlers.extra_curr_announ_crawler import extra_curr_announ_crawler
from crawlers.announ_crawler import *
from crawlers.extra_curr_today_crawler import extra_curr_today_crawler
class AnnounCrawler(object):
    """Facade over the individual announcement crawlers.

    Each getter simply delegates to the corresponding crawler function
    imported from the crawlers package and returns its result unchanged.
    """
    def __init__(self):
        pass
    def get_activities(self):
        return activities_crawler()
    def get_calendar(self):
        return calendar_crawler()
    def get_extra_curr_announ(self):
        return extra_curr_announ_crawler()
    def get_ia(self):
        return ia_crawler()
    def get_ntu_mana_dep(self):
        return ntu_mana_dep_crawler()
    def get_ntuba(self):
        return ntuba_crawler()
    def get_ntuacc(self):
        return ntuacc_crawler()
    def get_ntufin(self):
        return ntufin_crawler()
    def get_ntuib(self):
        return ntuib_crawler()
    def get_ntuim(self):
        return ntuim_crawler()
    def get_ntuemba(self):
        return ntuemba_crawler()
    def get_ntueimba(self):
        return ntueimba_crawler()
    def get_general_affairs(self):
        return general_affairs_crawler()
    def get_ga_doc(self):
        return ga_doc_crawler()
    def get_ga_general(self):
        return ga_general_crawler()
    def get_ga_property(self):
        return ga_property_crawler()
    def get_ga_construction(self):
        return ga_construction_crawler()
    def get_ga_cashier(self):
        return ga_cashier_crawler()
    def get_ga_procurement(self):
        return ga_procurement_crawler()
    def get_ga_facilities_service(self):
        return ga_facilities_service_crawler()
    def get_academic_affairs(self):
        return academic_affairs_crawler()
    def get_student_affairs(self):
        return student_affairs_crawler()
    def get_extra_curr_today(self):
        return extra_curr_today_crawler()
|
20,483 | 34a74b772b288205d33a9c09d95aee852184fb9f | from os import system, name
import sys
import os
global_status_main = True
def set_global_main_value(value):
    """Set the module-level main-loop flag ``global_status_main``."""
    global global_status_main
    global_status_main = value
def clear_screen():
    """Clear the terminal: 'cls' on Windows ('nt'), 'clear' elsewhere."""
    command = "cls" if name == "nt" else "clear"
    _ = system(command)
def get_path():
    """Return the application's base directory, printing diagnostics.

    Uses sys._MEIPASS when running from a PyInstaller-style bundle
    (sys.frozen set), otherwise the directory containing this file.
    """
    frozen = "not"
    if getattr(sys, "frozen", False):
        # we are running in a bundle
        frozen = "ever so"
        bundle_dir = sys._MEIPASS
    else:
        # we are running in a normal Python environment
        bundle_dir = os.path.dirname(os.path.abspath(__file__))
    print("we are", frozen, "frozen")
    print("bundle dir is", bundle_dir)
    print("sys.argv[0] is", sys.argv[0])
    print("sys.executable is", sys.executable)
    print("os.getcwd is", os.getcwd())
    return bundle_dir
|
20,484 | 87ef8720ac8324545c55b0448d390181c3ce0e71 | class Solution:
def selfDividingNumbers(self, left, right):
"""
:type left: int
:type right: int
:rtype: List[int]
"""
# List = []
# if right < 10:
# return [num for num in range(left, right+1)]
# else:
# for i in range(left, right+1):
# for d in str(i):
# if int(d) == 0 or i % int(d) != 0:
# break
# else:
# List.append(i)
#
# return List
ss = lambda i: int(d) == 0 or i % int(d) != 0
res = Solution()
tt = res.selfDividingNumbers(1, 22)
print(tt)
|
20,485 | 78a981486372d9797cd392975d01e5bc7f60dbbc | from tkinter import *
from PIL import ImageTk, Image
from random import randint
def cambia_img():
numero1 = str(randint(1, 6))
nombreImagen1 = "dado" + numero1 + ".png"
imagen1 = Image.open(nombreImagen1)
new_img1 = imagen1.resize((100, 100))
render1 = ImageTk.PhotoImage(new_img1)
label1 = Label(ventana, image=render1)
label1.image = render1
label1.place(x=20,y=40)
numero2 = str(randint(1, 6))
nombreImagen2 = "dado" + numero2 + ".png"
imagen2 = Image.open(nombreImagen2)
new_img2 = imagen2.resize((100, 100))
render2 = ImageTk.PhotoImage(new_img2)
label2 = Label(ventana, image=render2)
label2.image = render2
label2.place(x=130,y=40)
ventana = Tk()
ventana.title("Simulador de dado")
ventana.geometry("260x220")
ventana.resizable(False, False)
label = Label(ventana, text="Presione el botón para lanzar los dados")
label.pack()
imagenDefault1 = ImageTk.PhotoImage(Image.open("default.jpg").resize((100, 100)))
label1 = Label(ventana, image=imagenDefault1)
label1.place(x=20, y=40)
imagenDefault2 = ImageTk.PhotoImage(Image.open("default.jpg").resize((100, 100)))
label2 = Label(ventana, image=imagenDefault2)
label2.place(x=130, y=40)
btn = Button(ventana, command=cambia_img, width=6, height=1)
btn.config(text='lanzar')
btn.place(x=105, y=170)
ventana.mainloop() |
20,486 | 43826b3c0443743d8315a4be4a8dc56faa67c2ae | import common
import rum
if __name__ == '__main__':
with common.runtool(6) as parameters:
parameters[5] = common.toBool(parameters[5], rum.ABSOLUTE_FIELD_DESC) # absolute?
rum.applyToSegments(*parameters)
|
20,487 | 571f6b9056e2f535a32b559421c1c4b9c1898e78 | #!/usr/bin/env python
# encoding: utf-8
"""
untitled.py
Created by Olivier Huin on 2010-05-03.
Copyright (c) 2010 Flarebyte.com Limited. All rights reserved.
"""
import sys
import os
import flickr
from export import Exporter
class ImagesExporter(Exporter):
def __init__(self):
self.initialize()
def upload(self):
if (self.conn==None):
self.connect()
if (self.conn==None):
return False
photos=self.search_photos("Tate Modern")
print self.upload_museum_images("TateModern",photos)
def upload_museum_images(self,imgslug,mydict):
slug=imgslug+"/img"
return self.upload_slug(slug,mydict)
def search_photos(self,text):
photos = flickr.photos_search(text=text, per_page=9, extras="owner_name,url_sq, url_t, url_s, url_m, url_o")
r = []
for photo in photos:
sizes=photo.getSizes()
nsizes={}
for size in sizes:
nsizes[size["label"].lower()]=size
photodata = {"owner":{"username":photo.owner.username,"realname": photo.owner.realname},"sizes":nsizes}
r.append(photodata)
return r
exporter = ImagesExporter()
print exporter.upload()
#quicker to just 'hack' the url
#I don't think this works anymore, a change in url format
|
20,488 | 6880a93b7a28b3bdfcfa397f8636d9fece35d6e9 | """
Check that our selection of D1 and nD1 is appropriate.
To check:
- Are cells with NaN pvalues for laser counted as nD1?
- When is the SPIKE_QUALITY_THRESHOLD use? when creating the database?
"""
import sys
sys.path.append('..')
import os
import figparams
import studyparams
import numpy as np
from jaratoolbox import celldatabase
from jaratoolbox import settings
from jaratoolbox import extraplots
from matplotlib import pyplot as plt
import matplotlib.gridspec as gridspec
from scipy import stats
#TEMPDB = os.path.join(settings.FIGURES_DATA_PATH, studyparams.STUDY_NAME, 'tempdb.h5')
TEMPDB = os.path.join(settings.FIGURES_DATA_PATH, studyparams.STUDY_NAME,
'tempdb_subset_good.h5')
toExclude = np.loadtxt(os.path.join(settings.FIGURES_DATA_PATH, studyparams.STUDY_NAME,
'cell_indices_manually_removed.txt'), dtype=int)
columnsToLoad = ['index', 'spikeShape', 'laserpulse_pVal', 'laserpulse_SpikeCountChange',
'laserpulse_responseSpikeCount']
cellDB = celldatabase.load_hdf(TEMPDB, columns=columnsToLoad)
#print(len(cellDB))
cellDB.drop(toExclude, inplace=True, errors='ignore')
#print(len(cellDB))
cellIndices = cellDB.index
#sys.exit()
spikeShape = np.array(list(cellDB['spikeShape']))
peakRange = slice(0,20)
peakEachSpike = -np.min(spikeShape[:,peakRange], axis=1)
normedSpikeShape = spikeShape/peakEachSpike[:, np.newaxis]
cellDB['normedSpikeShape'] = list(normedSpikeShape)
laserpulse_pVal_threshold = 0.05 # 0.001 if want to be extra sure not to include false positives
laserpulse_responseCount_threshold = 0.5
D1_CELLS = 'laserpulse_pVal<{} and laserpulse_SpikeCountChange>0 and laserpulse_responseSpikeCount>{}'.format(laserpulse_pVal_threshold, laserpulse_responseCount_threshold)
nD1_CELLS = 'not (laserpulse_pVal<{} and laserpulse_SpikeCountChange>0)'.format(laserpulse_pVal_threshold)
dbD1 = cellDB.query(D1_CELLS)
dbnD1 = cellDB.query(nD1_CELLS)
#dbD1 = cellDB.query(studyparams.D1_CELLS)
#dbnD1 = cellDB.query(studyparams.nD1_CELLS)
spikeShapeD1 = np.array(list(dbD1['normedSpikeShape']))
spikeShapeNonD1 = np.array(list(dbnD1['normedSpikeShape']))
SAMPLERATE = 30000
nSamples = spikeShape.shape[1]
timeVec = (np.arange(nSamples)-8)/SAMPLERATE
if 1:
plt.clf()
for counter, indc in enumerate(cellIndices):
plt.plot(1e6*timeVec, normedSpikeShape[counter,:])
plt.title(indc)
plt.waitforbuttonpress()
#plt.draw()
# -- Plot all --
# plot(spikeShapeNonD1.T)
# plot(spikeShapeD1.T)
#for indc in range(100): plot(normedSpikeShape[indc,:]); plt.waitforbuttonpress()
'''
dbD1 = cellDB.query(studyparams.D1_CELLS)
spikeShape = np.array(list(dbD1['spikeShape']))
peakEachSpike = -np.min(spikeShape[:,peakRange], axis=1)
normedSpikeShape = spikeShape/peakEachSpike[:, np.newaxis]
'''
'''
FIRST_FLTRD_CELLS = 'isiViolations<{} and spikeShapeQuality>{}'.format(ISI_THRESHOLD, SPIKE_QUALITY_THRESHOLD)
D1_CELLS = 'laserpulse_pVal<{} and laserpulse_SpikeCountChange>0 and laserpulse_responseSpikeCount>{}'.format(laserpulse_pVal_threshold, laserpulse_responseCount_threshold) # Respond to laser, thus D1-expressing cells
nD1_CELLS = 'not (laserpulse_pVal<{} and laserpulse_SpikeCountChange>0)'.format(laserpulse_pVal_threshold) # Did not respond to laser, thus non-D1-expressing cells
'''
|
20,489 | 1b4a85a5bea39c4d90f6cfccfa87dc3b978b81ab | import argparse
from tools.file_utils import IncorrectFileType
def create_dag_file(number_of_jobs: int, sub_filename: str, dag_filename: str):
file_handle = open(dag_filename, mode="w")
for i in range(0, number_of_jobs):
job_text = 'Job {} {} \n VARS {} injectionNumber="{}"\n'.format(
i, sub_filename, i, i
)
file_handle.write(job_text)
file_handle.close()
def parse_args(args):
parser = argparse.ArgumentParser(description="dag file creator")
required = parser.add_argument_group("required named arguments")
required.add_argument(
"--jobs", "-j", default=200, type=int, help="number of jobs to be created"
)
required.add_argument("--dag_fname", "-d", type=str, help="dag output filename")
required.add_argument("--sub_fname", "-s", type=str, help="subfile to run job")
args = parser.parse_args(args)
if not args.dag_fname.endswith(".dag"):
raise IncorrectFileType(
"Dag file doenst end with '.dag': {}".format(args.dag_fname)
)
if not args.sub_fname.endswith(".sub"):
raise IncorrectFileType(
"Sub file doenst end with '.sub': {}".format(args.sub_fname)
)
return args
|
20,490 | b65a79cdada4c3082f722df3150fcdc6cd51ddd7 | # SPDX-License-Identifier: BSD-3-Clause
"""Test job functionality.
Every test case checks both the in-memory database (same db object on which
operations were performed) and the on-disk database (used to load data
after Databases.reload() call is performed).
"""
import random
from pytest import raises
from softfab.resultcode import ResultCode
from softfab.taskgroup import TaskGroup
from softfab.utils import IllegalStateError
from datageneratorlib import DataGenerator
def locatorForTask(taskId):
return 'dummylocator@' + taskId
def taskDone(job, taskId, result=ResultCode.OK):
"""Marks a task as done, including all required locators."""
locators = {}
if result is not ResultCode.ERROR:
for out in job.getTask(taskId).getOutputs():
locators[out] = locatorForTask(taskId)
job.taskDone(taskId, result, 'summary text', (), locators)
def runWithReload(databases, config, verifyFunc):
configId = config.getId()
verifyFunc(config)
# TODO: Speed up job creation by adding to DB only after job is complete.
#databases.configDB.add(config)
databases.reload()
config = databases.configDB[configId]
verifyFunc(config)
def sanityCheck(gen, config):
# Verify job inputs.
### Not valid for configs any more
inputs = sorted(prod['name'] for prod in config.getInputs())
gen.inputsCreated.sort()
assert inputs == gen.inputsCreated
# Verify task sequence.
available = set(gen.inputsCreated)
taskSequence = config.getTaskSequence()
tasksLeft = set(gen.tasks)
for task in taskSequence:
taskId = task.getName()
assert taskId in tasksLeft
tasksLeft.remove(taskId)
for inp in task.getInputs():
assert inp in available
for out in task.getOutputs():
# Note: Currently, we do accept one output being produced
# multiple times, the first time counts and the other
# times are ignored. See test0041 for details.
#assert out not in available
available.add(out)
assert tasksLeft == set()
def simulate(databases, gen, config, checkFunc=None):
#print('simulate', config.getId())
sanityCheck(gen, config)
# TODO: Is it OK that Config.createJob does not put record into job DB?
# If so, document.
job, = config.createJobs(gen.owner)
# Note: Disabled to save time.
# TODO: The toXML functionality should probably be tested
# in a separate test case.
#databases.jobDB.add(job)
# Verify execution.
rnd = gen.rnd
available = set(gen.inputsCreated)
tasksLeft = set(gen.tasks)
# TODO: Do not use a list (performance).
freeTaskRunners = list(gen.taskRunners)
rnd.shuffle(freeTaskRunners)
usedTaskRunners = []
while True:
newFreeTaskRunners = []
for taskRunner in freeTaskRunners:
trRecord = databases.resourceDB[taskRunner]
taskRun = job.assignTask(trRecord)
if taskRun is None:
newFreeTaskRunners.append(taskRunner)
else:
task = taskRun.getTask()
# Verify capabilities.
trCaps = trRecord.capabilities
for cap in task.getNeededCaps():
assert cap in trCaps
# Update administration.
taskName = task.getName()
usedTaskRunners.append((taskRunner, taskName))
assert taskName in tasksLeft
tasksLeft.remove(taskName)
for inp in task.getInputs():
assert inp in available
# Check whether the right Task Runner is used
# for local products.
inpProd = job.getProduct(inp)
if inpProd.isLocal():
assert inpProd.getLocalAt() == taskRunner
for out in task.getOutputs():
# Note: Currently, we do accept one output being
# produced multiple times, the first time counts
# and the other times are ignored.
# See test0041 for details.
#assert out not in available
available.add(out)
freeTaskRunners = newFreeTaskRunners
if usedTaskRunners == []:
# All Task Runners are free and still unable to assign.
break
# Pick a Task Runner and let it finish its task.
index = rnd.randrange(len(usedTaskRunners))
taskRunner, taskId = usedTaskRunners[index]
del usedTaskRunners[index]
freeTaskRunners.append(taskRunner)
rnd.shuffle(freeTaskRunners)
taskDone(job, taskId)
if checkFunc is not None:
checkFunc(gen, job)
else:
assert tasksLeft == set()
def randomRuns(databases, runs, rnd, genClass, checkFunc=None):
for run in range(runs):
gen = genClass(databases, rnd, run)
gen.createDefinitions()
gen.createTaskRunners()
config = gen.createConfiguration()
gen.addCapabilities(config)
gen.setInputs(config)
# TODO: Write a separate log file for stats such as these.
#print('number of products:', len(gen.products))
#print('number of inputs:', len(gen.inputsCreated))
#print('number of input groups:', len(config.getInputsGrouped()))
runWithReload(databases, config,
lambda config: simulate(databases, gen, config, checkFunc)
)
def testJobProperties(databases):
"""Test whether global job properties are preserved."""
def checkProperties(config):
jobId = 'job0'
assert config.targets == {'target1', 'target2'}
assert config.getId() == jobId
assert config['name'] == jobId
assert config.owner == gen.owner
assert config['owner'] == gen.owner
assert config.comment == gen.comment
#assert config.getDescription() == config['description']
gen = DataGenerator(databases)
config = gen.createConfiguration(
targets=('target1', 'target2')
)
runWithReload(databases, config, checkProperties)
def testJobEmpty(databases):
"""Test whether empty job behaves correctly."""
def checkEmpty(config):
assert config.getParameter('') == None
assert len(config.getInputs()) == 0
assert len(config.getInputsGrouped()) == 0
assert len(config.getTasks()) == 0
assert len(config.getTaskSequence()) == 0
config = DataGenerator(databases).createConfiguration()
runWithReload(databases, config, checkEmpty)
def testJobOneTask(databases):
"""Test job with 1 task in it."""
class CustomGenerator(DataGenerator):
numTasks = 1
numInputs = [ 0 ]
numOutputs = [ 0 ]
gen = CustomGenerator(databases)
gen.createDefinitions()
config = gen.createConfiguration()
def checkOne(config):
taskName = gen.tasks[0]
#assert config.getProduct('') is None
assert len(config.getInputs()) == 0
assert len(config.getInputsGrouped()) == 0
assert len(config.getTasks()) == 1
task, = config.getTasks()
assert task is not None
assert task.getName() == taskName
assert len(config.getTaskSequence()) == 1
runWithReload(databases, config, checkOne)
def testJobDependencies(databases):
"""Test dependency resolution."""
class CustomGenerator(DataGenerator):
pass
seed = 0
rnd = random.Random(seed)
runs = 10
randomRuns(databases, runs, rnd, CustomGenerator)
def testJobTwiceProduct(databases):
"""Test producing the same product twice.
This reproduces a problem that occurred in the LVS SoftFab on 2005-06-21.
"""
class CustomGenerator(DataGenerator):
pass
gen = CustomGenerator(databases)
image = gen.createProduct('image')
buildFw = gen.createFramework('build', [], [ image ])
testFw = gen.createFramework('test', [ image ], [])
buildTask1 = gen.createTask('build1', buildFw)
buildTask2 = gen.createTask('build2', buildFw)
testTask = gen.createTask('test', testFw)
buildTR = gen.createTaskRunner(name='tr_build', capabilities=['build'])
testTR = gen.createTaskRunner(name='tr_test', capabilities=['test'])
def simulate(config):
sanityCheck(gen, config)
# TODO: Is it OK that Config.createJob does not put record into
# job DB? If so, document.
job, = config.createJobs(gen.owner)
# Note: Disabled to save time.
# TODO: The toXML functionality should probably be tested
# in a separate test case.
#self.jobDB.add(job)
# Verify execution:
# Successfully complete first build task.
task = job.assignTask(databases.resourceDB[buildTR])
assert task is not None
assert task.getName().startswith('build')
taskDone(job, task.getName())
# Start test task.
task = job.assignTask(databases.resourceDB[testTR])
assert task is not None
assert task.getName() == testTask
# Complete second build task, but make it fail.
task = job.assignTask(databases.resourceDB[buildTR])
assert task is not None
assert task.getName().startswith('build')
taskDone(job, task.getName(), ResultCode.ERROR)
# Successfully complete test task.
taskDone(job, testTask)
assert job.isExecutionFinished()
assert job.hasFinalResult()
runWithReload(databases, gen.createConfiguration(), simulate)
def testJobMultiTaskRunner(databases):
"""Test execution using multiple Task Runners."""
class CustomGenerator(DataGenerator):
chanceChainProduct = 0.4
numTaskRunners = 5
chanceTRFramework = 0.7
def frameworksForTaskRunner(self):
return [
framework for framework in self.frameworks
if self.rnd.random() < self.chanceTRFramework
]
seed = 123456789
rnd = random.Random(seed)
runs = 10
randomRuns(databases, runs, rnd, CustomGenerator)
def testJobTRSetRestrictJob(databases):
"""Test Task Runner restrictions at the job level.
Two Task Runners, only one is allowed at the job level.
"""
gen = DataGenerator(databases)
fwName = gen.createFramework('testfw1')
taskName = gen.createTask('task1', fwName)
tr1Name = gen.createTaskRunner(capabilities=[fwName])
tr2Name = gen.createTaskRunner(capabilities=[fwName])
config = gen.createConfiguration()
config._setRunners([tr2Name])
config._notify()
def simulate(config):
sanityCheck(gen, config)
job, = config.createJobs(gen.owner)
task = job.assignTask(databases.resourceDB[tr1Name])
assert task is None
assert not job.isExecutionFinished()
assert not job.hasFinalResult()
task = job.assignTask(databases.resourceDB[tr2Name])
assert task is not None
taskDone(job, task.getName())
assert job.isExecutionFinished()
assert job.hasFinalResult()
runWithReload(databases, config, simulate)
def testJobTRSetRestrictTask(databases):
"""Test Task Runner restrictions at the task level.
Two Task Runners, only one is allowed at the task level.
"""
gen = DataGenerator(databases)
fwName = gen.createFramework('testfw1')
taskName = gen.createTask('task1', fwName)
tr1Name = gen.createTaskRunner(capabilities=[fwName])
tr2Name = gen.createTaskRunner(capabilities=[fwName])
config = gen.createConfiguration()
config.getTask(taskName)._setRunners([tr2Name])
config._notify()
def simulate(config):
sanityCheck(gen, config)
job, = config.createJobs(gen.owner)
task = job.assignTask(databases.resourceDB[tr1Name])
assert task is None
assert not job.isExecutionFinished()
assert not job.hasFinalResult()
task = job.assignTask(databases.resourceDB[tr2Name])
assert task is not None
taskDone(job, task.getName())
assert job.isExecutionFinished()
assert job.hasFinalResult()
runWithReload(databases, config, simulate)
def testJobTRSetOverride(databases):
"""Test overriding Task Runner restrictions.
Two Task Runners, one allowed at the job level
and overridden at the task level.
"""
gen = DataGenerator(databases)
fwName = gen.createFramework('testfw1')
taskName = gen.createTask('task1', fwName)
tr1Name = gen.createTaskRunner(capabilities=[fwName])
tr2Name = gen.createTaskRunner(capabilities=[fwName])
config = gen.createConfiguration()
config._setRunners([tr1Name])
config.getTask(taskName)._setRunners([tr2Name])
config._notify()
def simulate(config):
sanityCheck(gen, config)
job, = config.createJobs(gen.owner)
task = job.assignTask(databases.resourceDB[tr1Name])
assert task is None
assert not job.isExecutionFinished()
assert not job.hasFinalResult()
task = job.assignTask(databases.resourceDB[tr2Name])
assert task is not None
taskDone(job, task.getName())
assert job.isExecutionFinished()
assert job.hasFinalResult()
runWithReload(databases, config, simulate)
def testJobTRSetNoCaps(databases):
"""Test that Task Runner restrictions do not override capabilities.
One Task Runner, explicitly allowed both at the task
and at the job level, but does not have required capability.
"""
gen = DataGenerator(databases)
fwName = gen.createFramework('testfw1')
taskName = gen.createTask('task1', fwName)
tr1Name = gen.createTaskRunner(capabilities=['dummy'])
config = gen.createConfiguration()
config._setRunners([tr1Name])
config.getTask(taskName)._setRunners([tr1Name])
config._notify()
def simulate(config):
sanityCheck(gen, config)
job, = config.createJobs(gen.owner)
task = job.assignTask(databases.resourceDB[tr1Name])
assert task is None
assert not job.isExecutionFinished()
assert not job.hasFinalResult()
runWithReload(databases, config, simulate)
def testJobTRSetLocalInput(databases):
"""Test that Task Runner restrictions do not override local inputs.
Two Task Runners, one is allowed at the task level,
local input is bound to the other task runner.
"""
gen = DataGenerator(databases)
prodName = gen.createProduct('input1', True)
fwName = gen.createFramework('testfw1', [prodName])
taskName = gen.createTask('task1', fwName)
tr1Name = gen.createTaskRunner(capabilities=[fwName])
tr2Name = gen.createTaskRunner(capabilities=[fwName])
config = gen.createConfiguration()
config._addInput({
'name': prodName,
'locator': 'dummy',
'localAt': tr2Name
})
config.getTask(taskName)._setRunners([tr1Name])
config._notify()
# TODO: This is a hack to prevent 'sanityCheck' from reporting an error
gen.inputsCreated = [prodName]
def simulate(config):
sanityCheck(gen, config)
job, = config.createJobs(gen.owner)
task = job.assignTask(databases.resourceDB[tr1Name])
assert task is None
assert not job.isExecutionFinished()
assert not job.hasFinalResult()
runWithReload(databases, config, simulate)
def testJobTRSetRandomRun(databases):
"""Random runs with task runner restrictions."""
class CustomGenerator(DataGenerator):
chanceChainProduct = 0.4
numTaskRunners = 5
chanceTRFramework = 0.7
chanceTRAllowedForJob = 0.7
chanceTRAllowedForTask = 0.5
chanceTRSetOverride = 0.4
def frameworksForTaskRunner(self):
return [
framework for framework in self.frameworks
if self.rnd.random() < self.chanceTRFramework
]
def createConfiguration(self):
def randomTRSet(chance):
return (
tr for tr in self.taskRunners
if self.rnd.random() < chance
)
config = DataGenerator.createConfiguration(self)
config._setRunners(randomTRSet(self.chanceTRAllowedForJob))
for task in config.getTasks():
if self.rnd.random() < self.chanceTRSetOverride:
task._setRunners(
randomTRSet(self.chanceTRAllowedForTask)
)
config._notify()
return config
def checkResults(gen, job):
def checkExecutionFinishedTask(task):
assert task.isDone()
taskRunners = task.getRunners() or job.getRunners()
runnerId = task['runner']
if taskRunners:
assert runnerId in taskRunners
trCaps = databases.resourceDB[runnerId].capabilities
for cap in task.getNeededCaps():
assert cap in trCaps
def allInputsReady(task):
for input in task.getInputs():
if not job.getProduct(input).isAvailable():
return False
return True
def checkTaskRunners(task, onlyThis = None):
if onlyThis is not None:
taskRunners = [onlyThis]
else:
taskRunners = task.getRunners() or job.getRunners()
for runnerId in taskRunners:
# Target is not checked here, because DataGenerator uses
# the same target for the job and all the task runners.
assert not databases.resourceDB[runnerId].capabilities \
>= task.getNeededCaps()
def checkNotDone(tasksNotDone, noTasksDone, runnerId):
#assert noTasksDone
if runnerId is None:
assert noTasksDone
else:
assert len(tasksNotDone) != 0
for task in tasksNotDone:
if allInputsReady(task):
assert runnerId not in \
(task.getRunners() or job.getRunners())
for item in job.getTaskGroupSequence():
if isinstance(item, TaskGroup):
runnerId = item.getRunnerId()
neededCaps = item.getNeededCaps()
noTasksDone = True
tasksNotDone = []
taskRunners = None
for task in item.getChildren():
runners = task.getRunners() or job.getRunners()
if runners:
if taskRunners is None:
taskRunners = set(runners)
else:
taskRunners &= runners
if task.isExecutionFinished():
checkExecutionFinishedTask(task)
assert task['runner'] == runnerId
noTasksDone = False
else:
tasksNotDone.append(task)
if taskRunners is None:
assert len(tasksNotDone) == 0
elif taskRunners:
if runnerId in taskRunners:
for task in tasksNotDone:
if allInputsReady(task):
checkTaskRunners(task, runnerId)
else:
checkNotDone(tasksNotDone, noTasksDone, runnerId)
else:
checkNotDone(tasksNotDone, noTasksDone, runnerId)
else:
task = item # item is a task
if task.isExecutionFinished():
checkExecutionFinishedTask(task)
elif allInputsReady(task):
checkTaskRunners(task)
seed = 123456789
rnd = random.Random(seed)
runs = 10
randomRuns(databases, runs, rnd, CustomGenerator, checkResults)
def testJobCombinedProduct(databases):
"""Verifies that a combined product becomes available after all tasks
that can produce it have run, whether those tasks end in "ok" or
"error".
"""
class CustomGenerator(DataGenerator):
pass
gen = CustomGenerator(databases)
image = gen.createProduct('image', False, True)
buildFw = gen.createFramework('build', [], [ image ])
testFw = gen.createFramework('test', [ image ], [])
buildTask1 = gen.createTask('build1', buildFw)
buildTask2 = gen.createTask('build2', buildFw)
testTask = gen.createTask('test', testFw)
buildTR = gen.createTaskRunner(name='tr_build', capabilities=['build'])
testTR = gen.createTaskRunner(name='tr_test', capabilities=['test'])
def simulate(config):
sanityCheck(gen, config)
job, = config.createJobs(gen.owner)
# TODO: The toXML functionality should probably be tested
# in a separate test case.
# Verify execution:
# Successfully complete first build task.
task = job.assignTask(databases.resourceDB[buildTR])
assert task is not None
assert task.getName().startswith('build')
taskDone(job, task.getName())
# Try to start test task (should fail).
task = job.assignTask(databases.resourceDB[testTR])
assert task is None
# Complete second build task, but make it fail.
task = job.assignTask(databases.resourceDB[buildTR])
assert task is not None
assert task.getName().startswith('build')
taskDone(job, task.getName(), ResultCode.ERROR)
# Try to start test task (should succeed).
task = job.assignTask(databases.resourceDB[testTR])
assert task is not None
assert task.getName() == testTask
# Successfully complete test task.
taskDone(job, testTask)
assert job.isExecutionFinished()
assert job.hasFinalResult()
# Check that locators have been stored separately.
producers = set()
for taskId, locator in job.getProduct(image).getProducers():
assert taskId.startswith('build')
assert locator == locatorForTask(taskId)
runWithReload(databases, gen.createConfiguration(), simulate)
def testJobPostponedInspection(databases):
"""Tests job execution where the results are not known at the time that
the execution finishes.
"""
class CustomGenerator(DataGenerator):
pass
gen = CustomGenerator(databases)
image = gen.createProduct('image')
buildFw = gen.createFramework('build', [], [ image ])
testFw = gen.createFramework('test', [ image ], [])
buildTask = gen.createTask('build', buildFw)
testTask1 = gen.createTask('test1', testFw)
testTask2 = gen.createTask('test2', testFw)
testTask3 = gen.createTask('test3', testFw)
tr = gen.createTaskRunner(name='tr_build',
capabilities=['build', 'test'])
def simulate(config):
sanityCheck(gen, config)
job, = config.createJobs(gen.owner)
# TODO: The toXML functionality should probably be tested
# in a separate test case.
# Verify execution:
# Successfully complete first build task.
task = job.assignTask(databases.resourceDB[tr])
assert task is not None
assert task.getName() == buildTask
taskDone(job, buildTask)
assert job.result == ResultCode.OK
assert job.getFinalResult() == None
# Successfully complete first test task, without result.
task = job.assignTask(databases.resourceDB[tr])
assert (task is not None) is not None
assert task.getName() == testTask1
taskDone(job, testTask1, ResultCode.INSPECT)
assert not job.isExecutionFinished()
assert not job.hasFinalResult()
assert job.result == ResultCode.INSPECT
assert job.getFinalResult() is None
# Successfully complete second test task, with result.
task = job.assignTask(databases.resourceDB[tr])
assert task is not None
assert task.getName() == testTask2
taskDone(job, testTask2, ResultCode.OK)
assert not job.isExecutionFinished()
assert not job.hasFinalResult()
assert job.result == ResultCode.INSPECT
assert job.getFinalResult() is None
# Successfully complete third test task, without result.
task = job.assignTask(databases.resourceDB[tr])
assert task is not None
assert task.getName() == testTask3
taskDone(job, testTask3, ResultCode.INSPECT)
assert job.isExecutionFinished()
assert not job.hasFinalResult()
assert job.result == ResultCode.INSPECT
assert job.getFinalResult() is None
# Attempt to set invalid inspection result.
with raises(ValueError):
job.inspectDone(testTask1, ResultCode.CANCELLED, 'invalid')
# Complete inspection of first task.
job.inspectDone(testTask1, ResultCode.WARNING, 'inspect 1')
assert job.isExecutionFinished()
assert not job.hasFinalResult()
assert job.result == ResultCode.INSPECT
assert job.getFinalResult() is None
# Attempt to change inspection result.
with raises(IllegalStateError):
job.inspectDone(testTask1, ResultCode.OK, 'invalid')
# Complete inspection of third task.
job.inspectDone(testTask3, ResultCode.OK, 'inspect 3')
assert job.isExecutionFinished()
assert job.hasFinalResult()
assert job.result == ResultCode.WARNING
assert job.getFinalResult() == ResultCode.WARNING
runWithReload(databases, gen.createConfiguration(), simulate)
def testJobTRLostWhileRunning(databases):
"""Test what happens when a busy Task Runner is lost."""
gen = DataGenerator(databases)
fwName = gen.createFramework('testfw1')
taskName = gen.createTask('task1', fwName)
trName = gen.createTaskRunner(capabilities=[fwName])
config = gen.createConfiguration()
sanityCheck(gen, config)
job, = config.createJobs(gen.owner)
runner = databases.resourceDB[trName]
task = job.assignTask(runner)
assert task is not None
assert task.isRunning()
runner.markLost()
assert not task.isRunning()
assert task.result == ResultCode.ERROR
def testJobTRRemovedWhileRunning(databases):
"""Test what happens when a busy Task Runner is removed."""
gen = DataGenerator(databases)
fwName = gen.createFramework('testfw1')
taskName = gen.createTask('task1', fwName)
trName = gen.createTaskRunner(capabilities=[fwName])
config = gen.createConfiguration()
sanityCheck(gen, config)
job, = config.createJobs(gen.owner)
runner = databases.resourceDB[trName]
task = job.assignTask(runner)
assert task is not None
assert task.isRunning()
databases.resourceDB.remove(runner)
assert not task.isRunning()
assert task.result == ResultCode.ERROR
def testJobResourceRemovedWhileRunning(databases):
"""Test what happens when a non-TR resource removed while in use.
Unlike with a TR, this is not a reason to fail the task, since it
may be possible for the task to complete successfully.
For example, removal of the resource may simply be the resource
management being moved outside of SoftFab.
"""
gen = DataGenerator(databases)
resType = gen.createResourceType(pertask=True)
fwName = gen.createFramework(
name='testfw1',
resources= [('ref1', resType, ())]
)
taskName = gen.createTask('task1', fwName)
trName = gen.createTaskRunner(capabilities=[fwName])
config = gen.createConfiguration()
resName = gen.createResource(resType)
sanityCheck(gen, config)
job, = config.createJobs(gen.owner)
runner = databases.resourceDB[trName]
resource = databases.resourceDB[resName]
task = job.assignTask(runner)
assert task is not None
assert task.isRunning()
assert resource.isReserved()
databases.resourceDB.remove(resource)
assert task.isRunning()
assert task.result is None
taskDone(job, taskName, ResultCode.OK)
assert job.isExecutionFinished()
assert job.hasFinalResult()
assert job.result == ResultCode.OK
assert job.getFinalResult() == ResultCode.OK
|
20,491 | 9f2c8a28b68de81ebb2cbd864708ab625a17e1b0 | import tensorflow as tf
import numpy as np
def cross_entropy_loss(preds, labels):
e = 10e-6
softmax_pred = tf.nn.softmax(preds)
loss = tf.reduce_mean(-tf.reduce_sum(labels * tf.log(softmax_pred + e), 1) -
tf.reduce_sum((1 - labels) * tf.log(1 - softmax_pred + e), 1))
return loss
def calculate_accuracy(preds, labels):
pred_class = np.argmax(preds, 1)
index = [i for i in range(0, len(labels)) if pred_class[i] == labels[i]]
return len(index) / float(preds.shape[0]) |
20,492 | 1de088c1fe10feb8fd0b4c52553f61ca24970b48 | from itertools import product, compress
from math import prod
def calc_P_Sys(N: int, source: str, probs: list):
    """Compute overall system reliability from an adjacency-matrix file.

    *source* is expected to hold (after one skipped header line) an
    adjacency matrix with N + 2 rows: a virtual start node, the N real
    elements E1..EN and a virtual end node, as "0, 1, ..." rows.
    *probs* gives the working probability of each of the N elements.

    Prints every start-to-end path and every working state with its
    probability, and returns the summed system probability P_sys
    (returns None when the input data or probabilities are rejected).
    """
    with open(source, 'r') as data:
        table = data.readlines()[1:]
    # Parse "a, b, c" rows into lists of ints.
    table = [list(map(lambda x: int(x), elem.rstrip("\n").split(", "))) for elem in table]
    if len(table) - 2 != N:
        print("Wrong input data!")
    else:
        if all([True if 0 <= p <= 1 else False for p in probs]):
            # Vertices directly reachable from the start node (row 0).
            vertices = [i for i in range(len(table[0])) if table[0][i] == 1]
            # Each partial path is tracked as a bitmask over all nodes:
            # the leading bit is the start node, plus the current vertex bit.
            sts = [int('1' + '0' * (elem - 1) + '1' + '0' * ((len(table) - 1) - elem), 2) for elem in vertices]
            vital = []  # bitmasks of complete start-to-end paths
            # Breadth-first expansion of all simple paths.
            while len(vertices) != 0:
                next_vertices = []
                next_sts = []
                start = 0
                for vt, i in enumerate(vertices):
                    # Neighbours of vertex i that are not yet on this path.
                    next_vertices += [j for j in range(len(table[i])) if table[i][j] == 1 and bin(sts[vt])[2:][j] == '0']
                    next_sts += [sts[vt] + int("0" * elem + "1" + "0" * ((len(table) - 1) - elem), 2)
                                 for elem in next_vertices[start:]]
                    start = len(next_vertices)
                sts = next_sts
                vertices = next_vertices
                # Paths that reached the end node are complete ("vital").
                vital += [sts[i] for i in range(len(vertices)) if vertices[i] == len(table) - 1]
                sts = [state for state in sts if state not in vital]
                vertices = [ix for ix in vertices if ix != len(table) - 1]
            # Strip the virtual start/end bits, keeping only element bits.
            vital = [int(bin(elem)[2:][1: -1], 2) for elem in vital]
            P_sys = 0
            work_states = []
            # A system state "works" when it contains at least one full path
            # as a subset of its switched-on elements.
            for path in vital:
                all_states = product(range(2), repeat=len(table) - 2)
                for state in all_states:
                    mask = int("".join(list(map(lambda x: str(x), state))), 2)
                    if path & mask == path:
                        work_states.append(mask)
            work_states = list(set(work_states))
            elems = [f'E{i}' for i in range(1, len(table) - 1)]
            print("\nAll possible paths from start to end:")
            for path in vital:
                bin_mask = list(map(lambda x: int(x), list(bin(path)[2:])))
                possible_path = compress(elems, bin_mask)
                print(" -> ".join(possible_path))
            title_str = "| " + " | ".join([f'E{i}' for i in range(1, len(table) - 1)] + [""]) + "P".center(14) + "|"
            print("\nAll working states and their probabilities:")
            print(title_str)
            for state in work_states:
                binary = bin(state)[2:]
                binary_state = list(binary.rjust(len(table) - 2, '0'))
                # Probability of this exact on/off combination of elements.
                element_probs = [p if binary_state[i] == '1' else 1 - p for i, p in enumerate(probs)]
                state_prob = prod(element_probs)
                print('-' * len(title_str))
                print("| " + " | ".join(binary_state + [""]) + f"{state_prob:e}".center(14) + "|")
                P_sys += state_prob
            print(f"Number of all working states: {len(work_states)}")
            return P_sys
        else:
            print("Incorrect probabilities!")
if __name__ == "__main__":
    # Keep prompting until the element count is a non-negative integer.
    raw_count = input("Input number of elements: ")
    while not raw_count.isnumeric():
        raw_count = input("Incorrect value, try again:")
    N = int(raw_count)
    # Keep prompting until exactly N valid float probabilities are given.
    while True:
        raw_probs = input("Input probabilities: ").strip()
        try:
            ps = [float(token) for token in raw_probs.split(" ")]
        except ValueError:
            print("Incorrect input")
            continue
        if len(ps) == N:
            break
        print("Incorrect number of probabilities!")
    P_sys = calc_P_Sys(N, 'data.txt', ps)
    print(f"Total system reliability: {P_sys}")
|
20,493 | d483e63e36dfe107d0ccc9293bc42b99a0e53deb | from datetime import datetime
from functools import wraps
import shelve
def time_execution(func):
    """Decorator that prints how long *func* took to run, in microseconds.

    The wrapped function's return value is passed through unchanged.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        dt1 = datetime.now()
        output = func(*args, **kwargs)
        dt2 = datetime.now()
        # Bug fix: the original printed dt2.microsecond - dt1.microsecond,
        # which compares only the sub-second fields and can wrap or go
        # negative for calls longer than a second; use the full timedelta.
        elapsed_us = round((dt2 - dt1).total_seconds() * 1_000_000)
        print("Execution time:", elapsed_us)
        return output
    return wrapper
# Determines the boolean type and returns all the keywords entered by user
def detect_query_type(query):
    """Return (boolean operator, keyword set) parsed from a user query.

    The operator is 'or' only when the lower-cased query contains ' or '
    and no ' and '; every other query is treated as an AND query.
    Duplicate words are collapsed and the operator words themselves are
    removed from the keyword set.
    """
    lowered = query.lower()
    has_and = ' and ' in lowered
    has_or = ' or ' in lowered
    qtype = 'or' if has_or and not has_and else 'and'
    keywords = set(lowered.split())
    if has_and:
        keywords.remove('and')
    if has_or:
        keywords.remove('or')
    return qtype, keywords
# Performs search
@time_execution
def search(shelf, qtype, keywords):
    """Look up *keywords* in the shelve index file *shelf*.

    Args:
        shelf: path of the shelve file mapping word -> set of positions.
        qtype: 'and' (intersect per-word results) or 'or' (union them).
        keywords: iterable of lower-cased words to look up.

    Returns:
        Sorted list of matching positions; empty when nothing matches
        or *keywords* is empty.
    """
    # Initializing here fixes an UnboundLocalError the original raised
    # when *keywords* was empty.
    output = set()
    # The context manager fixes the original's leaked shelve handle.
    with shelve.open(shelf) as s:
        print("Performing {} search for {}".format(qtype.upper(), keywords))
        for i, word in enumerate(keywords):
            if i == 0:
                try:
                    output = s[word]
                except KeyError:
                    output = set()
            elif word in s:
                if qtype == 'and':
                    output = output.intersection(s[word])
                elif qtype == 'or':
                    output = output.union(s[word])
    return sorted(output)
20,494 | 73ff622b20dc2eaf1c8b36853fe7ad50a91d8d13 | # Capitalization matters in equality testing. This prints False:
# Python 2 tutorial snippet: equality testing, boolean chaining and
# if/elif/else control flow.
# Capitalization matters in equality testing. This prints False:
print 'Audi' == 'audi'
# This prints True
print 'Audi'.lower() == 'audi'
# Multiple conditions are chained with and/or
print 36 == 37 and 36 == 36
print 36 == 37 or 36 == 36
# Checking for value in list
my_age = 36
print my_age in range(30,40)
# Checking for value not in list
print my_age not in range(20,30)
# If statements also use the colon. Remember it:
if my_age < 30:
    print 'A few more years to go'
# elif is a funny word
elif my_age > 40:
    print 'You had your fun'
else:
    # NOTE(review): "yor" below looks like a typo for "your"; left
    # unchanged because it is a runtime string.
    print 'You are in yor best age'
20,495 | 3388ea74a7247a4d2bc1b729987fd8490571aa5f | from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
import pyautogui, pyperclip
import time, os
###- https://www.selenium.dev/selenium/docs/api/py/webdriver_support/selenium.webdriver.support.expected_conditions.html
###- https://sites.google.com/a/chromium.org/chromedriver/downloads
class SeleniumHelper:
    """Convenience wrapper around a Selenium Chrome driver.

    Bundles driver lifecycle management (create / reset / close),
    element lookup by id, class, tag, xpath or link text, and
    keyboard/clipboard helpers built on pyautogui and pyperclip.
    Every method catches its own exceptions and reports them through
    ``self.globals.debug`` so callers never crash on a missing element;
    failed lookups simply return None.
    """
    TAG_BODY = 'body'
    TAG_SELECT = 'select'
    TAG_OPTION = 'option'
    TAG_HEADER = 'header'
    TAG_BUTTON = 'button'
    TAG_IMPUT = 'input'  # NOTE(review): "IMPUT" typo kept; external code may reference this name
    TAG_TABLE = 'table'
    TAG_FORM = 'form'
    TAG_PRE = 'pre'
    TAG_BR = 'br'
    ATTRIBUTE_HREF = 'href'
    def __init__(self,globals,waittingTime=2):
        # "globals" is the application-wide helper object; it provides
        # debug(), apiPath, OS_SEPARATOR and ERROR (used below).
        self.globals = globals
        self.pyautogui = pyautogui
        self.time = time
        self.pyautogui.FAILSAFE = True
        self.pyperclip = pyperclip
        self.waittingTime = waittingTime
        self.fractionOfWaittingTime = waittingTime / 7.0
        self.driverPath = f'{self.globals.apiPath}api{self.globals.OS_SEPARATOR}resource{self.globals.OS_SEPARATOR}dependency{self.globals.OS_SEPARATOR}chromedriver.exe'
        self.aKey = 'a'
        self.closeBraceKey = '}'
    def newDriver(self):
        """Close any existing driver, start a fresh Chrome session and
        return its <body> element."""
        try :
            self.closeDriver()
        except Exception as exception :
            self.globals.debug(f'{self.globals.ERROR}Failed to close driver. Cause: {str(exception)}')
        try :
            try :
                # Prefer the auto-managed chromedriver; fall back to the bundled binary.
                self.driver = webdriver.Chrome(ChromeDriverManager().install()) ### webdriver.Chrome(executable_path=self.driverPath)
            except Exception as exception :
                self.globals.debug(f'Failed to load web driver from default library. Going for a second attempt by another library. Cause: {str(exception)}')
                self.driver = webdriver.Chrome(executable_path=self.driverPath)
            self.wait()
            return self.driver.find_element_by_tag_name(self.TAG_BODY)
        except Exception as exception :
            # Bug fix: log message typo "creat" -> "create".
            self.globals.debug(f'Failed to create a new driver. Cause: {str(exception)}')
    def reset(self):
        """Switch back to the default content (leave any iframe)."""
        try :
            self.driver.switch_to.default_content()
            self.wait(fraction=True)
        except Exception as exception :
            self.globals.debug(f'Failed to reset driver. Cause: {str(exception)}')
    def closeDriver(self):
        try :
            self.driver.close()
        except Exception as exception :
            self.globals.debug(f'Failed to close driver. Cause: {str(exception)}')
    def wait(self,fraction=False,processingTime=None):
        """Sleep for a fraction of, an explicit, or the default waiting time."""
        if fraction :
            self.time.sleep(self.fractionOfWaittingTime)
        elif processingTime :
            self.time.sleep(processingTime)
        else :
            self.time.sleep(self.waittingTime)
    def copyPasteAutoguiAfterElementClicked(self,text):
        """Paste *text* via the OS clipboard into the focused element."""
        try :
            self.pyperclip.copy(text)
            self.pyautogui.hotkey("ctrl", "v")
            self.wait()
        except Exception as exception :
            self.globals.debug(f'Failed to copy paste text (by pyautogui). Cause: {str(exception)}')
    def paste(self,text,elementRequest):
        # Windows-only: "clip" fills the clipboard, then Ctrl+V pastes.
        try :
            os.system("echo %s| clip" % text.strip())
            elementRequest.send_keys(Keys.CONTROL, 'v')
            self.wait()
        except Exception as exception :
            self.globals.debug(f'Failed to paste text to the element. Cause: {str(exception)}')
    def getDriver(self,elementRequest):
        """Return *elementRequest* as search context if given, else the driver."""
        try :
            self.wait(fraction=True)
            if elementRequest :
                return elementRequest
            else :
                return self.driver
        except Exception as exception :
            self.globals.debug(f'Failed to get driver. Cause: {str(exception)}')
    def accessUrl(self,url,waittingTime=0,acceptAlert=False,ignoreAlert=False):
        """Navigate to *url*, optionally resolving an alert box, and return the driver."""
        try :
            self.driver.get(url)
            self.wait()
            self.wait(processingTime = waittingTime)
            self.handleAlertBox(waittingTime=waittingTime,acceptAlert=acceptAlert,ignoreAlert=ignoreAlert)
            self.driver.find_element_by_tag_name(self.TAG_BODY)
            return self.driver
        except Exception as exception :
            self.globals.debug(f'Failed to access url. Cause: {str(exception)}')
    def refreshPage(self):
        try :
            self.driver.refresh()
            self.wait()
        except Exception as exception :
            print(f'{self.globals.ERROR}Failed to refresh page. Cause: {str(exception)}')
    def handleAlertBox(self,waittingTime=0,acceptAlert=False,ignoreAlert=False):
        """Ignore or accept a pending alert; return True when one was resolved."""
        resolved = False
        self.wait(processingTime = waittingTime)
        try :
            if ignoreAlert :
                self.driver.switch_to.alert.ignore()
                resolved = True
            elif acceptAlert :
                self.driver.switch_to.alert.accept()
                resolved = True
            return resolved
        except Exception as exception :
            self.globals.debug(f'No alertFound. Cause: {str(exception)}')
            return resolved
    def findButton(self,elementRequest):
        try :
            driver = self.getDriver(elementRequest)
            element = driver.find_element_by_tag_name(self.TAG_BUTTON)
            return element
        except Exception as exception :
            self.globals.debug(f'Failed to find button. Cause: {str(exception)}')
    def accessButton(self,elementRequest):
        try :
            driver = self.getDriver(elementRequest)
            element = driver.find_element_by_tag_name(self.TAG_BUTTON)
            element = element.click()
            self.wait(fraction=True)
            return element
        except Exception as exception :
            self.globals.debug(f'Failed to access button. Cause: {str(exception)}')
    def findById(self,id,elementRequest):
        try :
            driver = self.getDriver(elementRequest)
            element = driver.find_element_by_id(id)
            return element
        except Exception as exception :
            self.globals.debug(f'Failed to find by id. Cause: {str(exception)}')
    def findByClass(self,cssClass,elementRequest):
        try :
            driver = self.getDriver(elementRequest)
            element = driver.find_element_by_class_name(cssClass)
            return element
        except Exception as exception :
            self.globals.debug(f'Failed to find by class. Cause: {str(exception)}')
    def accessClass(self,cssClass,elementRequest):
        try :
            driver = self.getDriver(elementRequest)
            element = driver.find_element_by_class_name(cssClass)
            element = element.click()
            self.wait(fraction=True)
            return element
        except Exception as exception :
            self.globals.debug(f'Failed to access class. Cause: {str(exception)}')
    def accessTag(self,tagName,elementRequest):
        try :
            driver = self.getDriver(elementRequest)
            element = driver.find_element_by_tag_name(tagName)
            element = element.click()
            self.wait(fraction=True)
            return element
        except Exception as exception :
            self.globals.debug(f'Failed to access tag. Cause: {str(exception)}')
    def getTextByClass(self,cssClass,elementRequest):
        try :
            driver = self.getDriver(elementRequest)
            element = driver.find_element_by_class_name(cssClass)
            return element.text
        except Exception as exception :
            self.globals.debug(f'Failed to get text by class. Cause: {str(exception)}')
    def getTextBySelector(self,selector,elementRequest):
        try :
            driver = self.getDriver(elementRequest)
            element = driver.find_element_by_xpath(selector)
            return element.text
        except Exception as exception :
            self.globals.debug(f'Failed to get text by selector. Cause: {str(exception)}')
    def findButtonByClass(self,cssClass,elementRequest):
        try :
            driver = self.getDriver(elementRequest)
            element = driver.find_element_by_css_selector(f'{self.TAG_BUTTON}.{cssClass}')
            return element
        except Exception as exception :
            self.globals.debug(f'Failed to find button by class. Cause: {str(exception)}')
    def accessButtonByClass(self,cssClass,elementRequest):
        try :
            driver = self.getDriver(elementRequest)
            element = driver.find_element_by_css_selector(f'{self.TAG_BUTTON}.{cssClass}')
            element = element.click()
            self.wait(fraction=True)
            return element
        except Exception as exception :
            self.globals.debug(f'Failed to access button by class. Cause: {str(exception)}')
    def accesHiperLink(self,hiperLink,elementRequest):
        # NOTE(review): method name typo ("accesHiperLink") kept for
        # backward compatibility with existing callers.
        try :
            driver = self.getDriver(elementRequest)
            element = driver.find_element_by_link_text(hiperLink)
            ###- element = driver.find_element_by_partial_link_text(hiperLink)
            element = element.click()
            self.wait(fraction=True)
            return element
        except Exception as exception :
            self.globals.debug(f'Failed to access hyperlink. Cause: {str(exception)}')
    def accessId(self,id,elementRequest):
        try :
            driver = self.getDriver(elementRequest)
            element = driver.find_element_by_id(id)
            element = element.click()
            self.wait(fraction=True)
            return element
        except Exception as exception :
            self.globals.debug(f'Failed to access id. Cause: {str(exception)}')
    def selectAllByClass(self,cssClass,elementRequest):
        """Send Ctrl+A to the first element matching *cssClass*."""
        try :
            driver = self.getDriver(elementRequest)
            element = driver.find_element_by_class_name(cssClass)
            element.send_keys(Keys.CONTROL, self.aKey)
            self.wait(fraction=True)
            return element
        except Exception as exception :
            self.globals.debug(f'Failed to select all by class. Cause: {str(exception)}')
    def typeIn(self,text,elementRequest):
        """Select-all then type *text*, replacing the current content."""
        try :
            driver = self.getDriver(elementRequest)
            driver.send_keys(Keys.CONTROL, self.aKey)
            driver.send_keys(text)
            self.wait(fraction=True)
            return driver
        except Exception as exception :
            self.globals.debug(f'Failed to type in. Cause: {str(exception)}')
    def typeInAndHitEnter(self,text,elementRequest):
        try :
            driver = self.getDriver(elementRequest)
            driver.send_keys(Keys.CONTROL, self.aKey)
            driver.send_keys(text)
            driver.send_keys(Keys.RETURN)
            self.wait(fraction=True)
            return driver
        except Exception as exception :
            self.globals.debug(f'Failed to type in. Cause: {str(exception)}')
    def hitEnter(self,elementRequest):
        try :
            driver = self.getDriver(elementRequest)
            driver.send_keys(Keys.RETURN)
            return driver
        except Exception as exception :
            self.globals.debug(f'Failed to hit enter. Cause: {str(exception)}')
    def typeInSwagger(self,text,elementRequest):
        """Replace the content of a Swagger UI editor field with *text*.

        The key choreography (backspace/arrow dance) works around the
        editor's auto-completion while the text is typed in.
        """
        try :
            filteredText = text.strip()
            driver = self.getDriver(elementRequest)
            driver.send_keys(Keys.CONTROL, self.aKey)
            driver.send_keys(Keys.BACKSPACE)
            driver.send_keys(Keys.ARROW_LEFT)
            driver.send_keys(filteredText[0])
            driver.send_keys(Keys.ARROW_LEFT)
            driver.send_keys(Keys.BACKSPACE)
            driver.send_keys(Keys.ARROW_RIGHT)
            driver.send_keys(text.strip()[1:])
            driver.send_keys(Keys.DELETE)
            self.wait(fraction=True)
            return driver
        except Exception as exception :
            self.globals.debug(f'Failed to type in swagger. Cause: {str(exception)}')
    def findByTag(self,tagName,elementRequest):
        try :
            driver = self.getDriver(elementRequest)
            return driver.find_element_by_tag_name(tagName)
        except Exception as exception :
            self.globals.debug(f'Failed to find by tag. Cause: {str(exception)}')
    def findBySelector(self,selector,elementRequest):
        try :
            driver = self.getDriver(elementRequest)
            element = driver.find_element_by_xpath(selector)
            return element
        except Exception as exception :
            self.globals.debug(f'Failed to find by selector. Cause: {str(exception)}')
    def accessSelector(self,selector,elementRequest):
        try :
            driver = self.getDriver(elementRequest)
            element = driver.find_element_by_xpath(selector)
            element = element.click()
            self.wait(fraction=True)
            return element
        except Exception as exception :
            self.globals.debug(f'Failed to access selector. Cause: {str(exception)}')
    def findAllByClass(self,className,elementRequest):
        try :
            driver = self.getDriver(elementRequest)
            return driver.find_elements_by_class_name(className)
        except Exception as exception :
            self.globals.debug(f'Failed to find all by class. Cause: {str(exception)}')
    def findAllByTag(self,tagName,elementRequest):
        try :
            driver = self.getDriver(elementRequest)
            return driver.find_elements_by_tag_name(tagName)
        except Exception as exception :
            self.globals.debug(f'Failed to find all by tag. Cause: {str(exception)}')
    def findAllBySelector(self,selector,elementRequest):
        try :
            driver = self.getDriver(elementRequest)
            return driver.find_elements_by_xpath(selector)
        except Exception as exception :
            self.globals.debug(f'Failed to find all by class. Cause: {str(exception)}')
    def clickElement(self,elementRequest):
        try :
            elementRequest.click()
        except Exception as exception :
            # Bug fix: the original referenced an undefined name "element"
            # here, raising a NameError inside the error handler itself.
            self.globals.debug(f'Failed to click element {str(elementRequest)}. Cause: {str(exception)}')
    def calculateAndClick(self,position,fatherSize):
        """Translate a page-relative *position* into screen coordinates and
        click it with pyautogui; returns the adjusted position."""
        try :
            windowX = self.driver.execute_script("return window.screenX")
            windowY = self.driver.execute_script("return window.screenY")
            windowOuterWidth = self.driver.execute_script("return window.outerWidth")
            windowOuterHeight = self.driver.execute_script("return window.outerHeight")
            windowInnerWidth = self.driver.execute_script("return window.innerWidth")
            windowInnerHeight = self.driver.execute_script("return window.innerHeight")
            windowScrollX = self.driver.execute_script("return window.scrollX")
            windowScrollY = self.driver.execute_script("return window.scrollY")
            # Browser chrome (borders/toolbars) offsets, estimated from the
            # difference between outer and inner window sizes.
            bottonWidth = (windowOuterWidth - windowInnerWidth) / 2
            position[0] += int(windowX + (windowInnerWidth - fatherSize[0]) / 2 - bottonWidth)
            position[1] += int(windowY + (windowOuterHeight - windowInnerHeight - bottonWidth) + (windowInnerHeight - fatherSize[1]) / 2 + 1.5 * bottonWidth)
            self.pyautogui.moveTo(position[0],position[1])
            self.pyautogui.click()
            return position
        except Exception as exception :
            # Bug fix: the original referenced an undefined name "element";
            # report the position being clicked instead.
            self.globals.debug(f'Failed to click at position {str(position)}. Cause: {str(exception)}')
|
20,496 | e390cbec0893e5c12a3bd24b16b87d2759587f93 | __FILENAME__ = argpaser
from argparse import ArgumentParser
def create_parser():
    """Build the ArgumentParser for the danmaku download/convert CLI.

    '-'-prefixed options configure input/output and danmaku filtering;
    '+'-prefixed options configure the generated ass subtitle.
    """
    parser = ArgumentParser(
        description='弹幕字幕下载和转换工具',
        prefix_chars='-+')
    # Input/output group.
    add_arg = parser.add_argument_group('输入输出').add_argument
    add_arg('url',
            help='视频地址',
            type=str)
    add_arg('-o', '--output-filename',
            metavar='FILENAME',
            help='输出文件,默认为视频标题',
            type=str,
            default=None)
    add_arg('-p', '--create-playlist',
            help='同时输出播放列表',
            action='store_true')
    # Danmaku (comment stream) options group.
    add_arg = parser.add_argument_group('弹幕选项').add_argument
    add_arg('-a', '--assist-params',
            metavar='NAME1=1,NAME2=2',
            help='辅助参数,手动指定无法直接获取的参数',
            type=str,
            default=None)
    add_arg('-f', '--custom-filter',
            metavar='FILE',
            help='过滤文件,关键词过滤规则文件名',
            type=str,
            default=None)
    add_arg('-B', '--disable-bottom-filter',
            help='不要过滤底部弹幕',
            action='store_true')
    add_arg('-G', '--disable-guest-filter',
            help='不要过滤游客弹幕',
            action='store_true')
    add_arg('-V', '--disable-video-filter',
            help='不要过滤云屏蔽弹幕',
            action='store_true')
    add_arg('-s', '--skip-patch',
            help='跳过补丁,起始位置偏移到正片位置',
            action='store_true')
    add_arg('-m', '--merge-parts',
            help='合并分段,把页面的分段视频为同一个视频',
            action='store_true')
    # Subtitle rendering options group ('+'-prefixed flags).
    add_arg = parser.add_argument_group('字幕选项').add_argument
    add_arg('+r', '--play-resolution',
            metavar='WIDTHxHEIGHT',
            help='播放分辨率,默认为 %(default)s',
            type=str,
            default='1920x1080')
    add_arg('+f', '--font-name',
            metavar='NAME',
            help='字体名称,默认为自动选择',
            type=str,
            default=None)
    add_arg('+s', '--font-size',
            metavar='SIZE',
            help='字体大小,默认为 %(default)s 像素',
            type=int,
            default=32)
    add_arg('+l', '--line-count',
            metavar='COUNT',
            help='限制行数,默认为 %(default)s 行',
            type=int,
            default=4)
    add_arg('+a', '--layout-algorithm',
            metavar='NAME',
            help='布局算法,默认为 %(default)s 算法',
            type=str,
            choices=('sync', 'async'),
            default='sync')
    add_arg('+t', '--tune-duration',
            metavar='SECONDS',
            help='微调时长,默认为 %(default)s 秒',
            type=int,
            default=0)
    add_arg('+d', '--drop-offset',
            metavar='SECONDS',
            help='丢弃偏移,默认为 %(default)s 秒',
            type=int,
            default=5)
    add_arg('+b', '--bottom-margin',
            metavar='HEIGHT',
            help='底部边距,默认为 %(default)s 像素',
            type=int,
            default=0)
    add_arg('+c', '--custom-offset',
            metavar='LENGTH',
            help='自定偏移',
            type=str,
            default='0')
    add_arg('+h', '--header-file',
            metavar='FILE',
            help='样式模板,ass 的样式模板文件',
            type=str,
            default=None)
    return parser
# Module-level singleton parser.  NOTE(review): the name keeps the original
# "argpaser" (sic) spelling because other modules import it under this name.
argpaser = create_parser()
########NEW FILE########
__FILENAME__ = main
from ..libsite.producer import Producer
from ..libass.studio import Studio
from .argpaser import argpaser
def parseargs():
    """Parse CLI options into three dicts: io, danmaku and subtitle args."""
    namespace = argpaser.parse_args()

    def pick(keys):
        # Project the parsed namespace onto the given attribute names.
        return {key: getattr(namespace, key) for key in keys}

    io_args = pick(('url', 'output_filename', 'create_playlist'))
    danmaku_args = pick((
        'assist_params', 'custom_filter', 'disable_bottom_filter',
        'disable_guest_filter', 'disable_video_filter',
        'skip_patch', 'merge_parts',
    ))
    subtitle_args = pick((
        'play_resolution', 'font_name', 'font_size',
        'line_count', 'layout_algorithm', 'tune_duration',
        'drop_offset', 'bottom_margin', 'custom_offset', 'header_file',
    ))
    return io_args, danmaku_args, subtitle_args
def convert(io_args, danmaku_args, subtitle_args):
    """Download danmakus for a video, filter them and write an ass subtitle.

    io_args carries url/output_filename/create_playlist; danmaku_args is
    forwarded to Producer (download + filtering) and subtitle_args to
    Studio (ass layout/rendering).  Progress is reported via print().
    """
    url = io_args['url']
    output_filename = io_args['output_filename']
    create_playlist = io_args['create_playlist']
    producer = Producer(danmaku_args, url)
    print('--------')
    print('下载文件')
    print('--------')
    producer.start_download()
    print()
    print('--------')
    print('视频信息')
    print('--------')
    # One summary line pair per downloaded video part.
    for i, video in enumerate(producer.videos):
        print('#' + str(i), str(video.uid), video.title)
        print('视频长度({0.play_length}) 正片位置({0.feature_start}) '
              '弹幕数量({1})'
              .format(video, len(video.danmakus)))
    print()
    producer.start_handle()
    print('--------')
    print('过滤情况')
    print('--------')
    # Breakdown of how many danmakus each filter removed.
    print('屏蔽条数:底部({bottom}) + '
          '游客({guest}) + 云屏蔽({video}) + 自定义({custom}) = {}'
          .format(producer.blocked_count, **producer.filter_detail))
    print('通过条数:总共({0.total_count}) - 屏蔽({0.blocked_count}) = '
          '{0.passed_count}'.format(producer))
    print()
    studio = Studio(subtitle_args, producer)
    studio.start_handle()
    print('--------')
    print('输出文件')
    print('--------')
    print('字幕条数:总共({0}) - 丢弃({1.droped_count}) = '
          '{1.keeped_count}'
          .format(len(studio.ass_danmakus), studio))
    print('字幕文件:' + studio.create_ass_file(output_filename))
    if create_playlist:
        print('播放列表:' + studio.create_m3u_file(output_filename))
    print()
def main():
    """CLI entry point: parse the arguments and run the conversion."""
    io_args, danmaku_args, subtitle_args = parseargs()
    convert(io_args, danmaku_args, subtitle_args)
########NEW FILE########
__FILENAME__ = danmakuframe
import os
from .tkmodules import tk, ttk, tku
class DanmakuFrame(ttk.LabelFrame):
    """Danmaku-options panel ('弹幕选项') of the GUI.

    Collects the filtering options and exposes them via values() as a
    dict whose keys match the CLI argument names.
    """
    def __init__(self, parent):
        ttk.LabelFrame.__init__(self, parent, text='弹幕选项', padding=2)
        self.pack(fill=tk.BOTH)
        self.grid_columnconfigure(1, weight=1)
        self.init_widgets()
    def init_widgets(self):
        # One grid row per option, built in display order.
        self.init_assist_params_widgets()
        self.init_custom_filter_widgets()
        self.init_disable_bottom_filter_widgets()
        self.init_disable_guest_filter_widgets()
        self.init_disable_video_filter_widgets()
        self.init_skip_patch_widgets()
        self.init_merge_parts_widgets()
        tku.add_border_space(self, 1, 1)
    def init_assist_params_widgets(self):
        strvar = tk.StringVar()
        label = ttk.Label(self, text='辅助参数:')
        entry = ttk.Entry(self, textvariable=strvar)
        label.grid(row=0, column=0, sticky=tk.E)
        entry.grid(row=0, column=1, sticky=tk.EW, columnspan=2)
        self.assist_params_strvar = strvar
    def init_custom_filter_widgets(self):
        strvar = tk.StringVar()
        label = ttk.Label(self, text='过滤文件:')
        entry = ttk.Entry(self, textvariable=strvar)
        button = ttk.Button(self, text='浏览', width=6)
        label.grid(row=1, column=0, sticky=tk.E)
        entry.grid(row=1, column=1, sticky=tk.EW)
        button.grid(row=1, column=2, sticky=tk.W)
        button['command'] = self.on_custom_filter_button_clicked
        self.custom_filter_strvar = strvar
    def init_disable_bottom_filter_widgets(self):
        intvar = tk.IntVar()
        checkbutton = ttk.Checkbutton(
            self, text='不要过滤底部弹幕', variable=intvar)
        checkbutton.grid(row=2, column=0, sticky=tk.W, columnspan=3)
        self.disable_bottom_filter_intvar = intvar
    def init_disable_guest_filter_widgets(self):
        intvar = tk.IntVar()
        checkbutton = ttk.Checkbutton(
            self, text='不要过滤游客弹幕', variable=intvar)
        checkbutton.grid(row=3, column=0, sticky=tk.W, columnspan=3)
        self.disable_guest_filter_intvar = intvar
    def init_disable_video_filter_widgets(self):
        intvar = tk.IntVar()
        checkbutton = ttk.Checkbutton(
            self, text='不要过滤云屏蔽弹幕', variable=intvar)
        checkbutton.grid(row=4, column=0, sticky=tk.W, columnspan=3)
        self.disable_video_filter_intvar = intvar
    def init_skip_patch_widgets(self):
        intvar = tk.IntVar()
        checkbutton = ttk.Checkbutton(self, text='跳过补丁', variable=intvar)
        checkbutton.grid(row=5, column=0, sticky=tk.W, columnspan=3)
        self.skip_patch_intvar = intvar
    def init_merge_parts_widgets(self):
        intvar = tk.IntVar()
        checkbutton = ttk.Checkbutton(self, text='合并分段', variable=intvar)
        checkbutton.grid(row=6, column=0, sticky=tk.W, columnspan=3)
        self.merge_parts_intvar = intvar
    def on_custom_filter_button_clicked(self):
        """Let the user pick the keyword-filter file via a file dialog."""
        current_path = self.custom_filter_strvar.get().strip()
        if current_path == '':
            foldername, filename = os.getcwd(), ''
        else:
            foldername, filename = os.path.split(current_path)
        selected_path = tk.filedialog.askopenfilename(
            parent=self,
            title='打开文件',
            initialdir=foldername,
            initialfile=filename
        )
        # NOTE(review): stock tkinter returns '' (or an empty tuple) on
        # cancel, not None -- confirm this guard is reachable, otherwise a
        # cancelled dialog clears the entry below.
        if selected_path is None:
            return
        self.custom_filter_strvar.set(selected_path)
    def values(self):
        """Return the selected options keyed like the CLI argument names."""
        return dict(
            assist_params=self.assist_params_strvar.get().strip(),
            custom_filter=self.custom_filter_strvar.get().strip(),
            disable_bottom_filter=self.disable_bottom_filter_intvar.get() == 1,
            disable_guest_filter=self.disable_guest_filter_intvar.get() == 1,
            disable_video_filter=self.disable_video_filter_intvar.get() == 1,
            skip_patch=self.skip_patch_intvar.get() == 1,
            merge_parts=self.merge_parts_intvar.get() == 1,
        )
########NEW FILE########
__FILENAME__ = ioframe
import os
from .tkmodules import tk, ttk, tku
class IoFrame(ttk.LabelFrame):
    """Input/output panel ('输入输出'): url, output filename, playlist flag
    and the convert button.  Emits <<ConvertButtonClicked>>."""
    def __init__(self, parent):
        ttk.LabelFrame.__init__(self, parent, text='输入输出', padding=2)
        self.pack(fill=tk.BOTH)
        self.grid_columnconfigure(1, weight=1)
        self.init_widgets()
    def init_widgets(self):
        self.init_url_widgets()
        self.init_output_filename_widgets()
        self.init_create_playlist_widgets()
        self.init_convert_widgets()
        tku.add_border_space(self, 1, 1)
    def init_url_widgets(self):
        strvar = tk.StringVar()
        label = ttk.Label(self, text='视频地址:')
        entry = ttk.Entry(self, textvariable=strvar)
        label.grid(row=0, column=0, sticky=tk.E)
        entry.grid(row=0, column=1, sticky=tk.EW, columnspan=2)
        self.url_strvar = strvar
    def init_output_filename_widgets(self):
        strvar = tk.StringVar()
        label = ttk.Label(self, text='输出文件:')
        entry = ttk.Entry(self, textvariable=strvar)
        button = ttk.Button(self, text='浏览', width=6)
        label.grid(row=1, column=0, sticky=tk.E)
        entry.grid(row=1, column=1, sticky=tk.EW)
        button.grid(row=1, column=2, sticky=tk.W)
        # Default output location is the current working directory.
        strvar.set(os.getcwd())
        button['command'] = self.on_output_filename_button_clicked
        self.output_filename_strvar = strvar
    def init_create_playlist_widgets(self):
        intvar = tk.IntVar()
        checkbutton = ttk.Checkbutton(
            self, text='同时输出播放列表', variable=intvar)
        # Shares row 3 with the convert button (checkbox left, button right).
        checkbutton.grid(row=3, column=0, sticky=tk.W, columnspan=3)
        self.create_playlist_intvar = intvar
    def init_convert_widgets(self):
        button = ttk.Button(self, text='转换', width=6)
        button.grid(row=3, column=2, sticky=tk.W)
        button['command'] = self.on_convert_button_clicked
        self.convert_button = button
    def on_output_filename_button_clicked(self):
        """Pick the output path via a save-file dialog, seeded from the entry."""
        current_path = self.output_filename_strvar.get().strip()
        if current_path == '':
            foldername, filename = os.getcwd(), ''
        elif os.path.isdir(current_path):
            foldername, filename = current_path, ''
        else:
            foldername, filename = os.path.split(current_path)
        selected_path = tk.filedialog.asksaveasfilename(
            parent=self,
            title='保存文件',
            initialdir=foldername,
            initialfile=filename
        )
        # NOTE(review): stock tkinter returns '' on cancel, not None, so a
        # cancelled dialog falls through and resets the entry to os.getcwd()
        # below -- confirm that is the intended behaviour.
        if selected_path is None:
            return
        if selected_path == '':
            selected_path = os.getcwd()
        self.output_filename_strvar.set(selected_path)
    def on_convert_button_clicked(self):
        self.event_generate('<<ConvertButtonClicked>>')
    def values(self):
        """Return the io options keyed like the CLI argument names."""
        return dict(
            url=self.url_strvar.get().strip(),
            output_filename=self.output_filename_strvar.get().strip(),
            create_playlist=self.create_playlist_intvar.get() == 1,
        )
    def enable_convert_button(self):
        self.convert_button['state'] = tk.NORMAL
    def disable_convert_button(self):
        self.convert_button['state'] = tk.DISABLED
########NEW FILE########
__FILENAME__ = loggingframe
from .tkmodules import tk, ttk, tku
class LoggingFrame(ttk.LabelFrame):
    """Run-log panel ('运行日志'): a scrolled text widget exposed as a
    file-like object (write/get) so it can stand in for sys.stdout."""
    def __init__(self, parent):
        ttk.LabelFrame.__init__(self, parent, text='运行日志', padding=2)
        self.pack(fill=tk.BOTH, expand=True)
        self.grid_columnconfigure(1, weight=1)
        self.init_widgets()
    def init_widgets(self):
        # NOTE(review): ScrolledText lives in tkinter.scrolledtext in stock
        # tkinter, not in ttk -- presumably the local tkmodules shim
        # re-exports it on ttk; verify.
        scrolledtext = ttk.ScrolledText(self, width=64)
        scrolledtext.pack(side=tk.TOP, fill=tk.BOTH, expand=True)
        self.scrolledtext = scrolledtext
        tku.add_border_space(self, 1, 1)
    def get(self):
        # NOTE(review): Text.get() requires index arguments in stock
        # tkinter; this relies on the shim accepting a no-arg call -- confirm.
        return self.scrolledtext.get()
    def write(self, string):
        """File-like write: append to the log and keep the end visible."""
        self.scrolledtext.insert('end', string)
        self.scrolledtext.see('end')
########NEW FILE########
__FILENAME__ = main
import sys
import webbrowser
import traceback
from pprint import pprint
from ..fndcli.main import convert
from .tkmodules import tk, ttk, tku
from .menubar import MenuBar
from .ioframe import IoFrame
from .danmakuframe import DanmakuFrame
from .loggingframe import LoggingFrame
from .subtitleframe import SubtitleFrame
class Application(ttk.Frame):
    """Main GUI window: wires the menu bar, option panels and log panel
    together and runs conversions triggered from the IoFrame button."""
    def __init__(self):
        ttk.Frame.__init__(self, None, border=2)
        self.pack(fill=tk.BOTH, expand=True)
        self.init_widgets()
    def init_widgets(self):
        self.init_topwin()
        self.init_menubar()
        self.init_left_frame()
        self.init_right_frame()
        tku.add_border_space(self, 2, 2)
        # On Windows the toplevel first appears at its default position;
        # moving it right away still makes it flash there.  So hide it,
        # recenter, then show it again.
        if sys.platform.startswith('win'):
            self.topwin.withdraw()
            tku.move_to_screen_center(self.topwin)
            self.topwin.deiconify()
        else:
            tku.move_to_screen_center(self.topwin)
    def init_topwin(self):
        """Configure the toplevel window: title, platform icon, close hook."""
        self.topwin = self.winfo_toplevel()
        self.topwin.title('Niconvert')
        if sys.platform.startswith('win'):
            icon_path = tku.asset_path('logo.ico')
            self.topwin.iconbitmap(default=icon_path)
        else:
            icon_path = tku.asset_path('logo.gif')
            self.topwin.iconphoto(self.topwin, tk.PhotoImage(file=icon_path))
        self.topwin.protocol('WM_DELETE_WINDOW', self.quit)
    def init_menubar(self):
        # XXX On Windows XP/7 with Python 3.3 the bound virtual events never
        # reach the handlers; cause unknown.  To avoid passing external
        # dependencies into MenuBar, monkey-patch its callbacks for now.
        if sys.platform.startswith('win'):
            MenuBar.on_quit_menuitem_clicked = \
                lambda s: self.on_quit_menuitem_clicked(None)
            MenuBar.on_help_menuitem_clicked = \
                lambda s: self.on_help_menuitem_clicked(None)
            MenuBar.on_about_menuitem_clicked = \
                lambda s: self.on_about_menuitem_clicked(None)
        events = {
            '<<QuitMenuitemClicked>>': self.on_quit_menuitem_clicked,
            '<<HelpMenuitemClicked>>': self.on_help_menuitem_clicked,
            '<<AboutMenuitemClicked>>': self.on_about_menuitem_clicked,
        }
        menubar = MenuBar(self)
        for name, func in events.items():
            menubar.bind(name, func)
        self.topwin.config(menu=menubar)
    def init_left_frame(self):
        # Left column: the three option panels, stacked.
        frame = ttk.Frame(self)
        self.io_frame = IoFrame(frame)
        self.danmaku_frame = DanmakuFrame(frame)
        self.subtitle_frame = SubtitleFrame(frame)
        self.io_frame.bind('<<ConvertButtonClicked>>',
                           self.on_convert_button_clicked)
        frame.grid_columnconfigure(1, weight=1)
        frame.pack(side=tk.LEFT, fill=tk.BOTH)
    def init_right_frame(self):
        # Right column: the expanding log panel.
        frame = ttk.Frame(self)
        self.logging_frame = LoggingFrame(frame)
        frame.grid_columnconfigure(1, weight=1)
        frame.pack(side=tk.LEFT, fill=tk.BOTH, expand=True)
    def get_convert_args_list(self):
        """Collect the three panels' values; echo them when stdout exists."""
        io_args = self.io_frame.values()
        danmaku_args = self.danmaku_frame.values()
        subtitle_args = self.subtitle_frame.values()
        if sys.stdout:
            pprint(io_args)
            pprint(danmaku_args)
            pprint(subtitle_args)
        return (io_args, danmaku_args, subtitle_args)
    def on_convert_button_clicked(self, event):
        """Run a conversion with stdout redirected into the log panel."""
        args_list = self.get_convert_args_list()
        if args_list[0]['url'] == '':
            return
        # TODO: run the conversion in a worker thread (the UI blocks now).
        orig_stdout = sys.stdout
        orig_stderr = sys.stderr
        self.io_frame.disable_convert_button()
        sys.stdout = self.logging_frame
        try:
            print('========')
            print('开始转换')
            print('========')
            print()
            convert(*args_list)
        # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit;
        # the cleanup below is not in a finally block, so it is skipped if the
        # traceback printing itself fails -- worth confirming/tightening.
        except:
            print(traceback.format_exc())
        self.io_frame.enable_convert_button()
        sys.stdout = orig_stdout
        sys.stderr = orig_stderr
    def on_quit_menuitem_clicked(self, event):
        self.quit()
    def on_help_menuitem_clicked(self, event):
        webbrowser.open('https://github.com/muzuiget/niconvert/wiki')
    def on_about_menuitem_clicked(self, event):
        webbrowser.open('https://github.com/muzuiget/niconvert#readme')
def main():
    """GUI entry point: build the application window and enter the Tk loop."""
    Application().mainloop()
########NEW FILE########
__FILENAME__ = menubar
from .tkmodules import tk
class MenuBar(tk.Menu):
    """Application menu bar (file/help menus).

    Decoupled from the application: menu items only generate virtual
    events (<<QuitMenuitemClicked>> etc.) for the owner to bind.
    """
    def __init__(self, parent):
        tk.Menu.__init__(self, parent)
        self.init_widgets()
    def init_widgets(self):
        file_menu = tk.Menu(self, tearoff=0)
        file_menu.add_command(
            label='退出(Q)', underline=3,
            command=self.on_quit_menuitem_clicked)
        help_menu = tk.Menu(self, tearoff=0)
        help_menu.add_command(
            label='帮助(O)', underline=3,
            command=self.on_help_menuitem_clicked)
        help_menu.add_command(
            label='关于(A)', underline=3,
            command=self.on_about_menuitem_clicked)
        self.add_cascade(label='文件(F)', menu=file_menu, underline=3)
        self.add_cascade(label='帮助(H)', menu=help_menu, underline=3)
    def on_quit_menuitem_clicked(self):
        self.event_generate('<<QuitMenuitemClicked>>')
    def on_help_menuitem_clicked(self):
        self.event_generate('<<HelpMenuitemClicked>>')
    def on_about_menuitem_clicked(self):
        self.event_generate('<<AboutMenuitemClicked>>')
########NEW FILE########
__FILENAME__ = subtitleframe
import os
import sys
from .tkmodules import tk, ttk, tku
class SubtitleFrame(ttk.LabelFrame):
    ''' "Subtitle options" panel: the rendering parameters for the ASS output.

    Each option is one labelled grid row; widget variables are kept as
    attributes and collected into a plain dict by values().
    '''
    def __init__(self, parent):
        ttk.LabelFrame.__init__(self, parent, text='字幕选项', padding=2)
        self.pack(fill=tk.BOTH)
        self.grid_columnconfigure(1, weight=1)
        self.init_widgets()
    def init_widgets(self):
        ''' Build every option row, then pad all children uniformly. '''
        self.init_play_resolution_widgets()
        self.init_font_name_widgets()
        self.init_font_size_widgets()
        self.init_line_count_widgets()
        self.init_layout_algorithm_widgets()
        self.init_tune_duration_widgets()
        self.init_drop_offset_widgets()
        self.init_bottom_margin_widgets()
        self.init_custom_offset_widgets()
        self.init_header_file_widgets()
        tku.add_border_space(self, 1, 1)
    def init_play_resolution_widgets(self):
        ''' Row 0: playback resolution in pixels. '''
        label = ttk.Label(self, text='分辨率:')
        box = ResolutionBox(self)
        label1 = ttk.Label(self, text='像素')
        label.grid(row=0, column=0, sticky=tk.E)
        box.grid(row=0, column=1, sticky=tk.EW)
        label1.grid(row=0, column=2, sticky=tk.W)
        box.set('1920x1080')
        self.play_resolution_box = box
    def init_font_name_widgets(self):
        ''' Row 1: font family, pre-filled with a per-platform default. '''
        fonts = list(tk.font.families(self))
        # families() may contain duplicates; dedupe then sort for the combobox
        fonts = list(set(fonts))
        fonts.sort()
        strvar = tk.StringVar()
        label = ttk.Label(self, text='字体名称:')
        combobox = ttk.Combobox(self, textvariable=strvar, values=fonts)
        label.grid(row=1, column=0, sticky=tk.E)
        combobox.grid(row=1, column=1, sticky=tk.EW, columnspan=2)
        if sys.platform == 'linux':
            strvar.set('WenQuanYi Micro Hei')
        else:
            strvar.set('微软雅黑')
        self.font_name_strvar = strvar
    def init_font_size_widgets(self):
        ''' Row 2: base font size in pixels. '''
        label = ttk.Label(self, text='字体大小:')
        spinbox = tk.Spinbox(self, justify=tk.RIGHT, from_=1, to=100)
        label1 = ttk.Label(self, text='像素')
        label.grid(row=2, column=0, sticky=tk.E)
        spinbox.grid(row=2, column=1, sticky=tk.EW)
        label1.grid(row=2, column=2, sticky=tk.W)
        spinbox.delete(0, tk.END)
        spinbox.insert(0, 32)
        self.font_size_spinbox = spinbox
    def init_line_count_widgets(self):
        ''' Row 3: maximum number of subtitle lines (0 appears to mean
        "unlimited/fill screen" — handled downstream, confirm in Config). '''
        label = ttk.Label(self, text='限制行数:')
        spinbox = tk.Spinbox(self, justify=tk.RIGHT, from_=0, to=100)
        label1 = ttk.Label(self, text='行')
        label.grid(row=3, column=0, sticky=tk.E)
        spinbox.grid(row=3, column=1, sticky=tk.EW)
        label1.grid(row=3, column=2, sticky=tk.W)
        spinbox.delete(0, tk.END)
        spinbox.insert(0, 4)
        self.line_count_spinbox = spinbox
    def init_layout_algorithm_widgets(self):
        ''' Row 4: scroll layout algorithm (sync/async radio buttons). '''
        label = ttk.Label(self, text='布局算法:')
        box = AlgorithmBox(self)
        label.grid(row=4, column=0, sticky=tk.E)
        box.grid(row=4, column=1, sticky=tk.EW, columnspan=2)
        box.set('sync')
        self.layout_algorithm_box = box
    def init_tune_duration_widgets(self):
        ''' Row 5: duration tuning offset in seconds (may be negative). '''
        label = ttk.Label(self, text='微调时长:')
        spinbox = tk.Spinbox(self, justify=tk.RIGHT, from_=-10, to=100)
        label1 = ttk.Label(self, text='秒')
        label.grid(row=5, column=0, sticky=tk.E)
        spinbox.grid(row=5, column=1, sticky=tk.EW)
        label1.grid(row=5, column=2, sticky=tk.W)
        spinbox.delete(0, tk.END)
        spinbox.insert(0, 0)
        self.tune_duration_spinbox = spinbox
    def init_drop_offset_widgets(self):
        ''' Row 6: maximum wait (seconds) before a danmaku is dropped. '''
        label = ttk.Label(self, text='丢弃偏移:')
        spinbox = tk.Spinbox(self, justify=tk.RIGHT, from_=0, to=100)
        label1 = ttk.Label(self, text='秒')
        label.grid(row=6, column=0, sticky=tk.E)
        spinbox.grid(row=6, column=1, sticky=tk.EW)
        label1.grid(row=6, column=2, sticky=tk.W)
        spinbox.delete(0, tk.END)
        spinbox.insert(0, 5)
        self.drop_offset_spinbox = spinbox
    def init_bottom_margin_widgets(self):
        ''' Row 7: bottom margin in pixels for bottom-pinned danmakus. '''
        label = ttk.Label(self, text='底部边距:')
        spinbox = tk.Spinbox(self, justify=tk.RIGHT, from_=0, to=100)
        label1 = ttk.Label(self, text='像素')
        label.grid(row=7, column=0, sticky=tk.E)
        spinbox.grid(row=7, column=1, sticky=tk.EW)
        label1.grid(row=7, column=2, sticky=tk.W)
        spinbox.delete(0, tk.END)
        spinbox.insert(0, 0)
        self.bottom_margin_spinbox = spinbox
    def init_custom_offset_widgets(self):
        ''' Row 8: free-form time offset (kept as a string; parsed later). '''
        strvar = tk.StringVar()
        label = ttk.Label(self, text='自定偏移:')
        entry = ttk.Entry(self, textvariable=strvar, justify=tk.RIGHT)
        label1 = ttk.Label(self, text='秒')
        label.grid(row=8, column=0, sticky=tk.E)
        entry.grid(row=8, column=1, sticky=tk.EW)
        label1.grid(row=8, column=2, sticky=tk.W)
        strvar.set('0')
        self.custom_offset_strvar = strvar
    def init_header_file_widgets(self):
        ''' Row 9: optional ASS header template file, with a browse button. '''
        strvar = tk.StringVar()
        label = ttk.Label(self, text='样式模板:')
        entry = ttk.Entry(self, textvariable=strvar)
        button = ttk.Button(self, text='浏览', width=6)
        label.grid(row=9, column=0, sticky=tk.E)
        entry.grid(row=9, column=1, sticky=tk.EW)
        button.grid(row=9, column=2, sticky=tk.W)
        button['command'] = self.on_header_file_button_clicked
        self.header_file_strvar = strvar
    def on_header_file_button_clicked(self):
        ''' Open a file-picker seeded with the current template path. '''
        current_path = self.header_file_strvar.get().strip()
        if current_path == '':
            foldername, filename = os.getcwd(), ''
        else:
            foldername, filename = os.path.split(current_path)
        selected_path = tk.filedialog.askopenfilename(
            parent=self,
            title='打开文件',
            initialdir=foldername,
            initialfile=filename
        )
        # NOTE(review): askopenfilename usually returns '' on cancel, so this
        # None check may never fire and a cancel overwrites the entry — confirm
        if selected_path is None:
            return
        self.header_file_strvar.set(selected_path)
    def values(self):
        ''' Collect every option into the dict consumed by the converter. '''
        return dict(
            play_resolution=self.play_resolution_box.get().strip(),
            font_name=self.font_name_strvar.get().strip(),
            font_size=int(self.font_size_spinbox.get()),
            line_count=int(self.line_count_spinbox.get()),
            layout_algorithm=self.layout_algorithm_box.get(),
            tune_duration=int(self.tune_duration_spinbox.get()),
            drop_offset=int(self.drop_offset_spinbox.get()),
            bottom_margin=int(self.bottom_margin_spinbox.get()),
            custom_offset=self.custom_offset_strvar.get().strip(),
            header_file=self.header_file_strvar.get().strip(),
        )
class ResolutionBox(ttk.Frame):
    """Composite 'WIDTHxHEIGHT' input: two spinboxes joined by an 'x' label."""

    def __init__(self, parent):
        ttk.Frame.__init__(self, parent)
        self.init_widgets()

    def init_widgets(self):
        """Create and pack the width/height spinboxes side by side."""
        spinbox_options = dict(justify=tk.RIGHT, width=16, from_=1, to=9999)
        width_spinbox = tk.Spinbox(self, **spinbox_options)
        separator = ttk.Label(self, text='x')
        height_spinbox = tk.Spinbox(self, **spinbox_options)
        width_spinbox.pack(side=tk.LEFT, fill=tk.BOTH)
        separator.pack(side=tk.LEFT)
        height_spinbox.pack(side=tk.LEFT, fill=tk.BOTH)
        self.width_spinbox = width_spinbox
        self.height_spinbox = height_spinbox

    def get(self):
        """Return the resolution as a 'WIDTHxHEIGHT' string."""
        return self.width_spinbox.get() + 'x' + self.height_spinbox.get()

    def set(self, value):
        """Fill both spinboxes from a 'WIDTHxHEIGHT' string."""
        width, height = value.split('x')
        for spinbox, text in ((self.width_spinbox, width),
                              (self.height_spinbox, height)):
            spinbox.delete(0, tk.END)
            spinbox.insert(0, text)
class AlgorithmBox(ttk.Frame):
    """Radio-button pair choosing the scroll layout algorithm: sync/async."""

    def __init__(self, parent):
        ttk.Frame.__init__(self, parent)
        self.init_widgets()

    def init_widgets(self):
        """Create one radio button per algorithm, sharing a single variable."""
        choice = tk.StringVar()
        for text, value in (('速度同步', 'sync'), ('速度异步', 'async')):
            button = ttk.Radiobutton(
                self, text=text, variable=choice, value=value)
            button.pack(side=tk.LEFT)
        self.strvar = choice

    def get(self):
        """Name of the currently selected algorithm."""
        return self.strvar.get()

    def set(self, value):
        """Select an algorithm by name."""
        self.strvar.set(value)
########NEW FILE########
__FILENAME__ = tkmodules
from os.path import join, dirname
import tkinter
import tkinter.ttk
import tkinter.font
import tkinter.filedialog
import tkinter.messagebox
import tkinter.scrolledtext
tk = tkinter
ttk = tkinter.ttk
# Monkey-patch so ScrolledText is assembled from themed ttk widgets
tk.scrolledtext.Frame = ttk.Frame
tk.scrolledtext.Scrollbar = ttk.Scrollbar
ttk.ScrolledText = tk.scrolledtext.ScrolledText
class tku(object):
    """Small grab-bag of Tk helper utilities, used as a namespace."""

    @staticmethod
    def add_border_space(widget, padx, pady, recursive=True):
        """Add padx/pady pixels of padding around *widget* and, when
        *recursive*, around each of its pack/grid children as well."""
        widget.pack_configure(padx=padx, pady=pady)
        if not recursive:
            return
        for child in widget.pack_slaves():
            child.pack_configure(padx=padx, pady=pady)
        for child in widget.grid_slaves():
            child.grid_configure(padx=padx, pady=pady)

    @staticmethod
    def move_to_screen_center(win):
        """Move *win* to the middle of the screen (nudged up slightly)."""
        win.update_idletasks()
        size_part = win.geometry().split('+')[0]
        win_width, win_height = map(int, size_part.split('x'))
        pos_x = (win.winfo_screenwidth() - win_width) // 2
        pos_y = (win.winfo_screenheight() - win_height) // 2
        pos_y -= 40  # approximate taskbar height
        win.geometry('{:d}x{:d}+{:d}+{:d}'.format(
            win_width, win_height, pos_x, pos_y))

    @staticmethod
    def asset_path(name):
        """Path of *name* inside the package's assets directory."""
        return join(dirname(dirname(__file__)), 'assets', name)
########NEW FILE########
__FILENAME__ = collision
from ..libcore.utils import intceil
class Collision(object):
    ''' Per-style line allocator: tracks, for each subtitle line, the time
    at which the danmaku currently occupying it will have left the screen. '''
    def __init__(self, line_count):
        # number of lines available to this danmaku style
        self.line_count = line_count
        self.leaves = self._leaves()
    def _leaves(self):
        # leave time per line; 0 means the line is free from the start
        return [0] * self.line_count
    def detect(self, display):
        ''' Find a line for *display*.

        Returns (line_index, offset): offset is 0 when some line is free at
        the danmaku's start time, otherwise the time to wait until the
        chosen line frees up.
        '''
        beyonds = []
        for i, leave in enumerate(self.leaves):
            beyond = display.danmaku.start - leave
            # this line is already free: take it with no delay
            if beyond >= 0:
                return i, 0
            beyonds.append(beyond)
        # No line is free: pick the one that frees up soonest, i.e. the
        # largest (least negative) "beyond".  The original used min() here,
        # which selected the line with the LONGEST remaining wait and so
        # contradicted the stated intent (and inflated drop counts).
        max_beyond = max(beyonds)
        line_index = beyonds.index(max_beyond)
        offset = -max_beyond
        return line_index, offset
    def update(self, leave, line_index, offset):
        ''' Record that *line_index* stays occupied until leave + offset. '''
        # player sync is not exact; add 1 second of slack to keep
        # subtitles a little sparser
        deviation = 1
        self.leaves[line_index] = intceil(leave + offset) + deviation
########NEW FILE########
__FILENAME__ = config
import os
import sys
from ..libcore.utils import xhms2s
class Config(object):
    ''' Subtitle-generation options, derived from the raw args dict. '''
    def __init__(self, args):
        self.args = args
        (self.screen_width,
         self.screen_height) = self._screen_size()
        self.font_name = self._font_name()
        self.base_font_size = self._base_font_size()
        self.line_count = self._line_count()
        self.layout_algorithm = self._layout_algorithm()
        self.tune_duration = self._tune_duration()
        self.drop_offset = self._drop_offset()
        self.bottom_margin = self._bottom_margin()
        self.custom_offset = self._custom_offset()
        self.header_template = self._header_template()
    def _screen_size(self):
        ''' (width, height) in pixels, parsed from e.g. "1920x1080". '''
        # return a concrete tuple instead of a one-shot map iterator
        width, height = self.args['play_resolution'].split('x')
        return int(width), int(height)
    def _font_name(self):
        ''' User-chosen font, or a per-platform default. '''
        if self.args['font_name']:
            return self.args['font_name']
        if sys.platform.startswith('win'):
            return '微软雅黑'
        else:
            return 'WenQuanYi Micro Hei'
    def _base_font_size(self):
        ''' Base font size in pixels. '''
        return self.args['font_size']
    def _line_count(self):
        ''' Number of subtitle lines; 0 means "as many as fit". '''
        if self.args['line_count'] == 0:
            return self.screen_height // self.base_font_size
        else:
            return self.args['line_count']
    def _layout_algorithm(self):
        ''' Scroll layout algorithm name ('sync' or 'async'). '''
        return self.args['layout_algorithm']
    def _tune_duration(self):
        ''' Display-time tuning offset in seconds. '''
        return self.args['tune_duration']
    def _drop_offset(self):
        ''' Maximum wait in seconds before a danmaku is dropped. '''
        return self.args['drop_offset']
    def _bottom_margin(self):
        ''' Bottom margin in pixels for bottom-pinned danmakus. '''
        return self.args['bottom_margin']
    def _custom_offset(self):
        ''' User time offset (may be "h:m:s±h:m:s" form), in seconds. '''
        return xhms2s(self.args['custom_offset'])
    def _header_template(self):
        ''' ASS header template text, from the user file or a bundled one. '''
        if not self.args['header_file']:
            if sys.platform.startswith('win'):
                tpl_file = '/header-win.txt'
            else:
                tpl_file = '/header-unix.txt'
            filename = (os.path.dirname(__file__) + tpl_file)
        else:
            filename = self.args['header_file']
        # Read explicitly as UTF-8 instead of relying on the locale default,
        # which mis-decoded the bundled templates on non-UTF-8 locales
        # (e.g. Windows code pages).
        with open(filename, encoding='utf-8') as file:
            lines = file.read().strip().split('\n')
        lines = map(lambda l: l.strip(), lines)
        header = '\n'.join(lines) + '\n'
        return header
########NEW FILE########
__FILENAME__ = creater
from ..libcore.const import NOT_SUPPORT, SCROLL, TOP, BOTTOM
from .display import display_factory
from .collision import Collision
from .subtitle import Subtitle
class Creater(object):
    ''' Builds the final ASS subtitle text from a danmaku list. '''
    def __init__(self, config, danmakus):
        self.config = config
        self.danmakus = danmakus
        self.subtitles = self._subtitles()
        self.text = self._text()
    def _subtitles(self):
        ''' Lay out every supported danmaku, dropping any that would have
        to wait longer than the configured drop offset for a free line. '''
        # one independent collision tracker per danmaku style
        collisions = {
            style: Collision(self.config.line_count)
            for style in (SCROLL, TOP, BOTTOM)
        }
        subtitles = []
        for danmaku in self.danmakus:
            # skip styles we cannot render
            if danmaku.style == NOT_SUPPORT:
                continue
            display = display_factory(self.config, danmaku)
            collision = collisions[danmaku.style]
            line_index, waiting_offset = collision.detect(display)
            # too long a wait for a free line: drop this danmaku
            if waiting_offset > self.config.drop_offset:
                continue
            # accept the offset and record the new occupancy
            display.relayout(line_index)
            collision.update(display.leave, line_index, waiting_offset)
            # finally add the user-configured offset on top
            total_offset = waiting_offset + self.config.custom_offset
            subtitles.append(Subtitle(danmaku, display, total_offset))
        return subtitles
    def _text(self):
        ''' Formatted header plus one dialogue line per kept subtitle. '''
        header = self.config.header_template.format(
            width=self.config.screen_width,
            height=self.config.screen_height,
            fontname=self.config.font_name,
            fontsize=self.config.base_font_size,
        )
        body = '\n'.join(subtitle.text for subtitle in self.subtitles)
        return header + body
########NEW FILE########
__FILENAME__ = display
from ..libcore.const import SCROLL, TOP, BOTTOM
from ..libcore.utils import intceil, display_length
class Display(object):
    ''' Display mode (base class): computes where and for how long a danmaku
    is shown.  Subclasses override the underscore hooks; the attribute
    initialisation order matters because later values build on earlier ones.
    '''
    def __init__(self, config, danmaku):
        self.config = config
        self.danmaku = danmaku
        # line number; assigned for real by the collision layout (relayout)
        self.line_index = 0
        self.font_size = self._font_size()
        self.is_scaled = self._is_scaled()
        self.max_length = self._max_length()
        self.width = self._width()
        self.height = self._height()
        self.horizontal = self._horizontal()
        self.vertical = self._vertical()
        self.duration = self._duration()
        self.leave = self._leave()
    def _font_size(self):
        ''' Font size in pixels. '''
        # scale the user's base size by this danmaku's own size ratio
        return intceil(self.config.base_font_size * self.danmaku.size_ratio)
    def _is_scaled(self):
        ''' Whether this danmaku deviates from the base font size. '''
        return self.danmaku.size_ratio != 1
    def _max_length(self):
        ''' Display length of the longest content line. '''
        return max(map(display_length, self.danmaku.content.split('\n')))
    def _width(self):
        ''' Total subtitle width in pixels. '''
        # display_length counts a CJK char as 2, so halve to get char cells
        char_count = self.max_length / 2
        return intceil(self.font_size * char_count)
    def _height(self):
        ''' Total subtitle height in pixels. '''
        line_count = len(self.danmaku.content.split('\n'))
        return line_count * self.font_size
    def _horizontal(self):
        ''' (appear, disappear) horizontal coordinates. '''
        # default: centred on the screen
        x = self.config.screen_width // 2
        x1, x2 = x, x
        return x1, x2
    def _vertical(self):
        ''' (appear, disappear) vertical coordinates. '''
        # default: centred on the screen
        y = self.config.screen_height // 2
        y1, y2 = y, y
        return y1, y2
    def _duration(self):
        ''' How long the subtitle is displayed, in seconds. '''
        base = 3 + self.config.tune_duration
        if base <= 0:
            base = 0
        # longer texts get slightly more reading time
        char_count = self.max_length / 2
        if char_count < 6:
            value = base + 1
        elif char_count < 12:
            value = base + 2
        else:
            value = base + 3
        return value
    def _leave(self):
        ''' Time at which this subtitle stops blocking its line. '''
        return self.danmaku.start + self.duration
    def relayout(self, line_index):
        ''' Re-run the position maths for a newly assigned line number. '''
        self.line_index = line_index
        self.horizontal = self._horizontal()
        self.vertical = self._vertical()
class TopDisplay(Display):
    ''' Display mode pinned to the top of the screen. '''
    def _vertical(self):
        # line 0 sits flush with the top edge; each further line moves
        # down by one base font height
        top = self.config.base_font_size * self.line_index
        return top, top
class BottomDisplay(Display):
    ''' Display mode pinned to the bottom of the screen. '''
    def _vertical(self):
        # keep the text fully on screen: start from the bottom edge,
        # subtract the line offset and the subtitle's own height...
        line_offset = self.line_index * self.config.base_font_size
        y = self.config.screen_height - line_offset - self.height
        # ...then the user-configured bottom margin
        y -= self.config.bottom_margin
        return y, y
class ScrollDisplay(Display):
    ''' Display mode that scrolls right-to-left across the screen. '''
    def __init__(self, config, danmaku):
        # __init__ is re-spelled (rather than calling super()) because the
        # scroll-only attributes distance/speed must exist before duration
        self.config = config
        self.danmaku = danmaku
        self.line_index = 0
        self.font_size = self._font_size()
        self.is_scaled = self._is_scaled()
        self.max_length = self._max_length()
        self.width = self._width()
        self.height = self._height()
        self.horizontal = self._horizontal()
        self.vertical = self._vertical()
        self.distance = self._distance()
        self.speed = self._speed()
        self.duration = self._duration()
        self.leave = self._leave()
    def _horizontal(self):
        # ASS measures the horizontal position at the text's midpoint
        x1 = self.config.screen_width + self.width // 2
        x2 = -self.width // 2
        return x1, x2
    def _vertical(self):
        base_font_size = self.config.base_font_size
        # vertical position: one base-font-height per line
        y = (self.line_index + 1) * base_font_size
        # an oversized font on the first line must not poke above the top
        # edge: the coordinate may not be smaller than the font size
        if y < self.font_size:
            y = self.font_size
        y1, y2 = y, y
        return y1, y2
    def _distance(self):
        ''' Distance travelled by the subtitle's anchor point. '''
        x1, x2 = self.horizontal
        return x1 - x2
    def _speed(self):
        ''' Movement speed, derived from the per-character travel time. '''
        # base time is 12 seconds per character plus the user's tuning
        base = 12 + self.config.tune_duration
        if base <= 0:
            base = 0
        # NOTE(review): base == 0 here (tune_duration <= -12) would divide
        # by zero below — appears to rely on the UI limiting the range
        return intceil(self.config.screen_width / base)
    def _sync_duration(self):
        ''' Per-danmaku display time, "sync" algorithm.
        Every danmaku scrolls at the same speed: easy to read,
        suited to episodic video.
        '''
        return self.distance / self.speed
    def _async_duration(self):
        ''' Per-danmaku display time, "async" algorithm.
        Speed varies per danmaku: harder to read, suited to MTV-style video.
        '''
        base = 6 + self.config.tune_duration
        if base <= 0:
            base = 0
        char_count = self.max_length / 2
        if char_count < 6:
            value = base + char_count
        elif char_count < 12:
            value = base + (char_count / 2)
        elif char_count < 24:
            value = base + (char_count / 3)
        else:
            value = base + 10
        return value
    def _duration(self):
        ''' Total scroll time, dispatched by the configured algorithm name. '''
        func_name = '_' + self.config.layout_algorithm + '_duration'
        func = getattr(self, func_name)
        return func()
    def _leave(self):
        ''' Time at which this subtitle stops blocking its line. '''
        # for scrolling danmakus this is when the last character clears the
        # right edge, i.e. after travelling half the subtitle's own width
        speed = self.distance / self.duration
        half_width = self.width * 0.5
        duration = half_width / speed
        return self.danmaku.start + duration
def display_factory(config, danmaku):
    ''' Instantiate the Display subclass matching the danmaku's style. '''
    display_classes = {
        SCROLL: ScrollDisplay,
        TOP: TopDisplay,
        BOTTOM: BottomDisplay,
    }
    return display_classes[danmaku.style](config, danmaku)
########NEW FILE########
__FILENAME__ = studio
import sys
from os.path import join, isdir, basename
from .config import Config
from .creater import Creater
class Studio(object):
    ''' Subtitle project: glues a parsed video site (the producer) to the
    ASS creater and writes the output files. '''
    def __init__(self, args, producer):
        self.config = Config(args)
        self.producer = producer
    def start_handle(self):
        # deferred heavy work, run explicitly after construction
        self.ass_danmakus = self._ass_danmakus()
        self.creater = self._creater()
        self.keeped_count = self._keep_count()
        self.droped_count = self._droped_count()
        self.play_urls = self._play_urls()
    def _ass_danmakus(self):
        ''' Danmaku list destined for the output ASS file. '''
        return self.producer.keeped_danmakus
    def _creater(self):
        ''' The ASS creater. '''
        return Creater(self.config, self.ass_danmakus)
    def _keep_count(self):
        ''' Number of danmakus kept after layout. '''
        return len(self.creater.subtitles)
    def _droped_count(self):
        ''' Number of danmakus dropped by the layout step. '''
        return len(self.ass_danmakus) - self.keeped_count
    def create_ass_file(self, filename):
        ''' Write the ASS subtitle file; returns the basename written. '''
        default_filename = self.default_filename('.ass')
        if filename is None:
            filename = default_filename
        elif isdir(filename):
            filename = join(filename, default_filename)
        elif not filename.endswith('.ass'):
            filename += '.ass'
        self.create_file(filename, self.creater.text)
        return basename(filename)
    def _play_urls(self):
        ''' Playback URLs collected across every video part. '''
        urls = []
        for video in self.producer.videos:
            urls.extend(video.play_urls)
        return urls
    def create_m3u_file(self, filename):
        ''' Write an m3u playlist; returns the basename, or '' if there
        are no playback URLs. '''
        default_filename = self.default_filename('.m3u')
        if filename is None:
            filename = default_filename
        elif isdir(filename):
            filename = join(filename, default_filename)
        else:
            if filename.endswith('.ass'):
                filename = filename[:-4] + '.m3u'
            else:
                filename += '.m3u'
        if not self.play_urls:
            return ''
        text = '\n'.join(self.play_urls)
        self.create_file(filename, text)
        return basename(filename)
    def default_filename(self, suffix):
        ''' Build the full output file name from the video title. '''
        # '/' is illegal in file names; swap it for a space
        video_title = self.producer.title.replace('/', ' ')
        filename = video_title + suffix
        return filename
    def create_file(self, filename, text):
        ''' Write *text* as UTF-8, using CRLF line endings on Windows. '''
        with open(filename, 'wb') as file:
            if sys.platform.startswith('win'):
                text = text.replace('\n', '\r\n')
            text = text.encode('utf-8')
            file.write(text)
########NEW FILE########
__FILENAME__ = subtitle
from ..libcore.const import SCROLL
from ..libcore.utils import s2hms, int2bgr, is_dark, correct_typos
# One ASS "Dialogue:" event line; stripped so lines can be newline-joined.
DIALOGUE_TPL = '''
Dialogue: {layer},{start},{end},Danmaku,,0000,0000,0000,,{content}
'''.strip()
class Subtitle(object):
    ''' One ASS Dialogue event built from a danmaku and its display mode. '''
    def __init__(self, danmaku, display, offset=0):
        self.danmaku = danmaku
        self.display = display
        self.offset = offset
        self.start = self._start()
        self.end = self._end()
        self.color = self._color()
        self.position = self._position()
        self.start_markup = self._start_markup()
        self.end_markup = self._end_markup()
        self.color_markup = self._color_markup()
        self.border_markup = self._border_markup()
        self.font_size_markup = self._font_size_markup()
        self.style_markup = self._style_markup()
        self.layer_markup = self._layer_markup()
        self.content_markup = self._content_markup()
        self.text = self._text()
    def _start(self):
        # shift by the combined layout + user offset
        return self.danmaku.start + self.offset
    def _end(self):
        return self.start + self.display.duration
    def _color(self):
        # ASS stores colors as BGR hex
        return int2bgr(self.danmaku.color)
    def _position(self):
        ''' Appear/disappear coordinates as a formatting dict. '''
        x1, x2 = self.display.horizontal
        y1, y2 = self.display.vertical
        return dict(x1=x1, y1=y1, x2=x2, y2=y2)
    def _start_markup(self):
        return s2hms(self.start)
    def _end_markup(self):
        return s2hms(self.end)
    def _color_markup(self):
        # white is the style default and needs no override tag
        if self.color == 'FFFFFF':
            return ''
        else:
            return '\\c&H' + self.color
    def _border_markup(self):
        # dark text gets a light outline for readability
        if is_dark(self.danmaku.color):
            return '\\3c&HFFFFFF'
        else:
            return ''
    def _font_size_markup(self):
        # only emit a size tag when the danmaku deviates from the base size
        if self.display.is_scaled:
            return '\\fs' + str(self.display.font_size)
        else:
            return ''
    def _style_markup(self):
        # scrolling danmakus move; fixed ones are positioned once
        if self.danmaku.style == SCROLL:
            return '\\move({x1}, {y1}, {x2}, {y2})'.format(**self.position)
        else:
            return '\\a6\\pos({x1}, {y1})'.format(**self.position)
    def _layer_markup(self):
        # fixed danmakus render on a higher layer than scrolling ones
        if self.danmaku.style != SCROLL:
            return '-2'
        else:
            return '-3'
    def _content_markup(self):
        ''' Override-tag block plus the (typo-corrected) text. '''
        markup = ''.join([
            self.style_markup,
            self.color_markup,
            self.border_markup,
            self.font_size_markup
        ])
        content = correct_typos(self.danmaku.content)
        return '{' + markup + '}' + content
    def _text(self):
        ''' The complete "Dialogue:" line for the ASS events section. '''
        return DIALOGUE_TPL.format(
            layer=self.layer_markup,
            start=self.start_markup,
            end=self.end_markup,
            content=self.content_markup)
########NEW FILE########
__FILENAME__ = const
# Danmaku display styles.  The chained tuple assignment gives each name a
# sequential integer value (NOT_SUPPORT=0, SCROLL=1, TOP=2, BOTTOM=3).
STYLES = (
    NOT_SUPPORT,
    SCROLL,
    TOP,
    BOTTOM,
) = range(4)
########NEW FILE########
__FILENAME__ = danmaku
from .const import NOT_SUPPORT
class BaseDanmaku(object):
    ''' Danmaku (comment) base record; site parsers override the fields. '''
    def __init__(self):
        # start time in seconds
        self.start = 0
        # position style (one of the const.STYLES values)
        self.style = NOT_SUPPORT
        # text color as an RGB integer
        self.color = 0xFFFFFF
        # commenter identifier
        self.commenter = ''
        # comment body text
        self.content = ''
        # font scaling ratio relative to the base size
        self.size_ratio = 1
        # whether posted by a guest (anonymous) user
        self.is_guest = False
        # whether it is a lyric or featured ("god") danmaku
        self.is_applaud = False
########NEW FILE########
__FILENAME__ = fetcher
import gzip
import zlib
from urllib import request
from io import BytesIO
# Pretend to be a desktop Firefox so servers serve their normal pages.
USER_AGENT = (
    'Mozilla/5.0 (X11; Linux x86_64; rv:26.0) Gecko/20100101 Firefox/26.0')


class Fetcher(object):
    """Tiny HTTP downloader with gzip/deflate support and an in-memory cache."""

    def __init__(self):
        self.opener = self._opener()
        self.cache = {}

    def _opener(self):
        """urllib opener advertising a browser UA and gzip support."""
        opener = request.build_opener()
        opener.addheaders = [
            ('User-Agent', USER_AGENT),
            ('Accept-Encoding', 'gzip')
        ]
        return opener

    def decompression(self, content, encoding):
        """Undo the transfer encoding reported by the server, if any."""
        if encoding == 'gzip':
            return gzip.GzipFile(fileobj=BytesIO(content), mode='rb').read()
        if encoding == 'deflate':
            # raw deflate stream (no zlib header)
            return zlib.decompressobj(-zlib.MAX_WBITS).decompress(content)
        return content

    def download(self, url):
        """Fetch *url* and return its body decoded as UTF-8 text."""
        response = self.opener.open(url)
        body = response.read()
        encoding = response.headers.get('content-encoding', None)
        return self.decompression(body, encoding).decode('UTF-8')

    def open(self, url, force=False):
        """Return the text at *url*, reusing the cache unless *force*."""
        text = self.cache.get(url)
        if force or text is None:
            print('下载:' + str(url))
            text = self.download(url)
            self.cache[url] = text
        else:
            print('重用:' + str(url))
        return text
# Module-level convenience: one shared Fetcher so all callers reuse its cache.
fetch = Fetcher().open
########NEW FILE########
__FILENAME__ = filter
import re
from .const import BOTTOM
class BaseFilter(object):
    ''' Filter interface: subclasses return True to drop a danmaku. '''
    def match(self, danmaku):
        # the base implementation matches nothing
        return False
class GuestFilter(BaseFilter):
    ''' Drops danmakus posted by guest (anonymous) commenters. '''
    def match(self, danmaku):
        return danmaku.is_guest
class BottomFilter(BaseFilter):
    ''' Drops bottom-pinned danmakus, except lyric/featured ones. '''
    def match(self, danmaku):
        # featured ("applaud") danmakus are always kept
        if danmaku.is_applaud:
            return False
        return danmaku.style == BOTTOM
class CustomFilter(BaseFilter):
    ''' Drops danmakus whose content matches any user-supplied regexp. '''
    def __init__(self, lines):
        self.lines = lines
        self.regexps = self._regexps()
    def _regexps(self):
        # compile once up front; the patterns are reused for every danmaku
        return [re.compile(line) for line in self.lines]
    def match(self, danmaku):
        return any(regexp.search(danmaku.content)
                   for regexp in self.regexps)
# Shared, stateless filter instances used as module-level defaults.
guest_filter = GuestFilter()
bottom_filter = BottomFilter()
########NEW FILE########
__FILENAME__ = utils
import re
import colorsys
from math import ceil
from urllib.parse import unquote
from unicodedata import east_asian_width
def intceil(number):
    ''' Round *number* up and return the result as an int. '''
    rounded = ceil(number)
    return int(rounded)
def display_length(text):
    ''' Visual width of *text*: a narrow ("Na") char counts 1, anything
    else (CJK fullwidth/wide/ambiguous/neutral) counts 2.

    The original used the fragile ``cond and 1 or 2`` pre-ternary idiom;
    this is the equivalent conditional expression, summed at C speed.
    '''
    return sum(1 if east_asian_width(char) == 'Na' else 2 for char in text)
def correct_typos(text):
    ''' Fix common commenter typos / leftover escapes in danmaku text. '''
    # mistyped ASS newline escape: "/n" instead of "\N"
    text = text.replace('/n', '\\N')
    # Un-escape HTML entities left over from the XML source.  The original
    # called replace('>', '>') / replace('<', '<'), which are no-ops; the
    # intended replacements are the entity forms.
    text = text.replace('&gt;', '>')
    text = text.replace('&lt;', '<')
    return text
def s2hms(seconds):
    ''' Format a (possibly fractional) seconds count as H:MM:SS.CC. '''
    # negative times clamp to zero
    if seconds < 0:
        return '0:00:00.00'
    whole, fraction = divmod(seconds, 1)
    minutes, secs = divmod(whole, 60)
    hours, minutes = divmod(minutes, 60)
    parts = map(int, (hours, minutes, secs, fraction * 100))
    return '{:d}:{:02d}:{:02d}.{:02d}'.format(*parts)
def hms2s(hms):
    ''' Parse an "H:M:S" / "M:S" / "S" string into a number of seconds. '''
    total = 0
    # walk the components right-to-left: seconds, minutes, hours
    for power, num in enumerate(reversed(hms.split(':'))):
        total += int(num) * (60 ** power)
    return total


def xhms2s(xhms):
    ''' Like hms2s, but terms may be chained with +/- signs:
    '3:00-2:30' means 30 seconds.

    The sign applies to the whole H:M:S term.  (The original fed the signed
    string straight into hms2s, which negated only the leading component,
    so '3:00-2:30' came out as 90 instead of 30; a leading sign crashed.)
    '''
    args = xhms.replace('+', ' +').replace('-', ' -').split(' ')
    result = 0
    for hms in args:
        # a leading sign yields an empty first chunk; skip it
        if not hms:
            continue
        sign = 1
        if hms[0] in '+-':
            if hms[0] == '-':
                sign = -1
            hms = hms[1:]
        result += sign * hms2s(hms)
    return result
def int2rgb(integer):
    ''' Color value: integer to 6-digit uppercase RRGGBB hex string. '''
    return hex(integer).upper()[2:].zfill(6)


def int2bgr(integer):
    ''' Color value: integer RGB to BGR hex (ASS stores colors as BGR). '''
    rgb = int2rgb(integer)
    bgr = rgb[4:6] + rgb[2:4] + rgb[0:2]
    return bgr


def int2hls(integer):
    ''' Color value: integer RGB to HLS (hue 0-360, others 0-100). '''
    rgb = int2rgb(integer)
    rgb_decimals = map(lambda x: int(x, 16), (rgb[0:2], rgb[2:4], rgb[4:6]))
    # Scale the 0-255 channels to the 0.0-1.0 range colorsys expects.
    # The original used floor division (x // 255), which collapsed every
    # channel below 255 to 0 and broke the conversion for mixed colors.
    rgb_coordinates = [x / 255 for x in rgb_decimals]
    hls_coordinates = colorsys.rgb_to_hls(*rgb_coordinates)
    hls = (
        hls_coordinates[0] * 360,
        hls_coordinates[1] * 100,
        hls_coordinates[2] * 100
    )
    return hls
def is_dark(integer):
    ''' Heuristic: is this color too dark to read without a light border? '''
    # pure black is always dark
    if integer == 0:
        return True
    hue, lightness = int2hls(integer)[0:2]
    # Thresholds are the author's subjective judgement on the HSL wheel
    # (http://zh.wikipedia.org/zh-cn/HSL和HSV色彩空间): mid-spectrum hues
    # stay readable at lower lightness than reds/blues do.
    if 30 < hue < 210:
        return lightness < 33
    if hue < 30 or hue > 210:
        return lightness < 66
    # hue exactly 30 or 210 falls through both ranges
    return False
def extract_params(argv):
    ''' Parse a URL-quoted "key=value,key=value" string into a dict. '''
    argv = unquote(argv)
    pairs = (chunk.split('=') for chunk in argv.split(','))
    return {key: value for key, value in pairs}
def play_url_fix(url):
    ''' Work around known-broken qq.com video hosts. '''
    # videoctfs.tc.qq.com does not resolve (even via Telecom DNS); packet
    # captures show the Flash player receives a hard-coded IP instead,
    # so substitute it here as well.
    ctfs_prefix = 'http://videoctfs.tc.qq.com/'
    if url.startswith(ctfs_prefix):
        return url.replace(ctfs_prefix, 'http://183.60.73.103/', 1)
    # vhot2 URLs answer 403 by default; rebuild against vsrc.store.qq.com
    if url.startswith('http://vhot2.qqvideo.tc.qq.com/'):
        key_part = re.findall(
            r'http://vhot2.qqvideo.tc.qq.com/(.+?)\?.*', url)[0]
        url = 'http://vsrc.store.qq.com/{}?'.format(key_part)
        url += 'channel=vhot2&sdtfrom=v2&r=256&rfc=v10'
        return url
    return url
########NEW FILE########
__FILENAME__ = video
class BaseVideo(object):
    ''' Video base record; site-specific subclasses fill these fields in. '''
    def __init__(self):
        # unique identifier
        self.uid = ''
        # video title parts (main title and subtitle)
        self.h1 = ''
        self.h2 = ''
        self.title = '未知标题'
        # per-video danmaku filter, or None
        self.filter = None
        # video length in seconds
        self.play_length = 0
        # playback URLs
        self.play_urls = []
        # danmaku list
        self.danmakus = []
        # offset at which the feature (main content) starts
        self.feature_start = 0
########NEW FILE########
__FILENAME__ = acfun
import re
import json
from ..libcore.const import NOT_SUPPORT, SCROLL, TOP, BOTTOM
from ..libcore.utils import extract_params
from ..libcore.fetcher import fetch
from ..libcore.danmaku import BaseDanmaku
from ..libcore.video import BaseVideo
class Danmaku(BaseDanmaku):
    ''' One acfun danmaku parsed from a JSON comment entry. '''
    def __init__(self, entry):
        self.entry = entry
        self.raw = self._raw()
        # BaseDanmaku interface
        self.start = self._start()
        self.style = self._style()
        self.color = self._color()
        self.commenter = self._commenter()
        self.content = self._content()
        self.size_ratio = self._size_ratio()
        self.is_guest = self._is_guest()
        self.is_applaud = self._is_applaud()
    def _raw(self):
        ''' Split the packed 'c' attribute string into a property dict. '''
        attr_string = self.entry['c']
        content_string = self.entry['m']
        attrs = attr_string.split(',')
        props = {
            'start': float(attrs[0]),
            'color': int(attrs[1]),
            'style': int(attrs[2]),
            'size': int(attrs[3]),
            'commenter': attrs[4],
            'publish': int(attrs[5]),
            'content': content_string
        }
        return props
    # BaseDanmaku interface #
    def _start(self):
        return self.raw['start']
    def _style(self):
        ''' Map acfun style codes to our STYLES constants. '''
        MAPPING = {
            1: SCROLL,
            2: NOT_SUPPORT,  # no clear definition found
            3: NOT_SUPPORT,  # ditto
            4: BOTTOM,
            5: TOP,
            6: NOT_SUPPORT,  # no clear definition found
            7: NOT_SUPPORT,  # advanced danmaku; not handled for now
            8: NOT_SUPPORT,  # no clear definition found
        }
        return MAPPING.get(self.raw['style'], NOT_SUPPORT)
    def _color(self):
        return self.raw['color']
    def _commenter(self):
        return self.raw['commenter']
    def _content(self):
        return self.raw['content']
    def _size_ratio(self):
        # the Flash player's default font size is the scaling reference
        FLASH_PLAYER_FONT_SIZE = 25
        return self.raw['size'] / FLASH_PLAYER_FONT_SIZE
    def _is_guest(self):
        # heuristic: guest ids appear to be 14 chars (with letters) — unverified
        return len(self.raw['commenter']) == 14
    def _is_applaud(self):
        # acfun has no lyric/featured pool
        return False
class Video(BaseVideo):
    ''' One acfun video part, resolved from page/meta parameters. '''
    def __init__(self, config, meta):
        self.config = config
        self.meta = meta
        self.vid = self._vid()
        self.cid = self._cid()
        #print('信息:' + str(self.meta))
        #print('信息:' + str(dict(vid=self.vid, cid=self.cid)))
        # BaseVideo interface
        self.uid = 'vid:{}+cid:{}'.format(self.vid, self.cid)
        self.h1 = self._h1()
        self.h2 = self._h2()
        self.title = self._title()
        self.filter = self._filter()
        (self.play_length,
         self.play_urls) = self._play_info()
        self.danmakus = self._danmakus()
        self.feature_start = self._feature_start()
    def _vid(self):
        ''' Video id from meta; there is no fallback lookup. '''
        value = self.meta.get('vid')
        if value is not None:
            return value
        raise Exception('无法获取 vid,请用辅助参数指定')
    def _cid(self):
        ''' Comment id from meta, else resolved via the acfun APIs. '''
        value = self.meta.get('cid')
        if value is not None:
            return value
        url = 'http://www.acfun.tv/api/getVideoByID.aspx?vid=' + self.vid
        text = fetch(url)
        value = json.loads(text).get('cid')
        # try the alternative API endpoint
        if not value:
            url = 'http://www.acfun.tv/video/getVideo.aspx?id=' + self.vid
            text = fetch(url)
            value = json.loads(text).get('danmakuId')
        if value:
            return value
        raise Exception('无法获取 cid,请用辅助参数指定')
    def _h1(self):
        return self.meta.get('h1', '')
    def _h2(self):
        return self.meta.get('h2', '')
    def _title(self):
        ''' "h1 - h2" when both parts exist, else h1 or a placeholder. '''
        if not self.h1:
            return '未知标题'
        if self.h2:
            return self.h1 + ' - ' + self.h2
        else:
            return self.h1
    def _filter(self):
        # not implemented for acfun
        return None
    def _play_info(self):
        # not implemented for acfun
        return (0, [])
    def _danmakus(self):
        ''' Download the comment JSON and return danmakus sorted by start. '''
        tpl = 'http://comment.acfun.tv/{}.json'
        url = tpl.format(self.cid)
        text = fetch(url)
        orignal_danmakus = map(Danmaku, json.loads(text))
        ordered_danmakus = sorted(orignal_danmakus, key=lambda d: d.start)
        return ordered_danmakus
    def _feature_start(self):
        # not implemented for acfun
        return 0
class Page(object):
    ''' Resolves an acfun URL (or abbreviated form) into video parameters. '''
    def __init__(self, url):
        self.url = url
        self.video_class = Video
        self.params = self._params()
    def _params(self):
        ''' Dispatch on the URL form and extract its parameters. '''
        abbr_prefix = 'a://'
        normal_prefix = 'http://www.acfun.tv/v/ac'
        comment_prefix = 'http://comment.acfun.tv/'
        url = self.url
        if url.startswith(abbr_prefix):
            # abbreviated form: everything after a:// is a params string
            return extract_params(url[len(abbr_prefix):])
        if url.startswith(normal_prefix):
            # normal watch page; default to part 1 when unspecified
            if '_' not in url:
                url += '_1'
            return self.extract_params_from_normal_page(url)
        if url.startswith(comment_prefix):
            # direct comment-file URL: cid is the path minus '.json'
            return dict(vid='', cid=url[len(comment_prefix):-5])
        return {}
    def extract_params_from_normal_page(self, url):
        ''' Scrape aid, vid and h1 from a watch-page URL and its HTML. '''
        aid_reg = re.compile('/ac([0-9]+)')
        vid_reg = re.compile('active" data-vid="(.+?)"')
        h1_reg = re.compile('<h1>(.+?)</h1>')
        text = fetch(url)
        return {
            'aid': aid_reg.findall(url)[0],
            'vid': vid_reg.findall(text)[0],
            'h1': h1_reg.findall(text)[0],
        }
########NEW FILE########
__FILENAME__ = bilibili
import re
import json
from ..libcore.const import NOT_SUPPORT, SCROLL, TOP, BOTTOM
from ..libcore.utils import extract_params, play_url_fix
from ..libcore.fetcher import fetch
from ..libcore.filter import BaseFilter
from ..libcore.danmaku import BaseDanmaku
from ..libcore.video import BaseVideo
class Filter(BaseFilter):
    ''' Per-video filter built from bilibili's JSON blocklist. '''
    def __init__(self, text):
        self.text = text
        (self.keywords,
         self.users) = self._rules()
    def _rules(self):
        ''' Pull the keyword and user blocklists out of the JSON payload. '''
        struct = json.loads(self.text)['up']
        return struct['keyword'], struct['user']
    def match(self, danmaku):
        ''' True when the commenter is blocked or any keyword appears. '''
        if danmaku.commenter in self.users:
            return True
        return any(keyword in danmaku.content for keyword in self.keywords)
class Danmaku(BaseDanmaku):
    ''' One bilibili danmaku parsed from a <d> XML fragment. '''
    def __init__(self, text):
        self.text = text
        self.raw = self._raw()
        # BaseDanmaku interface
        self.start = self._start()
        self.style = self._style()
        self.color = self._color()
        self.commenter = self._commenter()
        self.content = self._content()
        self.size_ratio = self._size_ratio()
        self.is_guest = self._is_guest()
        self.is_applaud = self._is_applaud()
    def _raw(self):
        ''' Split the <d p="..."> attribute string into a property dict. '''
        reg = re.compile('<d p="(.+?)">(.*?)</d>')
        attr_string, content_string = reg.findall(self.text)[0]
        attrs = attr_string.split(',')
        props = {
            'start': float(attrs[0]),
            'style': int(attrs[1]),
            'size': int(attrs[2]),
            'color': int(attrs[3]),
            'publish': int(attrs[4]),
            'pool': int(attrs[5]),  # danmaku pool
            'commenter': attrs[6],
            'uid': attrs[7],  # unique id of this danmaku
            'content': content_string
        }
        return props
    # BaseDanmaku interface #
    def _start(self):
        return self.raw['start']
    def _style(self):
        ''' Map bilibili style codes to our STYLES constants. '''
        MAPPING = {
            1: SCROLL,
            2: SCROLL,  # appears to scroll as well
            3: SCROLL,  # ditto
            4: BOTTOM,
            5: TOP,
            6: SCROLL,  # reverse-scrolling; treat as ordinary scroll
            7: NOT_SUPPORT,  # precise positioning; not handled for now
            8: NOT_SUPPORT,  # advanced danmaku; not handled for now
        }
        return MAPPING.get(self.raw['style'], NOT_SUPPORT)
    def _color(self):
        return self.raw['color']
    def _commenter(self):
        return self.raw['commenter']
    def _content(self):
        return self.raw['content']
    def _size_ratio(self):
        # the Flash player's default font size is the scaling reference
        FLASH_PLAYER_FONT_SIZE = 25
        return self.raw['size'] / FLASH_PLAYER_FONT_SIZE
    def _is_guest(self):
        # commenter ids starting with 'D' are guest comments
        return self.raw['commenter'].startswith('D')
    def _is_applaud(self):
        # any non-zero pool is a special (lyric/featured) pool
        return self.raw['pool'] != 0
class Video(BaseVideo):
    """A bilibili video.

    Resolves the chat id (cid) and video id (aid) from ``meta`` — falling
    back to the player interface API — then fetches play info, danmakus and
    the per-video filter through the BaseVideo attribute interface.
    """

    def __init__(self, config, meta):
        self.config = config
        self.meta = meta
        self.cid = self._cid()
        self.aid = self._aid()
        #print('信息:' + str(self.meta))
        #print('信息:' + str(dict(cid=self.cid, aid=self.aid)))
        # BaseVideo interface attributes.
        self.uid = 'cid:' + self.cid
        self.h1 = self._h1()
        self.h2 = self._h2()
        self.title = self._title()
        self.filter = self._filter()
        (self.play_length,
         self.play_urls) = self._play_info()
        self.danmakus = self._danmakus()
        self.feature_start = self._feature_start()

    def _cid(self):
        """Return the chat id, probing the player API with any '*id' hints.

        Raises:
            Exception: when no assist id yields a <chatid> from the API.
        """
        value = self.meta.get('cid')
        if value is not None:
            return value
        # Collect every meta value whose key looks like an id (except aid)
        # and ask the player interface which of them resolves to a <chatid>.
        ids = []
        for key, value in self.meta.items():
            if key.endswith('id') and key != 'aid':
                ids.append(value)
        reg = re.compile('<chatid>(.+?)</chatid>')
        for id in ids:
            url = 'http://interface.bilibili.tv/player?id=' + id
            text = fetch(url)
            matches = reg.findall(text)
            if matches:
                return matches[0]
        raise Exception('无法获取 cid,请用辅助参数指定')

    def _aid(self):
        """Return the video id for this cid, or None when the API omits it."""
        value = self.meta.get('aid')
        if value is not None:
            return value
        url = 'http://interface.bilibili.tv/player?id=cid:' + self.cid
        text = fetch(url)
        reg = re.compile('<aid>(.+?)</aid>')
        matches = reg.findall(text)
        if matches:
            return matches[0]
        else:
            return None

    # BaseVideo interface #

    def _h1(self):
        return self.meta.get('h1', '')

    def _h2(self):
        return self.meta.get('h2', '')

    def _title(self):
        """Combine h1/h2 into a display title ('未知标题' when h1 is empty)."""
        if not self.h1:
            return '未知标题'
        if self.h2:
            return self.h1 + ' - ' + self.h2
        else:
            return self.h1

    def _filter(self):
        """Fetch the per-video cloud filter; None when disabled or aid unknown."""
        if self.config.disable_video_filter:
            return None
        if not self.aid:
            return None
        tpl = 'http://comment.bilibili.tv/cloud/filter/{}.json'
        url = tpl.format(self.aid)
        text = fetch(url)
        return Filter(text)

    def _play_info(self):
        """Return (play_length_seconds, play_urls) from the playurl API."""
        tpl = 'http://interface.bilibili.tv/playurl?cid={}'
        url = tpl.format(self.cid)
        text = fetch(url)
        # The metadata is sometimes unavailable ('视频隐藏' in the response);
        # retry a few (up to 5) times before giving up.
        tried = 0
        while True:
            if '视频隐藏' not in text or tried >= 5:
                break
            text = fetch(url, True)
            tried += 1
        reg = re.compile('<timelength>(.+?)</timelength>')
        matches = reg.findall(text)
        if matches:
            # <timelength> appears to be milliseconds; convert to whole seconds.
            play_length = int(float(matches[0])) // 1000
        else:
            play_length = 0
        reg = re.compile('<url><!\[CDATA\[(.+?)\]\]></url>')
        matches = reg.findall(text)
        if matches:
            # NOTE(review): map() returns a one-shot iterator in Python 3;
            # verify callers consume play_urls only once.
            play_urls = map(play_url_fix, matches)
        else:
            play_urls = []
        return play_length, play_urls

    def _danmakus(self):
        """Download the danmaku XML and return Danmaku objects sorted by start."""
        tpl = 'http://comment.bilibili.tv/{}.xml'
        url = tpl.format(self.cid)
        text = fetch(url)
        reg = re.compile('<d .*</d>')
        matches = reg.findall(text)
        orignal_danmakus = map(Danmaku, matches)
        ordered_danmakus = sorted(orignal_danmakus, key=lambda d: d.start)
        return ordered_danmakus

    def _feature_start(self):
        """Feature start offset in seconds, parsed from an advanced danmaku.

        Looks in the first 10 danmakus for one that is in the special pool
        (pool 2) AND has advanced style 8, and extracts its Player.seek(ms)
        argument; 0 when none is found.
        """
        reg = re.compile('Player.seek\(([0-9]+?)\);')
        for danmaku in self.danmakus[:10]:
            if not (danmaku.raw['pool'] == 2 and danmaku.raw['style'] == 8):
                continue
            matches = reg.findall(danmaku.content)
            if matches:
                return int(matches[0]) / 1000
        return 0
class Page(object):
    """A bilibili page URL; extracts video parameters for Video construction."""

    def __init__(self, url):
        self.url = url
        self.video_class = Video
        self.params = self._params()

    def _params(self):
        """Dispatch on the URL form and return a parameter dict."""
        abbr_prefix = 'b://'
        secure_prefix = 'https://secure.bilibili.tv/secure,'
        normal_prefix = 'http://www.bilibili.tv/video/av'
        normal1_prefix = 'http://bilibili.kankanews.com/video/av'
        comment_prefix = 'http://comment.bilibili.tv/'
        target = self.url
        result = {}
        if target.startswith(abbr_prefix):
            result = extract_params(target[len(abbr_prefix):])
        elif target.startswith(secure_prefix):
            # Secure URLs separate parameters with '&'; normalize to ','.
            argv = target[len(secure_prefix):].replace('&', ',')
            result = extract_params(argv)
        elif target.startswith(normal_prefix) or target.startswith(normal1_prefix):
            if target.endswith('/'):
                target += 'index_1.html'
            result = self.extract_params_from_normal_page(target)
        elif target.startswith(comment_prefix):
            # Drop the prefix and the 4-character extension to get the cid;
            # aid stays unknown.
            result = dict(aid='', cid=target[len(comment_prefix):-4])
        return result

    def extract_params_from_normal_page(self, url):
        """Scrape aid/cid/h1 from a regular bilibili watch page."""
        aid_pattern = re.compile('/av([0-9]+)/')
        cid_pattern = re.compile("cid=([0-9]+)|cid:'(.+?)'")
        title_pattern = re.compile('<h2 title="(.+?)">')
        html = fetch(url)
        result = {}
        result['aid'] = aid_pattern.findall(url)[0]
        try:
            cid_matches = cid_pattern.findall(html)[0]
            # The pattern has two alternatives; exactly one group is non-empty.
            result['cid'] = cid_matches[0] or cid_matches[1]
            result['h1'] = title_pattern.findall(html)[0]
        except IndexError:
            print('警告:无法获取 cid,此页面可能需要登录')
        return result
class Part(object):
    """A multi-part bilibili video page; expands into one Page per part."""

    def __init__(self, url):
        self.url = url
        self.pages = self._pages()

    def _pages(self):
        """Scrape the part <option> list and build a Page for each entry.

        Raises:
            Exception: when the page contains no part selector.
        """
        text = fetch(self.url)
        reg = re.compile("<option value='(.+?)'(?: selected)?>(.+?)</option>")
        matches = reg.findall(text)
        if not matches:
            raise Exception('此页面没有找到多个分段')
        return [Page(self.full_urlify(link[0])) for link in matches]

    def full_urlify(self, fuzzy_url):
        """Expand a scraped option link into an absolute part-1 page URL."""
        url = fuzzy_url
        if url.startswith('/'):
            url = 'http://www.bilibili.tv' + url
        if fuzzy_url.endswith('/'):
            url += 'index_1.html'
        return url
########NEW FILE########
__FILENAME__ = config
from ..libcore.filter import CustomFilter
class Config(object):
    """Normalized download options derived from the parsed CLI args dict."""

    def __init__(self, args):
        self.args = args
        self.assist_params = self._assist_params()
        self.custom_filter = self._custom_filter()
        self.disable_bottom_filter = self._disable_bottom_filter()
        self.disable_guest_filter = self._disable_guest_filter()
        self.disable_video_filter = self._disable_video_filter()
        self.skip_patch = self._skip_patch()
        self.merge_parts = self._merge_parts()

    def _assist_params(self):
        """Parse 'k1=v1,k2=v2' assist parameters into a dict ({} when unset)."""
        raw = self.args['assist_params']
        if not raw:
            return {}
        pairs = (item.split('=') for item in raw.split(','))
        return {key: value for key, value in pairs}

    def _custom_filter(self):
        """Load a CustomFilter from the configured file, or [] when unset."""
        if not self.args['custom_filter']:
            return []
        filename = self.args['custom_filter']
        with open(filename) as file:
            text = file.read().strip() + '\n'
        stripped = (line.strip() for line in text.split('\n'))
        lines = [line for line in stripped if line != '']
        return CustomFilter(lines)

    def _disable_bottom_filter(self):
        return self.args['disable_bottom_filter']

    def _disable_guest_filter(self):
        return self.args['disable_guest_filter']

    def _disable_video_filter(self):
        return self.args['disable_video_filter']

    def _skip_patch(self):
        return self.args['skip_patch']

    def _merge_parts(self):
        return self.args['merge_parts']
########NEW FILE########
__FILENAME__ = producer
from ..libcore.filter import guest_filter, bottom_filter
from .config import Config
from .bilibili import Page as BilibiliPage, Part as BilibiliPart
from .acfun import Page as AcfunPage
from .tucao import Page as TucaoPage
def make_page(url):
    """Build the site-specific Page object for *url*.

    Dispatches on the URL's scheme/host to the bilibili, acfun or tucao
    Page class.

    Raises:
        Exception: when no supported site matches the URL.
    """
    # Fix: 'page' was previously left unbound when no branch matched, so an
    # unsupported URL raised UnboundLocalError instead of the intended
    # Exception below.
    page = None
    if url.startswith('b://') or 'bilibili' in url:
        page = BilibiliPage(url)
    elif url.startswith('a://') or 'acfun' in url:
        page = AcfunPage(url)
    elif url.startswith('c://') or 'tucao' in url:
        page = TucaoPage(url)
    if page is None:
        raise Exception('不支持的网址')
    return page
def make_part_pages(url):
    """Return the per-part Pages for a multi-part bilibili video URL.

    Raises:
        Exception: when the URL is not a recognized bilibili video URL.
    """
    supported_prefixes = (
        'http://www.bilibili.tv/video/av',
        'http://bilibili.kankanews.com/video/av',
    )
    # str.startswith accepts a tuple of candidate prefixes.
    if url.startswith(supported_prefixes):
        return BilibiliPart(url).pages
    raise Exception('此网址不支持自动合并分段')
def make_video(config, page):
    """Construct the page's video, overlaying assist params on page params."""
    # Copy so the page's own params dict is never mutated.
    meta = dict(page.params)
    meta.update(config.assist_params)
    return page.video_class(config, meta)
class ProxyDanmaku(object):
    """Wraps a danmaku, shifting its start time by a fixed offset.

    Used when merging parts or skipping a patch: ``start`` is the wrapped
    danmaku's start plus the offset; every other attribute is delegated to
    the wrapped danmaku unchanged.
    """

    def __init__(self, danmaku, offset):
        self.danmaku = danmaku
        self.offset = offset
        self.start = self._start()

    def _start(self):
        """Return the wrapped start time shifted by the configured offset."""
        return self.danmaku.start + self.offset

    def __getattr__(self, name):
        # Fall through to the wrapped danmaku for any attribute not set here.
        return getattr(self.danmaku, name)
class Producer(object):
    """Drives one download: builds pages/videos, then filters and merges danmakus."""

    def __init__(self, args, bootstrap_url):
        self.config = Config(args)
        self.bootstrap_url = bootstrap_url
        self.title = '未知标题'
        self.pages = []
        self.videos = []

    def start_download(self):
        """Resolve the pages (all parts, or just one) and build their videos."""
        if self.config.merge_parts:
            self.pages = make_part_pages(self.bootstrap_url)
        else:
            self.pages = [make_page(self.bootstrap_url)]
        self.videos = []
        for page in self.pages:
            video = make_video(self.config, page)
            self.videos.append(video)
        video = self.videos[0]
        # When merging parts use the first video's bare h1; otherwise its
        # full title (which may include the part subtitle h2).
        if self.config.merge_parts:
            self.title = video.h1
        else:
            self.title = video.title

    def start_handle(self):
        self.init_filter_danmakus()

    def init_filter_danmakus(self):
        """Filter every video's danmakus and apply per-part time offsets.

        Populates ``keeped_danmakus`` (offset-adjusted ProxyDanmaku list)
        plus ``filter_detail``/``blocked_count``/``passed_count``/
        ``total_count``. The filter chain order is guest -> bottom ->
        per-video -> custom; the first matching filter both blocks the
        danmaku and gets the count, so a danmaku is counted at most once.
        """
        keeped_danmakus = []
        filter_detail = dict(
            bottom=0,
            guest=0,
            video=0,
            custom=0
        )
        custom_filter = self.config.custom_filter
        part_offset = 0
        for i, video in enumerate(self.videos):
            # Work out this video's time offset #
            offset = 0
            # Merging parts: shift by the accumulated length of prior parts.
            if self.config.merge_parts:
                if i != 0:
                    prev_video = self.videos[i - 1]
                    part_offset += prev_video.play_length
                offset = part_offset
            # Skipping the patch: shift backwards past the intro segment.
            if self.config.skip_patch:
                offset -= video.feature_start
            # Run the filter chain #
            for danmaku in video.danmakus:
                if not self.config.disable_guest_filter:
                    if guest_filter.match(danmaku):
                        filter_detail['guest'] += 1
                        continue
                if not self.config.disable_bottom_filter:
                    if bottom_filter.match(danmaku):
                        filter_detail['bottom'] += 1
                        continue
                if not self.config.disable_video_filter:
                    if video.filter and video.filter.match(danmaku):
                        filter_detail['video'] += 1
                        continue
                if custom_filter:
                    if custom_filter.match(danmaku):
                        filter_detail['custom'] += 1
                        continue
                # Passed every filter: keep it with the offset applied.
                danmaku = ProxyDanmaku(danmaku, offset)
                keeped_danmakus.append(danmaku)
        self.keeped_danmakus = keeped_danmakus
        self.filter_detail = filter_detail
        self.blocked_count = sum(filter_detail.values())
        self.passed_count = len(keeped_danmakus)
        self.total_count = self.blocked_count + self.passed_count
########NEW FILE########
__FILENAME__ = tucao
import re
from ..libcore.const import NOT_SUPPORT, SCROLL, TOP, BOTTOM
from ..libcore.utils import extract_params
from ..libcore.fetcher import fetch
from ..libcore.danmaku import BaseDanmaku
from ..libcore.video import BaseVideo
class Danmaku(BaseDanmaku):
    """A single tucao danmaku parsed from one "<d p='...'>" XML element."""

    def __init__(self, text):
        self.text = text
        self.raw = self._raw()
        # BaseDanmaku interface attributes.
        self.start = self._start()
        self.style = self._style()
        self.color = self._color()
        self.commenter = self._commenter()
        self.content = self._content()
        self.size_ratio = self._size_ratio()
        self.is_guest = self._is_guest()
        self.is_applaud = self._is_applaud()

    def _raw(self):
        """Split the p='...' attribute list and CDATA body into a dict."""
        pattern = re.compile("<d p='(.+?)'><!\[CDATA\[(.*?)\]\]></d>")
        attr_text, body = pattern.findall(self.text)[0]
        fields = attr_text.split(',')
        return {
            'start': float(fields[0]),
            'style': int(fields[1]),
            'size': int(fields[2]),
            'color': int(fields[3]),
            'publish': int(fields[4]),
            'content': body,
        }

    # BaseDanmaku interface #

    def _start(self):
        return self.raw['start']

    def _style(self):
        # Styles 1-3 and 6 render as scrolling text; 7/8 are not supported.
        style_map = {
            1: SCROLL,
            2: SCROLL,
            3: SCROLL,
            4: BOTTOM,
            5: TOP,
            6: SCROLL,
            7: NOT_SUPPORT,
            8: NOT_SUPPORT,
        }
        return style_map.get(self.raw['style'], NOT_SUPPORT)

    def _color(self):
        return self.raw['color']

    def _commenter(self):
        # The feed carries no commenter information.
        return 'blank'

    def _content(self):
        return self.raw['content']

    def _size_ratio(self):
        # Size relative to the Flash player's reference font size.
        flash_player_font_size = 25
        return self.raw['size'] / flash_player_font_size

    def _is_guest(self):
        # No way to tell from the feed.
        return False

    def _is_applaud(self):
        return False
class Video(BaseVideo):
    """A tucao video: built from an aid; only danmakus are actually fetched.

    Filter, play info and feature start are deliberately unimplemented for
    this site and return inert defaults.
    """

    def __init__(self, config, meta):
        self.config = config
        self.meta = meta
        self.aid = self._aid()
        self.pid = self._pid()
        #print('信息:' + str(self.meta))
        #print('信息:' + str(dict(aid=self.aid, pid=self.pid)))
        # BaseVideo interface attributes.
        self.uid = 'pid:' + self.pid
        self.h1 = self._h1()
        self.h2 = self._h2()
        self.title = self._title()
        self.filter = self._filter()
        (self.play_length,
         self.play_urls) = self._play_info()
        self.danmakus = self._danmakus()
        self.feature_start = self._feature_start()

    def _aid(self):
        """Return the article id; it must be supplied via meta/assist params.

        Raises:
            Exception: when meta carries no 'aid'.
        """
        value = self.meta.get('aid')
        if value is not None:
            return value
        raise Exception('无法获取 aid,请用辅助参数指定')

    def _pid(self):
        # Player id format expected by the mukio comment endpoint below.
        return '11-' + self.aid + '-1-0'

    # BaseVideo interface #

    def _h1(self):
        return self.meta.get('h1', '')

    def _h2(self):
        return self.meta.get('h2', '')

    def _title(self):
        """Combine h1/h2 into a display title ('未知标题' when h1 is empty)."""
        if not self.h1:
            return '未知标题'
        if self.h2:
            return self.h1 + ' - ' + self.h2
        else:
            return self.h1

    def _filter(self):
        # Intentionally not implemented for tucao.
        return None

    def _play_info(self):
        # Intentionally not implemented for tucao.
        return (0, [])

    def _danmakus(self):
        """Download the danmaku XML and return Danmaku objects sorted by start."""
        tpl = 'http://www.tucao.cc/index.php?' + \
            'm=mukio&c=index&a=init&playerID={}&r=205'
        url = tpl.format(self.pid)
        text = fetch(url)
        reg = re.compile('<d .*</d>')
        matches = reg.findall(text)
        orignal_danmakus = map(Danmaku, matches)
        ordered_danmakus = sorted(orignal_danmakus, key=lambda d: d.start)
        return ordered_danmakus

    def _feature_start(self):
        # Intentionally not implemented for tucao.
        return 0
class Page(object):
    """A tucao page URL; extracts video parameters for Video construction."""

    def __init__(self, url):
        self.url = url
        self.video_class = Video
        self.params = self._params()

    def _params(self):
        """Dispatch on the URL form and return a parameter dict."""
        abbr_prefix = 'c://'
        normal_prefix = 'http://www.tucao.cc/play/'
        target = self.url
        result = {}
        if target.startswith(abbr_prefix):
            result = extract_params(target[len(abbr_prefix):])
        elif target.startswith(normal_prefix):
            result = self.extract_params_from_normal_page(target)
        return result

    def extract_params_from_normal_page(self, url):
        """Scrape aid/h1 from a regular tucao play page."""
        aid_pattern = re.compile('/play/h([0-9]+)/')
        title_pattern = re.compile("add_favorite\('(.+?)'\);")
        html = fetch(url)
        return {
            'aid': aid_pattern.findall(url)[0],
            'h1': title_pattern.findall(html)[0],
        }
########NEW FILE########
|
20,497 | dec0175b39707e21ea15db7ed7362023c049144d | import asyncio
import aiohttp
import json
import random
import sys
from datetime import datetime, timezone
#TIMESTAMP = "2006-01-02T15:04:05Z07:00"
TIMESTAMP = datetime.now(timezone.utc).astimezone().isoformat()
def generate_route_message(id_, edges, seen):
    """Build a ROUTE control message for the receptor-controller websocket."""
    return {
        'cmd': 'ROUTE',
        'id': id_,
        'edges': edges,
        'seen': seen,
    }
def generate_cmd(cmd, id_, timestamp):
    """Build a generic control message carrying an expiry timestamp."""
    return {'cmd': cmd, 'id': id_, 'expire_time': timestamp}
async def test_client(loop, account_number, node_id):
    """Connect one fake receptor node to the local controller and chat.

    Opens a websocket to the controller with basic auth plus a canned
    x-rh-identity header, spawns a background task that periodically
    broadcasts a static ROUTE message, then loops reading messages:
    replies to "HI", closes on 'close', and logs everything else.

    NOTE(review): the ClientSession/websocket are only closed on the
    'close' path — acceptable for a throwaway load-test script.
    """
    basic_auth = aiohttp.BasicAuth(account_number, "imapassord")
    session = aiohttp.ClientSession(auth=basic_auth)
    # Pre-baked identity header (base64 JSON; appears to encode
    # account_number 0000001 — confirm against the controller's auth).
    headers={"x-rh-identity" : "eyJpZGVudGl0eSI6IHsiYWNjb3VudF9udW1iZXIiOiAiMDAwMDAwMSIsICJpbnRlcm5hbCI6IHsib3JnX2lkIjogIjAwMDAwMSJ9fX0="}
    ws = await session.ws_connect('http://localhost:8080/receptor-controller', headers=headers)
    edges = [["node-a", "node-b", 1]]
    seen = []

    async def periodic_writer():
        # Broadcast our (static) routing table at random sub-second intervals.
        await asyncio.sleep(2)
        while True:
            print("writing")
            await ws.send_str(json.dumps(generate_route_message(node_id, edges, seen)))
            delay_msecs = random.randrange(100, 1000) / 1000
            await asyncio.sleep(delay_msecs)

    loop.create_task(periodic_writer())
    while True:
        print("here")
        msg = await ws.receive()
        print("there")
        #print("type(msg):", type(msg))
        #print("dir(msg):", dir(msg))
        if msg.type == aiohttp.WSMsgType.text:
            # Answer a handshake prompt from the controller.
            if msg.data[:2] == "HI":
                print("Gotta HI...")
                print("Sending HI...")
                await ws.send_str(json.dumps(generate_cmd("HI", node_id, TIMESTAMP)))
                #await ws.send_str("ROUTE:node-x:timestamp")
            if msg.data == 'close':
                print("CLOSE!")
                await ws.close()
                break
            else:
                print("recv:", msg.data)
                # await ws.send_str(msg.data + '/answer')
        elif msg.type == aiohttp.WSMsgType.closed:
            print("WSMsgType.closed")
            break
        elif msg.type == aiohttp.WSMsgType.error:
            print("WSMsgType.error")
            break
if __name__ == "__main__":
    # Usage: script.py START END — spins up one client per account number
    # in [START, END), each with a zero-padded matching node id.
    loop = asyncio.new_event_loop()
    coros = [test_client(loop, "%02d"%i, "node_%02d"%i) for i in range(int(sys.argv[1]), int(sys.argv[2]))]
    loop.run_until_complete(asyncio.wait(coros))
    #task = loop.create_task(test_client(loop, sys.argv[1], sys.argv[2]))
    #loop.run_until_complete(coros)
|
20,498 | 7dcdd6e4b8186b04f0b4376ba4940706d05adcc3 | # Poorly named for now since this client doesn't even have a UI, but this is to try and keep consistent with the client
from message import Message, RMessage
from threading import Thread
import json
class UI:
    """Headless stand-in for the game client's UI.

    Runs a daemon thread that drains remote-UI messages from the session
    and forwards notable widget events (character-list entries, chat
    channels, chat messages) as JSON over the session's websocket.
    """

    def __init__(self, session):
        self.rwidgets = []
        self.sess = session
        self.remoteui_thread = Thread(target=self.remoteui)
        self.remoteui_thread.daemon = True
        self.remoteui_thread.start()
        self.charlist = None  # widget id of the character list, once discovered
        self.chats = {}  # widget id -> chat channel name
        self.areaChat = None

    # These methods are the core of botting, and the foundation of this class. For consistency I'll keep these going.
    # public void addwidget(int id, int parent, Object[] pargs)
    def addwdidget(self, id_, parent, pargs):
        # NOTE(review): method name has a typo ('addwdidget'), but it is wired
        # into remoteui below; renaming would change the class's interface.
        print("Added: " + "\t id: " + str(id_) + "\tparent: " + str(parent) + "\tpargs: " + str(pargs))

    # public void newwidget(int id, String type, int parent, Object[] pargs, Object... cargs)
    def newwidget(self, id_, type_, parent, pargs, cargs):
        """Record widgets we care about: the charlist and any chat pane."""
        print("\n" + "id: " + str(id_) + "\ttype: " + str(type_) + "\tparent: " + str(parent) + "\tpargs: " + str(
            pargs) + "\tcargs: " + str(cargs))
        if type_ == "charlist":
            self.charlist = id_
        # Area and Village chat seem to take the form of mchat
        # Realm chat seems to be ui/rchan:18... so if type_ in ui/rchan:
        # Private chat is just pmchat
        if type_ == "mchat" or "ui/rchan" in type_ or type_ == "pmchat":
            self.chats[id_] = cargs[0]
            print("Found " + str(cargs[0]))
            self.sess.websocket.sendMessage(json.dumps({
                'type': 'chat_add',
                'id': id_,
                'name': str(cargs[0])
            }))

    # public void uimsg(int id, String msg, Object... args)
    def uimsg(self, id_, msg, args):
        """Relay widget messages: charlist 'add' events and chat 'msg' events."""
        print("\n" + "id: " + str(id_) + "\tmsg: " + str(msg) + "\targs: " + str(args))
        # TODO: Clean and refactor below. This needs to be much cleaner.
        if msg == "add":
            if self.charlist == id_:
                print("adding a character to the charlist...")
                self.sess.websocket.sendMessage(json.dumps({
                    'type': 'char_add',
                    'name': args[0]
                }))
        if msg == "msg":
            if id_ in self.chats:
                print("Sending off a chat msg...")
                self.sess.websocket.sendMessage(json.dumps({
                    'type': 'chat_msg',
                    'chat_id': id_,
                    'msg': args
                }))

    # public void destroy(int id)
    def destroy(self, id_):
        print("Destroyed: " + str(id_))

    # This loops through and dispatches the sess.ui msgs to the right function
    # TODO: Add a wait in this loop so it isnt so spammy
    def remoteui(self):
        """Daemon loop: decode queued RMessages and dispatch to the handlers.

        NOTE(review): when the queue is empty the inner loop breaks on None
        and the outer 'while True' immediately polls again — a busy-wait
        (see the TODO above).
        """
        while True:
            msg = None
            while True:
                msg = self.sess.getuimsg()
                if msg is None:
                    break
                # First byte selects the message kind; the remaining fields
                # are read in the order the protocol defines for that kind.
                msg_type = msg.read_uint8()
                if msg_type == RMessage.RMSG_NEWWDG:
                    id_ = msg.read_uint16()
                    type_ = msg.read_string()
                    parent = msg.read_uint16()
                    pargs = msg.read_list()
                    cargs = msg.read_list()
                    self.newwidget(id_, type_, parent, pargs, cargs)
                elif msg_type == RMessage.RMSG_WDGMSG:
                    id_ = msg.read_uint16()
                    name = msg.read_string()
                    args = msg.read_list()
                    self.uimsg(id_, name, args)
                elif msg_type == RMessage.RMSG_DSTWDG:
                    id_ = msg.read_uint16()
                    self.destroy(id_)
                elif msg_type == RMessage.RMSG_ADDWDG:
                    id_ = msg.read_uint16()
                    parent = msg.read_uint16()
                    pargs = msg.read_list()
                    self.addwdidget(id_, parent, pargs)
|
20,499 | 0aca9434310bb48bca7fe67ecea6758345e6b75e | from locators import *
class BasePage(object):
    """Base page object inherited by every concrete page class."""

    def __init__(self, driver):
        # Selenium WebDriver instance shared by all page-object methods.
        self.driver = driver
class NomisSolutionsPage(BasePage):
    """Page object for the Nomis Solutions home page."""

    def click_get_started_btn(self):
        """Navigate to the get-started page via its button."""
        button = self.driver.find_element(*NomisSolutionsPageLocators.GET_STARTED_BUTTON)
        button.click()

    def page_title(self):
        """Return the current page title."""
        return self.driver.title

    def page_url(self):
        """Return the current page URL."""
        return self.driver.current_url

    def search_box_enter(self, text):
        """Type *text* into the search box and submit the search.

        Args:
            text (string): Text to be entered and submitted in the search box
        """
        search_box = self.driver.find_element(*NomisSolutionsPageLocators.SEARCH_BOX)
        search_box.send_keys(text)
        submit = self.driver.find_element(*NomisSolutionsPageLocators.SEARCH_BUTTON)
        submit.click()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.