index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
989,800 | ebcb6f2069f136f95a9eefcf76958f09abd27ea3 | n = int(input())
A = list(map(int, input().split()))
import math
from functools import reduce


def lcm(a: int, b: int) -> int:
    """Least common multiple of two positive integers."""
    return a // math.gcd(a, b) * b


def lcm_list(numbers):
    """LCM of every number in the iterable (left fold of pairwise lcm)."""
    return reduce(lcm, numbers)


# With m = lcm(A) - 1 we get m % a == a - 1 simultaneously for every a,
# which maximises the sum of remainders.
x = lcm_list(A)
ans = sum((x - 1) % a for a in A)
print(ans)
|
989,801 | 91e60e986f0dc9c487ae72fc07f77655e0e99e82 | from . import db
import datetime
from marshmallow import fields, Schema
from .ProductModel import ProductModelSchema
class ContractProductModel(db.Model):
    """Association row linking a contract to a product with a quantity."""

    __tablename__ = 'contract_products'

    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    contract_id = db.Column(db.Integer, db.ForeignKey('contracts.id'), nullable=False)
    product_id = db.Column(db.Integer, db.ForeignKey('products.id'), nullable=False)
    product_quantity = db.Column(db.Integer)
    created_at = db.Column(db.DateTime, default=datetime.datetime.utcnow)
    created_by = db.Column(db.Integer, db.ForeignKey('users.id'), nullable=False)
    modified_at = db.Column(db.DateTime, default=datetime.datetime.utcnow)
    modified_by = db.Column(db.Integer, db.ForeignKey('users.id'))
    products = db.relationship('ProductModel', backref='contract_products', lazy=True)

    def __init__(self, data):
        """Populate the row from a dict; missing keys default to None."""
        for field in ('id', 'contract_id', 'product_id', 'product_quantity',
                      'created_at', 'created_by', 'modified_at', 'modified_by'):
            setattr(self, field, data.get(field))

    def save(self):
        """Insert this row and commit."""
        db.session.add(self)
        db.session.commit()

    def update(self, data):
        """Apply the given attributes, refresh modified_at, and commit."""
        for key, value in data.items():
            setattr(self, key, value)
        self.modified_at = datetime.datetime.utcnow()
        db.session.commit()

    def delete(self):
        """Remove this row and commit."""
        db.session.delete(self)
        db.session.commit()

    @staticmethod
    def get_all_contract_products():
        """Return every contract-product row."""
        return ContractProductModel.query.all()

    @staticmethod
    def get_one_contract_product(id):
        """Return the row with the given primary key, or None."""
        return ContractProductModel.query.get(id)

    def __repr__(self):
        return '<id {}>'.format(self.id)
class ContractProductSchema(Schema):
    """
    Contract Product Schema

    Marshmallow serialization/validation schema for ContractProductModel.
    Server-managed fields are dump_only; client must supply the rest.
    """
    id = fields.Int(dump_only=True)
    contract_id = fields.Int(required=True)
    product_id = fields.Int(required=True)
    product_quantity = fields.Int(required=True)
    created_at = fields.DateTime(dump_only=True)
    created_by = fields.Int(dump_only=True)
    modified_at = fields.DateTime(dump_only=True)
    modified_by = fields.Int(dump_only=True)
    # Nested product payload; populated from the model's relationship.
    products = fields.Nested(ProductModelSchema)
989,802 | a27b246da0b6c5498af0d6b9fed3e84a2317a4b3 | import re
import numpy as np
__DELETE_CONF__ = False
# set DELETE CONF to True for CoNLL-2003
begin_pattern = re.compile(r'^B-*')
mid_pattern = re.compile(r'^I-*')
out_pattern = re.compile(r'^O')
def conflict(anchor_1, anchor_2):
    """Return True iff the two [start, end] anchors strictly partially overlap.

    Disjoint spans and full containment are NOT conflicts — only the case
    where one span starts strictly inside the other and ends strictly
    outside it.
    """
    s1, e1 = anchor_1[0], anchor_1[1]
    s2, e2 = anchor_2[0], anchor_2[1]
    return (s1 < s2 < e1 < e2) or (s2 < s1 < e2 < e1)
def detect_conflict(candi_group, prob_group, cls_group,
                    roi_feature_group, roi_elmo_feature_group,
                    roi_label_group, roi_len_group, roi_char_ids_group,
                    roi_word_lengths_group, sen_last_hidden_group,
                    left_context_word_group, left_context_len_group,
                    right_context_word_group, right_context_len_group):
    """
    Accept the anchor with highest prob
    Delete conflict anchors

    Greedy NMS-style filter: repeatedly keep the highest-probability anchor
    and, when the module flag __DELETE_CONF__ is set, drop anchors whose
    spans partially overlap it (see conflict()).  All the parallel *_group
    lists are then reduced to the kept indices, in descending-probability
    order.  Note: cls_group is accepted but not used here.
    """
    roi_feature_nonconf, roi_elmo_feature_nonconf, roi_label_nonconf, roi_len_nonconf = [], [], [], []
    roi_char_ids_nonconf, roi_word_lengths_nonconf, sen_last_hidden_nonconf = [], [], []
    left_context_word_nonconf, left_context_len_nonconf = [], []
    right_context_word_nonconf, right_context_len_nonconf = [], []
    keep = []
    # anchor indices sorted by descending probability
    orders = np.argsort(-np.array(prob_group))
    while orders.size > 0:
        # positions into `orders` that survive this round
        save_item = list(range(orders.shape[0]))
        # Accept the anchor with hightest prob
        highest_idx = orders[0]
        keep.append(highest_idx)
        save_item.remove(0)
        if __DELETE_CONF__:
            # delete conflict anchors (CoNLL-2003 mode)
            for k in range(1, len(orders)):
                if conflict(candi_group[highest_idx], candi_group[orders[k]]):
                    save_item.remove(k)
        orders = orders[save_item]
    for idx in keep:
        # output probs and labels: filter every parallel list to kept anchors
        roi_feature_nonconf.append(roi_feature_group[idx])
        roi_elmo_feature_nonconf.append(roi_elmo_feature_group[idx])
        roi_label_nonconf.append(roi_label_group[idx])
        roi_len_nonconf.append(roi_len_group[idx])
        roi_char_ids_nonconf.append(roi_char_ids_group[idx])
        roi_word_lengths_nonconf.append(roi_word_lengths_group[idx])
        sen_last_hidden_nonconf.append(sen_last_hidden_group[idx])
        left_context_word_nonconf.append(left_context_word_group[idx])
        left_context_len_nonconf.append(left_context_len_group[idx])
        right_context_word_nonconf.append(right_context_word_group[idx])
        right_context_len_nonconf.append(right_context_len_group[idx])
    return roi_feature_nonconf, roi_elmo_feature_nonconf, roi_label_nonconf, roi_len_nonconf, roi_char_ids_nonconf, roi_word_lengths_nonconf, sen_last_hidden_nonconf, left_context_word_nonconf, left_context_len_nonconf, right_context_word_nonconf, right_context_len_nonconf
def load_contain_dict(file_name):
    """Load a containment dictionary from a tab-separated file.

    Each line: entity_text<TAB>type<TAB>start<TAB>end.  Lines sharing the
    same entity text accumulate into one list.

    Returns:
        dict mapping entity text -> list of [type, start, end] int triples.
    """
    contain_dict = {}
    # 'with' guarantees the handle is closed; the original leaked it.
    with open(file_name, 'r') as fh:
        for line in fh:
            arr = line.strip().split('\t')
            # setdefault replaces the separate membership test
            contain_dict.setdefault(arr[0], []).append(
                [int(arr[1]), int(arr[2]), int(arr[3])])
    return contain_dict
def load_cls_dict(file_name):
    """Load a class dictionary from a file of 'name:id' lines.

    Returns:
        dict mapping class name (str) -> class id (int).
    """
    cls_dict = dict()
    # 'with' guarantees the handle is closed; the original leaked it.
    with open(file_name) as fh:
        for line in fh:
            arr = line.strip().split(':')
            cls_dict[arr[0]] = int(arr[1])
    return cls_dict
def load_label_dict(file_name):
    """Load an id -> label mapping, one label per line (line number = id).

    Returns None when the file cannot be opened/read — this preserves the
    original silent-failure contract, but the bare `except: pass` (which
    swallowed every exception and leaked the file handle) is narrowed to
    OSError and the file is closed via `with`.
    """
    try:
        with open(file_name) as fh:
            return {idx: word.strip() for idx, word in enumerate(fh)}
    except OSError:
        return None
def get_contain_entity(words, idx_arr, contain_dict):
    """Return shorter entities nested inside the span words[idx_arr[1]:idx_arr[2]+1].

    The span's text is looked up in contain_dict; relative offsets of any
    contained entities are shifted to absolute word indices.

    Returns:
        list of [type, abs_start, abs_end], or [] when the span is unknown.
    """
    start, end = idx_arr[1], idx_arr[2]
    span_text = ' '.join(words[start:end + 1])
    contained = contain_dict.get(span_text)
    if contained is None:
        return []
    return [[etype, start + rel_start, start + rel_end]
            for etype, rel_start, rel_end in contained]
def get_anchor_label(idx1, idx2, true_entity, sen_len):
    """Label the span (idx1, idx2) against the gold entities.

    Returns:
        (1, class_id)  the span matches a gold entity
        (0, 0)         valid span, but not an entity (negative pair)
        (-1, -1)       span falls outside the sentence (invalid pair)
    """
    if idx1 < 0 or idx2 >= sen_len:
        return -1, -1  # labels out of boundary
    # gold entries for this start index are [end_idx, entity_type] pairs
    for end_idx, entity_type in true_entity.get(idx1, ()):
        if end_idx == idx2:
            return 1, entity_type  # true entity pair
    return 0, 0  # neg pair
def k_anchors(true_entity, sen_len, idx):
    """
    Generate 5 types of anchors and labels for each word
    sentence: A B C D E
    word: C
    idx: 2

    NOTE(review): six anchor spans are actually generated (types 1-6)
    while sample_indexes below allocates 5 slots per word
    (range(idx*5, (idx+1)*5)) — confirm which count is intended.
    """
    anchors = []
    anchor_labels = []
    cls_ids = []
    sample_indexes = []
    # becomes True once any anchor at this word matches a gold entity
    entity_tag = False
    # type 1: C
    anchors.append([idx, idx])
    an_label, cls_id = get_anchor_label(
        idx, idx, true_entity, sen_len)
    anchor_labels.append(an_label)
    cls_ids.append(cls_id)
    if (an_label == 1):
        entity_tag = True
    # type 2: CD
    anchors.append([idx, idx + 1])
    an_label, cls_id = get_anchor_label(
        idx, idx + 1, true_entity, sen_len)
    anchor_labels.append(an_label)
    cls_ids.append(cls_id)
    if (an_label == 1):
        entity_tag = True
    # type 3: BCD
    anchors.append([idx - 1, idx + 1])
    an_label, cls_id = get_anchor_label(
        idx - 1, idx + 1, true_entity, sen_len)
    anchor_labels.append(an_label)
    cls_ids.append(cls_id)
    if (an_label == 1):
        entity_tag = True
    # type 4: BCDE
    anchors.append([idx - 1, idx + 2])
    an_label, cls_id = get_anchor_label(
        idx - 1, idx + 2, true_entity, sen_len)
    anchor_labels.append(an_label)
    cls_ids.append(cls_id)
    if (an_label == 1):
        entity_tag = True
    # type 5: ABCDE
    anchors.append([idx - 2, idx + 2])
    an_label, cls_id = get_anchor_label(
        idx - 2, idx + 2, true_entity, sen_len)
    anchor_labels.append(an_label)
    cls_ids.append(cls_id)
    # NOTE(review): unlike every other type, a positive type-5 anchor does
    # NOT set entity_tag here — looks like a copy-paste omission; confirm.
    # type 6: ABCDEF
    anchors.append([idx - 2, idx + 3])
    an_label, cls_id = get_anchor_label(
        idx - 2, idx + 3, true_entity, sen_len)
    anchor_labels.append(an_label)
    cls_ids.append(cls_id)
    if (an_label == 1):
        entity_tag = True
    if (entity_tag == True):
        # add other entities as negs
        sample_indexes = list(range(idx*5, (idx+1)*5))
    #print("*******word idx", idx, "sample indexes:", sample_indexes)
    return anchors, anchor_labels, cls_ids, sample_indexes
def get_pairs(line):
    """
    Description:
        extract entity pairs from each line
        type: line: str
            sentence+'\t'+entity_pairs
            sentence: words (split: ' ')
            entity pairs: (split: '\t')
            each pair: entity_type, start_idx, end_idx
        rtype:
            idx: dict
                key: begin_idx
                value: [end_idx, entity_type]
            and the sentence length (word count)
    """
    # get positive anchor pairs
    idx_dict = {}
    arr = line.strip().split('\t')
    # fields 0 and 1 hold the sentence and its tags; entities start at 2
    for field in arr[2:]:
        info = field.split(' ')
        entity_type = int(info[0])
        begin_idx = int(info[1])
        end_idx = int(info[2])
        idx_dict.setdefault(begin_idx, []).append([end_idx, entity_type])
    # sentence length = number of space-separated words
    sen_len = len(arr[0].split(' '))
    return idx_dict, sen_len
def generate_anchor():
    #label_dict, cls_dict, contain_dict):
    """Build a closure that turns one data line into anchors and labels."""
    def f(line):
        """
        Generate anchors and anhcor labels for one line
        Input: tags for one line
        Output: anchors and anchor labels for one line
        """
        # gold spans keyed by start index, plus the word count
        pos_pair, sen_len = get_pairs(line)
        # accumulators across all words of the sentence
        line_anchor, line_label, line_cls = [], [], []
        sample_indexes = []
        for word_idx in range(sen_len):
            # anchors and labels centred on this word
            anchors, labels, cls, s_indexes = k_anchors(pos_pair, sen_len, word_idx)
            line_anchor.extend(anchors)
            line_label.extend(labels)
            line_cls.extend(cls)
            sample_indexes.extend(s_indexes)
        return line_anchor, line_label, line_cls, sample_indexes
    return f
|
989,803 | d2b49dfac51f982cb2fa6d7bedddc8613819ccb4 | # -*- coding: utf-8 -*-
"""
Created on Fri Sep 25 13:48:50 2020
We use hill climbing algorithm with random initial state to solve
the n-queen problem.
@author: Hongxu Chen
"""
import random
# Return all successor of a given state with given static point.
def succ(state, static_x, static_y):
    """All neighbour states reachable by moving one non-static queen up or down.

    state[i] is the row of the queen in column i.  Returns a sorted list of
    new states; returns [] when the static square (static_x, static_y) does
    not actually hold a queen.
    """
    if state[static_x] != static_y:
        return []
    n = len(state)
    successors = []
    for col in range(n):
        # the queen on the static column never moves
        if col == static_x:
            continue
        for delta in (-1, 1):
            row = state[col] + delta
            if 0 <= row < n:  # stay on the board
                neighbour = state.copy()
                neighbour[col] = row
                successors.append(neighbour)
    successors.sort()
    return successors
# Return the value of a given state
def f(state):
    """Evaluation function: how many queens are attacked.

    Queens share a column index per list slot, so only same-row and
    diagonal attacks are possible.  Returns the count of attacked queens
    (0 means a solution).
    """
    n = len(state)
    safe = [True] * n
    for i in range(n):
        for j in range(i + 1, n):
            # same row, or row difference equals column difference (diagonal)
            if state[j] == state[i] or abs(state[j] - state[i]) == j - i:
                safe[i] = False
                safe[j] = False
    return sum(1 for ok in safe if not ok)
# Return the next state from all the successor
def choose_next(curr, static_x, static_y):
    """Greedy hill-climbing step: best-valued state among curr and its successors.

    Ties break toward the lexicographically smallest state (the candidate
    list is sorted before scanning).  Returns None when the static square
    holds no queen.
    """
    if curr[static_x] != static_y:
        return None
    candidates = succ(curr, static_x, static_y)
    # the current state competes with its successors
    candidates.append(curr)
    candidates.sort()
    # min() keeps the first (lexicographically smallest) state on ties,
    # matching the original strict-< scan
    return min(candidates, key=f)
# Return the goal state from a given intial state,
# print the path if pri=True, do not print if pri=False.
def n_queens_print(initial_state, static_x, static_y, pri):
    """Hill-climb from initial_state until the value stops improving.

    Returns the final state; prints each visited state (with its f value)
    when pri is True.
    """
    # prev/curr track two consecutive states along the climb
    prev = initial_state
    curr = choose_next(prev, static_x, static_y)
    # values of the current and previous states
    curr_f = f(curr)
    pre_f = f(initial_state)
    # keep stepping while the value strictly improves
    while curr_f < pre_f:
        if pri:
            print(str(prev) + " - f=" + str(pre_f))
        prev = curr
        curr = choose_next(curr, static_x, static_y)
        pre_f = curr_f
        curr_f = f(curr)
    # goal reached (f == 0): print/return only the last improving state
    if curr_f == 0:
        if pri:
            print(str(prev) + " - f=" + str(pre_f))
        return prev
    # plateau / local optimum: two consecutive states share the same value
    if pri:
        print(str(prev) + " - f=" + str(pre_f))
        print(str(curr) + " - f=" + str(curr_f))
    return curr
# Call previous method with pri=True to print the path.
def n_queens(initial_state, static_x, static_y):
    """Run hill climbing from initial_state, printing the path of states."""
    return n_queens_print(initial_state, static_x, static_y, True)
# Randomly select k initial state and return the optimal value.
def n_queens_restart(n, k, static_x, static_y):
    """Random-restart hill climbing: k runs, then print every best final state.

    Uses a fixed seed for reproducibility; stops early once a perfect
    (f == 0) state is found.
    """
    random.seed(1)
    best_states = []  # all optimal final states seen so far
    best_f = n        # upper bound on f, improved as runs finish
    for _ in range(k):
        # random start, keeping the static queen pinned in place
        initial = [static_y if col == static_x else random.randint(0, n - 1)
                   for col in range(n)]
        # climb without printing intermediate states
        result = n_queens_print(initial, static_x, static_y, False)
        result_f = f(result)
        if result_f < best_f:
            # strictly better: discard the old list, keep only this state
            best_f = result_f
            best_states = [result]
        elif result_f == best_f:
            best_states.append(result)
        # a solution was found — no point restarting further
        if best_f == 0:
            break
    best_states.sort()
    for state in best_states:
        print(str(state) + " - f=" + str(f(state)))
|
989,804 | ccb6b626dcd46d8092e92693023e26684fe70c55 | -X FMLP -Q 0 -L 4 139 400
-X FMLP -Q 0 -L 4 100 300
-X FMLP -Q 1 -L 2 71 250
-X FMLP -Q 1 -L 2 64 200
-X FMLP -Q 2 -L 1 49 150
-X FMLP -Q 2 -L 1 48 400
-X FMLP -Q 3 -L 1 43 200
-X FMLP -Q 3 -L 1 41 150
40 125
37 150
36 125
33 200
26 100
|
989,805 | b303b8f624cc93a527fcce660b1acd16e6856181 | import sys
sys.stdin = open('5249.txt')
def find(n):
    """Root of n's disjoint-set tree in the global parent table p."""
    while n != p[n]:
        n = p[n]
    return n
def union(s, e):
    """Merge the sets containing s and e; the smaller root index becomes parent."""
    root_s, root_e = find(s), find(e)
    if root_s > root_e:
        p[root_s] = root_e
    else:
        p[root_e] = root_s
# Kruskal's minimum spanning tree over T test cases: sort edges by weight
# and accept each edge that joins two different union-find components.
T = int(input())
for t in range(1, T + 1):
    V, E = map(int, input().split())
    # each edge line: start vertex, end vertex, weight
    arr = [list(map(int, input().split())) for _ in range(E)]
    arr.sort(key=lambda x: x[2])
    p = list(range(V + 1))  # parent table used by find()/union()
    ans = cnt = 0
    for s, e, d in arr:
        if find(s) != find(e):  # edge connects two components -> accept
            union(s, e)
            ans += d
            cnt += 1
            # V accepted edges span the V+1 vertices 0..V — done early
            if cnt == V:
                break
    print('#{} {}'.format(t, ans))
989,806 | aaaca1623cd6bdac79b7bf18c41748fae13a3de3 | #首先输入行数列数
#esc退出
#r复位
#t提示
import numpy as np
import pygame
from pygame.locals import *
import random
class Labyrinth:
    """Maze generator (random frontier growth) plus a BFS path solver.

    Cell grid is rows x cols; the renderable maze `laby` is expanded to
    (2*rows+1) x (2*cols+1) where 0 = passage and 1 = wall.
    """

    def __init__(self, rows=30, cols=40):
        self.rows = rows
        self.cols = cols
        self.keep_going = 1
        # keep_going = 1 means generation keeps running
        # M[r, c] = (right-wall open, down-wall open, visit state)
        # visit state: 0 = unseen, 1 = in maze, 2 = frontier
        self.M = np.zeros((rows, cols, 3), dtype=np.uint8)
        # expanded maze: 0 = path, 1 = wall
        self.laby = np.ones((rows*2+1, cols*2+1), dtype=np.uint8)
        # N[i, j] = (wall to the right, wall below); used for player movement
        self.N = np.zeros((rows*2+1, cols*2+1, 2), dtype=np.uint8)
        self.start = [1, 0]
        self.end = [rows*2-1, cols*2]
        # [row, col] deltas: up, left, down, right
        self.direction = [[-1, 0], [0, -1], [1, 0], [0, 1]]

    def createlaby(self):
        """Generate the maze; fills M, laby and N, and returns laby."""
        M = self.M
        r = 0  # row
        c = 0  # column
        history = [(r, c)]
        rows = self.rows
        cols = self.cols
        while history:
            # pick a random frontier cell and move it into the maze
            r, c = random.choice(history)
            M[r, c, 2] = 1
            history.remove((r, c))
            # collect directions toward already-in-maze neighbours,
            # and push unseen neighbours onto the frontier
            check = []
            if c > 0:
                if M[r, c-1, 2] == 1:
                    check.append('L')
                elif M[r, c-1, 2] == 0:
                    history.append((r, c-1))
                    M[r, c-1, 2] = 2
            if r > 0:
                if M[r-1, c, 2] == 1:
                    check.append('U')
                elif M[r-1, c, 2] == 0:
                    history.append((r-1, c))
                    M[r-1, c, 2] = 2
            if c < cols-1:
                if M[r, c+1, 2] == 1:
                    check.append('R')
                elif M[r, c+1, 2] == 0:
                    history.append((r, c+1))
                    M[r, c+1, 2] = 2
            if r < rows-1:
                if M[r+1, c, 2] == 1:
                    check.append('D')
                elif M[r+1, c, 2] == 0:
                    history.append((r+1, c))
                    M[r+1, c, 2] = 2
            # knock down one wall toward a random in-maze neighbour
            # M = (right, down, visited)
            if len(check):
                move_direction = random.choice(check)
                if move_direction == 'L':
                    M[r, c-1, 0] = 1
                elif move_direction == 'U':
                    M[r-1, c, 1] = 1
                elif move_direction == 'R':
                    M[r, c, 0] = 1
                elif move_direction == 'D':
                    M[r, c, 1] = 1
                else:
                    # unreachable guard against an unexpected direction value
                    print('Error:select one of wall')
        laby = self.laby
        # laby matrix: 0 = path, 1 = wall
        for row in range(0, rows):
            for col in range(0, cols):
                cell_data = M[row, col]
                laby[2*row+1, 2*col+1] = 0
                if cell_data[0] == 1:
                    laby[2*row+1, 2*col+2] = 0
                if cell_data[1] == 1:
                    laby[2*row+2, 2*col+1] = 0
        # open the entrance (top-left) and the exit (bottom-right)
        laby[1][0] = 0
        laby[-2][-1] = 0
        # build the movement table N from the expanded maze
        N = self.N
        for i in range(0, 2*rows):
            for j in range(0, 2*cols):
                if laby[i, j] == 1:
                    N[i, j, 0] = 1
                    N[i, j, 1] = 1
                elif laby[i, j] == 0:
                    if laby[i, j+1] == 1:
                        N[i, j, 0] = 1
                    if laby[i+1, j] == 1:
                        N[i, j, 1] = 1
        # outer border is always a wall in both directions
        N[2*rows, :, 0] = N[2*rows, :, 1] = N[:, 2*cols, 0] = N[:, 2*cols, 1] = 1
        return laby

    def solve_laby(self, i, j):
        # solve the maze from (i, j); returns the path as [row, col] pairs
        self.start = [i, j]
        steps = self.walk()
        # backtrack from the exit cell, following decreasing step counts
        last = steps[len(self.laby) - 2][len(self.laby[0]) - 1]
        lookup_path = [[len(self.laby) - 2, len(self.laby[0]) - 1], ]
        while last > 0:
            last -= 1
            index = lookup_path[-1]
            for d in self.direction:
                move = [0, 0]
                move[0] = index[0] + d[0]
                move[1] = index[1] + d[1]
                val, err = self.at(steps, move)
                if val == last:
                    lookup_path.append(move)
                    break
        # NOTE(review): the two pops trim the endpoints before the start cell
        # is re-appended; order only matters for drawing, which ignores it.
        lookup_path.pop()
        lookup_path.reverse()
        lookup_path.pop()
        lookup_path.append([i, j])
        return lookup_path

    def at(self, grid, x):
        # bounds-checked lookup: returns (value, True) or (0, False) outside
        if x[0] < 0 or x[0] >= len(grid):
            return 0, False
        if x[1] < 0 or x[1] >= len(grid[0]):
            return 0, False
        return grid[x[0]][x[1]], True

    def walk(self):
        # BFS from self.start; returns a grid of step counts from the start
        steps = [[i * 0 for i in range(len(self.laby[0]))] for j in range(len(self.laby))]
        Q = [self.start]
        while len(Q) > 0:
            index = Q[0]
            if index == self.end:
                break
            Q = Q[1:]
            for d in self.direction:
                move = [0, 0]
                move[0] = index[0] + d[0]
                move[1] = index[1] + d[1]
                val, ok = self.at(self.laby, move)
                if not ok or val == 1:
                    continue  # outside the maze or a wall
                val, ok = self.at(steps, move)
                if not ok or val != 0:
                    continue  # already visited
                if move == self.start:
                    continue
                val, ok = self.at(steps, index)
                if ok:
                    steps[move[0]][move[1]] = val + 1
                    Q.append(move)
        return steps
class Game:
    """Pygame front end: renders the maze and handles player movement.

    Keys: arrows move, ESC quits, r resets the player, t shows the solution.
    """

    def __init__(self, num_rows, num_cols):
        self.size = (600, 600)
        self.screen = pygame.display.set_mode(self.size)
        pygame.display.set_caption('Labyrinth')
        # show a "Generating..." banner while the maze is being built
        font = pygame.font.SysFont(pygame.font.get_default_font(), 55)
        text = font.render("Generating...", 1, (255, 255, 255))
        rect = text.get_rect()
        rect.center = self.size[0]/2, self.size[1]/2
        self.screen.blit(text, rect)
        pygame.display.update(rect)
        self.rows = num_rows
        self.cols = num_cols
        self.solve_laby = False

    def start(self):
        # NOTE(review): the `if True:` toggle looks like a leftover debug
        # switch for a fixed 10x10 maze.
        if True:
            self.laby_obj = Labyrinth(self.rows, self.cols)
        else:
            self.laby_obj = Labyrinth(10, 10)
        self.laby_obj.createlaby()
        self.draw_laby()
        self.reset_player()
        self.loop()

    def draw_laby(self):
        """Paint the maze walls as black rectangles on a white background."""
        self.screen.fill((255, 255, 255))
        self.cell_width = self.size[0]/(self.cols*2+1)
        self.cell_height = self.size[1]/(self.rows*2+1)
        cols = self.cols
        rows = self.rows
        for i in range(rows*2+1):
            for j in range(cols*2+1):
                if self.laby_obj.laby[i, j] == 1:
                    pygame.draw.rect(self.screen, (0, 0, 0),
                                     (j*self.cell_width, i*self.cell_height,
                                      self.cell_width+1, self.cell_height+1))
        pygame.display.update()

    def reset_player(self):
        # Make the sprites for the player.
        rect = 0, 0, self.cell_width, self.cell_height
        rows = self.rows
        cols = self.cols
        base = pygame.Surface((self.cell_width, self.cell_height))
        base.fill((255, 255, 255))
        self.red = base.copy()
        self.green = base.copy()
        self.blue_p = base.copy()
        self.white = base.copy()
        r = (255, 0, 0)
        g = (0, 255, 0)
        b = (0, 0, 255)
        white = (255, 255, 255)
        pygame.draw.ellipse(self.blue_p, b, rect)
        pygame.draw.ellipse(self.green, g, rect)
        pygame.draw.ellipse(self.white, white, rect)
        pygame.draw.ellipse(self.red, r, rect)
        # player_laby records, in real time, every cell the player has visited
        self.player_laby = np.zeros((2*rows+1, 2*cols+1), dtype=np.uint8)
        for i in range(rows*2+1):
            for j in range(cols*2+1):
                if self.laby_obj.laby[i, j] == 0:
                    self.screen.blit(base, (j*self.cell_width, i*self.cell_height))
        # green marker on the exit cell
        self.screen.blit(self.green, (cols*2*self.cell_width, (rows*2-1)*self.cell_height))
        self.cx = 0
        self.cy = 1
        self.last_move = None  # For last move fun
        self.solve_laby = False

    def loop(self):
        """Main event loop: poll keys, move the player, redraw at 10 fps."""
        self.clock = pygame.time.Clock()
        self.keep_going = 1
        while self.keep_going:
            moved = 0
            self.clock.tick(10)
            for event in pygame.event.get():
                if event.type == QUIT:
                    self.keep_going = 0
                elif event.type == KEYDOWN:
                    if event.key == K_ESCAPE:
                        self.keep_going = 0
                    if event.key == K_r:
                        self.reset_player()
                    if event.key == K_t:
                        # show the solution path from the current position
                        self.solve_laby = True
                        self.pathes = self.laby_obj.solve_laby(self.cy, self.cx)
                    if event.key == K_DOWN:
                        self.move_player('d')
                        moved = 1
                    if event.key == K_UP:
                        self.move_player('u')
                        moved = 1
                    if event.key == K_LEFT:
                        self.move_player('l')
                        moved = 1
                    if event.key == K_RIGHT:
                        self.move_player('r')
                        moved = 1
            # held keys keep the player moving between KEYDOWN events
            keys = pygame.key.get_pressed()
            if not moved:
                if keys[K_DOWN]:
                    self.move_player('d')
                if keys[K_UP]:
                    self.move_player('u')
                if keys[K_LEFT]:
                    self.move_player('l')
                if keys[K_RIGHT]:
                    self.move_player('r')
            self.draw_player()
            pygame.display.update()

    def move_player(self, dir):
        # N = (right-wall, down-wall); a zero entry means the move is open
        no_move = 0
        try:
            if dir == 'u':
                if not self.laby_obj.N[self.cy-1, self.cx, 1]:
                    self.player_laby[self.cy, self.cx] += 1
                    self.cy -= 1
                else:
                    no_move = 1
            elif dir == 'd':
                if not self.laby_obj.N[self.cy, self.cx, 1]:
                    self.player_laby[self.cy, self.cx] += 1
                    self.cy += 1
                else:
                    no_move = 1
            elif dir == 'l':
                if not self.laby_obj.N[self.cy, self.cx-1, 0]:
                    self.player_laby[self.cy, self.cx] += 1
                    self.cx -= 1
                else:
                    no_move = 1
            elif dir == 'r':
                if not self.laby_obj.N[self.cy, self.cx, 0]:
                    self.player_laby[self.cy, self.cx] += 1
                    self.cx += 1
                else:
                    no_move = 1
            else:
                no_move = 1
        except KeyError:  # Tried to move outside screen
            no_move = 1
        # retracing the previous step also marks the arrival cell
        if ((dir == 'u' and self.last_move == 'd') or \
            (dir == 'd' and self.last_move == 'u') or \
            (dir == 'l' and self.last_move == 'r') or \
            (dir == 'r' and self.last_move == 'l')) and \
           not no_move:
            self.player_laby[self.cy, self.cx] += 1
        if not no_move:
            self.last_move = dir
        # reaching the exit cell ends the game loop
        if self.cx == 2*self.cols and self.cy+1 == 2*self.rows:
            self.keep_going = 0

    def draw_player(self):
        """Repaint visited cells, the optional solution path, and the player."""
        for i in range(self.rows*2+1):
            for j in range(self.cols*2+1):
                if self.player_laby[i, j] > 0:
                    self.screen.blit(self.white, (j*self.cell_width, i*self.cell_height))
        if self.solve_laby:
            for path in self.pathes:
                self.screen.blit(self.red, (path[1]*self.cell_width, path[0]*self.cell_height))
        self.screen.blit(self.blue_p, (self.cx*self.cell_width,
                                       self.cy*self.cell_height))
# Entry point: ask for the grid size, then start the game.
num_rows = int(input("Rows: "))      # number of rows
num_cols = int(input("Columns: "))   # number of columns
pygame.init()
g = Game(num_rows, num_cols)
g.start()
|
989,807 | c49a294962032ba06d0828d7ed0c01eee1c74a99 | class Ann(object):
def __init__(self, name, padding = 0):
self.value = False
self.name = name
self.width = len(name) + padding
self.empty = " " * self.width
def set(self):
self.value = True
def reset(self):
self.value = False
def text(self):
if self.value:
return self.name.ljust(self.width)
else:
return self.empty
class Anns(object):
    """The full row of HP-41 display annunciators, in on-screen order."""

    def __init__(self):
        self.annBAT = Ann("BAT", 3)
        self.annUSER = Ann("USER", 3)
        self.annG = Ann("G")
        self.annRAD = Ann("RAD", 3)
        self.annSHIFT = Ann("SHIFT", 3)
        self.ann0 = Ann("O")
        self.ann1 = Ann("1")
        self.ann2 = Ann("2")
        self.ann3 = Ann("3")
        self.ann4 = Ann("4", 3)
        self.annPRGM = Ann("PRGM", 3)
        self.annALPHA = Ann("ALPHA")

    def text(self):
        """Concatenated fixed-width text of every annunciator, in order."""
        ordered = (self.annBAT, self.annUSER, self.annG, self.annRAD,
                   self.annSHIFT, self.ann0, self.ann1, self.ann2,
                   self.ann3, self.ann4, self.annPRGM, self.annALPHA)
        return "".join(ann.text() for ann in ordered)
class LCD41(object):
    """Renders the HP-41 LCD (text line plus annunciator row) as an SVG string."""

    def __init__(self):
        pass

    def image(self, text="", anns=Anns()):
        """Return an SVG document showing `text` and the annunciator row.

        Args:
            text: the main display line (HP-41 character set font).
            anns: an Anns instance supplying the annunciator text.
        """
        # NOTE(review): the mutable default `anns=Anns()` is shared across
        # calls; harmless here because it is only read, never mutated.
        return '''<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<svg
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:cc="http://creativecommons.org/ns#"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:svg="http://www.w3.org/2000/svg"
xmlns="http://www.w3.org/2000/svg"
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
width="68mm"
height="13mm"
viewBox="0 0 68 13"
version="1.1"
id="svg8"
inkscape:version="1.0rc1 (09960d6, 2020-04-09)"
sodipodi:docname="lcd.svg">
<defs
id="defs2">
<rect
id="rect8277"
height="4.2763124"
width="2.0045214"
y="45.034915"
x="31.003265" />
</defs>
<sodipodi:namedview
id="base"
pagecolor="#ffffff"
bordercolor="#666666"
borderopacity="1.0"
inkscape:pageopacity="0.0"
inkscape:pageshadow="2"
inkscape:zoom="1.979899"
inkscape:cx="107.93637"
inkscape:cy="173.03661"
inkscape:document-units="mm"
inkscape:current-layer="layer1"
inkscape:document-rotation="0"
showgrid="false"
inkscape:window-width="1252"
inkscape:window-height="855"
inkscape:window-x="80"
inkscape:window-y="23"
inkscape:window-maximized="0" />
<metadata
id="metadata5">
<rdf:RDF>
<cc:Work
rdf:about="">
<dc:format>image/svg+xml</dc:format>
<dc:type
rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
<dc:title />
</cc:Work>
</rdf:RDF>
</metadata>
<g
inkscape:label="Layer 1"
inkscape:groupmode="layer"
id="layer1"
style="display:inline">
<rect
style="fill:#000000;stroke-width:0.264583"
id="rect7372"
width="68"
height="13"
x="0"
y="0"
ry="0" />
<rect
style="fill:#dbf0cb;stroke-width:0.264583;fill-opacity:1"
id="rect7374"
width="66"
height="11"
x="1"
y="1"
rx="1"
ry="1"
sodipodi:insensitive="true" />
<text
id="text8269"
y="9.413212"
x="4.067822"
style="font-size:2.11667px;line-height:1.25;font-family:sans-serif;-inkscape-font-specification:'sans-serif, Normal';stroke-width:0.264583"
xml:space="preserve"><tspan
style="stroke-width:0.264583"
y="9.413212"
x="4.067822"
id="tspan8267"
sodipodi:role="line" /></text>
<text
id="text8273"
y="11.417732"
x="2.063301"
style="font-size:2.21667px;line-height:1.25;font-family:'Andale Mono';-inkscape-font-specification:'Andale Mono, Normal';inline-size:64.1291;stroke-width:0.264583"
xml:space="preserve"><tspan
sodipodi:role="line"
x="2.063301"
y="11.417732"><tspan
style="stroke-width:0.264583">{annunciators}</tspan></tspan></text>
<text
style="line-height:1.25;font-family:sans-serif;font-size:3.88055555999999990px;-inkscape-font-specification:'sans-serif, Normal';white-space:pre;shape-inside:url(#rect8277);"
id="text8275"
xml:space="preserve" />
<text
id="text8283"
y="7.542324"
x="2.5"
style="font-size:6.08px;line-height:1.25;font-family:'HP41 Character Set Xtended';-inkscape-font-specification:'HP41 Character Set Xtended';stroke-width:0.264583"
xml:space="preserve"><tspan
style="stroke-width:0.264583"
y="7.542324"
x="2.5"
id="tspan8281"
sodipodi:role="line">{text}</tspan></text>
</g>
</svg>
'''.format(text=text, annunciators=anns.text())
|
989,808 | 2f8ff0f26549d65e7a88b92495b384150d710d0a |
from graphics import *
import random
# win2 (intro screen)
win2 = GraphWin('Jogo da Bolinha', 800, 600)
win2.setBackground('green')
# title
titulo = Text(Point(390, 70), 'Soccer Ball')
titulo.setSize(18)
titulo.setStyle('bold')
titulo.setFace('courier')
titulo.draw(win2)
# player-name entry box
txtnome = Text(Point(400, 280), 'Nome do jogador')
txtnome.draw(win2)
input_box = Entry(Point(400, 300), 13)
input_box.draw(win2)
# NOTE(review): read immediately after drawing, before the player can type —
# almost certainly always ""; the game-over screen re-reads the box instead.
nome = input_box.getText()
# field markings: top, bottom and half-way lines
linhaSuperior = Line(Point(0, 40), Point(800, 40))
linhaSuperior.setWidth(3)
linhaSuperior.setOutline("black")
linhaSuperior.setFill('white')
linhaSuperior.draw(win2)
linhaInferior = Line(Point(0, 550), Point(800, 550))
linhaInferior.setWidth(3)
linhaInferior.setOutline('black')
linhaInferior.setFill('white')
linhaInferior.draw(win2)
linhaMeio = Line(Point(0, 300), Point(800, 300))
linhaMeio.setWidth(3)
linhaMeio.setOutline('black')
linhaMeio.setFill('white')
linhaMeio.draw(win2)
# centre circle
circulo = Circle(Point(400, 300), 50)
circulo.setWidth(3)
circulo.setOutline("white")
circulo.draw(win2)
# goal posts (short white lines at top and bottom)
linhaTrave1 = Line(Point(250, 550), Point(250, 530))
linhaTrave1.setWidth(3)
linhaTrave1.setOutline('white')
linhaTrave1.setFill('white')
linhaTrave1.draw(win2)
linhaTrave2 = Line(Point(540, 550), Point(540, 530))
linhaTrave2.setWidth(3)
linhaTrave2.setOutline('white')
linhaTrave2.setFill('white')
linhaTrave2.draw(win2)
linhaTrave3 = Line(Point(540, 60), Point(540, 40))
linhaTrave3.setWidth(3)
linhaTrave3.setOutline('white')
linhaTrave3.setFill('white')
linhaTrave3.draw(win2)
linhaTrave4 = Line(Point(250, 60), Point(250, 40))
linhaTrave4.setWidth(3)
linhaTrave4.setOutline('white')
linhaTrave4.setFill('white')
linhaTrave4.draw(win2)
# decorative black rectangles
retangulo1 = Rectangle(Point(80, 80), Point(116, 100))
retangulo1.setFill("black")
retangulo1.setOutline("black")
retangulo1.draw(win2)
retangulo2 = Rectangle(Point(600, 80), Point(636, 100))
retangulo2.setFill("black")
retangulo2.setOutline("black")
retangulo2.draw(win2)
retangulo3 = Rectangle(Point( 500, 200), Point(536, 220))
retangulo3.setFill("black")
retangulo3.setOutline("black")
retangulo3.draw(win2)
retangulo = Rectangle(Point(200, 200), Point(236, 220))
retangulo.setFill("black")
retangulo.setOutline("black")
retangulo.draw(win2)
# wait for a click, then close the intro screen
win2.getMouse()
win2.close()
# win (game screen)
win = GraphWin("Bolinha ...", 800, 600)
win.setBackground('green')
# field markings
linhaSuperior = Line(Point(0, 40), Point(800, 40))
linhaSuperior.setWidth(3)
linhaSuperior.setOutline("black")
linhaSuperior.setFill('white')
linhaSuperior.draw(win)
linhaInferior = Line(Point(0, 550), Point(800, 550))
linhaInferior.setWidth(3)
linhaInferior.setOutline('black')
linhaInferior.setFill('white')
linhaInferior.draw(win)
linhaMeio = Line(Point(0, 300), Point(800, 300))
linhaMeio.setWidth(3)
linhaMeio.setOutline('black')
linhaMeio.setFill('white')
linhaMeio.draw(win)
circulo = Circle(Point(400, 300), 50)
circulo.setWidth(3)
circulo.setOutline("white")
circulo.draw(win)
# "VIDAS:" (lives) label
txtv = Text(Point(730, 560), 'VIDAS:')
txtv.setFace('courier')
txtv.setStyle('bold')
txtv.draw(win)
# life marker circles
b1 = Circle(Point(705, 580), 10)
b1.setOutline('black')
b1.setFill('white')
b1.draw(win)
b2 = Circle(Point(728, 580), 10)
b2.setOutline('black')
b2.setFill('white')
b2.draw(win)
b3 = Circle(Point(751, 580), 10)
b3.setOutline('black')
b3.setFill('white')
b3.draw(win)
vidas = 3
# ball starting position and radius
col = 390
lin = 80
raio = 15
circulo = Circle(Point(col, lin), raio)
circulo.setFill(color_rgb(250, 10, 200))
circulo.draw(win)
# vertical speed grows by velocity_increment per point scored
initial_speed = 5
velocity_increment = 1
# score display
ptsstring = 0
pontos = Text(Point(400, 575), " " + str(ptsstring))
pontos.setSize(14)
pontos.draw(win)
pts = 0
# paddle: left edge, length, and the drawn line
colIni = 10
tamanho = 100
barra = Line(Point(colIni, 530), Point(colIni + tamanho, 530))
barra.setOutline('black')
barra.setFill('black')
barra.setWidth(10)
barra.draw(win)
velocidade = 5
bateu = True      # True right after a paddle hit: pick a new horizontal step
continuar = True  # main-loop flag
import time  # frame delay; imported explicitly instead of relying on
             # `from graphics import *` leaking the name

# Main game loop: bounce the ball, track the paddle, manage lives/score.
while continuar:
    if bateu:
        # new random horizontal step after each paddle hit
        passo = random.randrange(1, 10)
        if random.random() < 0.5:
            passo = -passo
        bateu = False
    # bounce off the side walls
    if (col + raio + passo) > 800:
        passo = -passo
    if (col - raio + passo) < 0:
        passo = -passo
    # bounce off the top line
    if lin < 65:
        velocidade = -velocidade
    # paddle hit: score, speed up, bounce back up
    if 515 <= lin <= 530 and colIni < col < (colIni + tamanho):
        pts += 1
        velocidade = initial_speed + (pts * velocity_increment)
        velocidade = -velocidade
        pontos.setText(pts)
    # redraw the ball at its new position
    circulo.undraw()
    col += passo
    lin += velocidade
    circulo = Circle(Point(col, lin), 15)
    circulo.setFill(color_rgb(250, 10, 200))
    circulo.draw(win)
    # LIVES MECHANISM: ball crossed the bottom line
    if lin >= 550:
        circulo.undraw()
        vidas = vidas - 1
        if vidas > 0:
            # respawn the ball at the top
            col = 390
            lin = 80
            raio = 15
            circulo = Circle(Point(col, lin), raio)
            circulo.setFill(color_rgb(250, 10, 200))
            circulo.draw(win)
            circulo.undraw()
            col += passo
            lin += velocidade
            circulo = Circle(Point(col, lin), 15)
            circulo.setFill(color_rgb(250, 10, 200))
            circulo.draw(win)
            # remove one life marker
            if vidas >= 2:
                b1.undraw()
            else:
                b2.undraw()
        # GAME OVER FROM HERE
        else:  # elif vidas == 0:
            win.close()
            win3 = GraphWin('FIM DE JOGO', 600, 600)
            win3.setBackground('black')
            gameovertxt = Text(Point(300, 100), 'FIM DE JOGO')
            gameovertxt.setFace('courier')
            gameovertxt.setSize(20)
            gameovertxt.setTextColor('red')
            placar = Text(Point(300, 250), pts)
            placar.setFace('courier')
            placar.setSize(18)
            placar.setTextColor('Red')
            pontos = Text(Point(310, 300), 'defesas')
            pontos.setFace('courier')
            pontos.setSize(18)
            pontos.setTextColor('red')
            nome1 = Text(Point(300, 200), input_box.getText())
            nome1.setFace('courier')
            nome1.setSize(18)
            nome1.setTextColor('red')
            nome1.draw(win3)
            placar.draw(win3)
            pontos.draw(win3)
            gameovertxt.draw(win3)
            win3.getMouse()
            win3.close()
            # BUG FIX: the original fell through and kept polling the
            # already-closed `win` (win.checkKey() raises). End the loop.
            continuar = False
            break
    # paddle movement via left/right arrow keys
    tecla = win.checkKey()
    # quit the game
    if tecla == "Escape":
        continuar = False
        continue
    if tecla == "Right":
        if (colIni + 20) < 701:
            colIni = colIni + 20
            barra.undraw()
            barra = Line(Point(colIni, 530), Point(colIni + 100, 530))
            barra.setOutline('black')
            barra.setFill('black')
            barra.setWidth(10)
            barra.draw(win)
    if tecla == "Left":
        if (colIni - 20) > -1:
            colIni = colIni - 20
            barra.undraw()
            barra = Line(Point(colIni, 530), Point(colIni + 100, 530))
            barra.setOutline('black')
            barra.setFill('black')
            barra.setWidth(10)
            barra.draw(win)
    # give the human time to react
    time.sleep(.05)
# safe even after game over: GraphWin.close() is a no-op on a closed window
win.close()
|
989,809 | f40325084c8f349b79db38318664acffe1527eb5 | nums = input().split(' ')
# Parse: total node count and two level/height parameters l and r.
total = int(nums[0])
l = int(nums[1])
r = int(nums[2])
# presumably: minimum reachable when l full binary levels (2^l - 1 nodes)
# are followed by a chain of the remaining nodes; maximum when r full
# levels are followed by the rest spread at width 2^(r-1)
# -- TODO(review): confirm against the original problem statement
small = pow(2, l) - 1 + total - l
big = pow(2, r) - 1 + (total - r) * pow(2, r - 1)
print(small, end=" ")
print(big)
|
989,810 | 3f4866d361d382a242f2b5ca7c86c2fefeede2e5 | import torch
import torch.nn as nn
import torch.nn.functional as F
import sys
sys.path.append('./')
sys.path.append('../')
from layers.modules.l2norm import *
def vgg(cfg, i, batch_norm=False):
    """Build the VGG-16 backbone as a flat list of layers.

    Args:
        cfg: layer spec; ints are conv output channels, 'M' is a 2x2
            max-pool, 'C' is a 2x2 max-pool with ceil_mode=True.
        i: number of input channels (3 for RGB images).
        batch_norm: insert a BatchNorm2d after every conv when True.

    Returns:
        list of nn.Module (not wrapped in nn.Sequential), ending with the
        SSD-specific pool5 / dilated conv6 / conv7 head.
    """
    layers = []
    in_channels = i
    for v in cfg:
        if v == 'M':
            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
        elif v == 'C':
            # ceil_mode keeps odd-sized feature maps from losing a row/col
            layers += [nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)]
        else:
            conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
            if batch_norm:
                # Fix: nn.Relu does not exist; the class is nn.ReLU.
                layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
            else:
                layers += [conv2d, nn.ReLU(inplace=True)]
            in_channels = v
    # SSD replaces VGG's fc6/fc7 with a dilated conv6 and a 1x1 conv7.
    pool5 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
    conv6 = nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6)
    conv7 = nn.Conv2d(1024, 1024, kernel_size=1)
    layers += [pool5,
               conv6,
               nn.ReLU(inplace=True),
               conv7,
               nn.ReLU(inplace=True)]
    return layers
# VGG-16 layer spec: ints are conv output channels, 'M' = 2x2 max-pool,
# 'C' = 2x2 max-pool with ceil_mode (see vgg() above).
base = [64, 64, 'M',
        128, 128, 'M',
        256, 256, 256, 'C',
        512, 512, 512, 'M',
        512, 512, 512]
# Build the backbone for 3-channel (RGB) input and print it for inspection.
vgg_base = vgg(base, 3)
print("vgg_base ", vgg_base)
def add_extra(cfg, i, batch_norm=False):
    """Construct the extra feature layers appended after the VGG backbone.

    Ints in *cfg* are conv output channels; an 'S' entry means the next conv
    downsamples with stride 2 (its width is taken from the entry after 'S').
    Kernel size alternates between 1x1 and 3x3.
    """
    extra_layers = []
    prev = i
    use_3x3 = False  # alternates: False -> 1x1 conv, True -> 3x3 conv
    for idx, spec in enumerate(cfg):
        if prev != 'S':
            kernel = 3 if use_3x3 else 1
            if spec == 'S':
                # Stride-2 conv that halves the feature map.
                extra_layers.append(
                    nn.Conv2d(prev, cfg[idx + 1], kernel_size=kernel,
                              stride=2, padding=1))
            else:
                extra_layers.append(nn.Conv2d(prev, spec, kernel_size=kernel))
            use_3x3 = not use_3x3
        prev = spec
    return extra_layers
def multibox(vgg, extra_layers, cfg, num_classes):
    """Attach localization and confidence heads to the detection sources.

    cfg[k] is the number of prior boxes per spatial location of the k-th
    source map; each prior predicts 4 offsets and num_classes scores.
    Returns (vgg, extra_layers, (loc_layers, conf_layers)).
    """
    loc_layers = []
    conf_layers = []
    # Backbone sources: conv4_3 (index 21) and the final conv7 (index -2).
    for k, src in enumerate([21, -2]):
        out_ch = vgg[src].out_channels
        loc_layers.append(
            nn.Conv2d(out_ch, cfg[k] * 4, kernel_size=3, padding=1))
        conf_layers.append(
            nn.Conv2d(out_ch, cfg[k] * num_classes, kernel_size=3, padding=1))
    # Every second extra layer is also a source; k continues counting at 2.
    for k, layer in enumerate(extra_layers[1::2], 2):
        loc_layers.append(
            nn.Conv2d(layer.out_channels, cfg[k] * 4, kernel_size=3, padding=1))
        conf_layers.append(
            nn.Conv2d(layer.out_channels, cfg[k] * num_classes, kernel_size=3, padding=1))
    return vgg, extra_layers, (loc_layers, conf_layers)
from math import sqrt as sqrt
from itertools import product as product
# SSD300 configuration for PASCAL VOC (20 object classes + background).
voc = {
    'num_classes': 21,
    'lr_steps': (80000, 100000, 120000),    # LR decay milestones (iterations)
    'max_iter': 120000,
    'feature_maps': [38, 19, 10, 5, 3, 1],  # spatial size of each source map
    'min_dim': 300,                          # network input resolution
    'image_size': 300,
    'steps': [8, 16, 32, 64, 100, 300],      # effective stride of each map
    'min_sizes': [30, 60, 111, 162, 213, 264],
    'max_sizes': [60, 111, 162, 213, 264, 315],
    'aspect_ratios': [[2], [2, 3], [2, 3], [2, 3], [2], [2]],
    'variance': [0.1, 0.2],                  # box-encoding variances
    'clip': True,                            # clamp priors to [0, 1]
    'name': 'VOC',
}
class PriorBox(nn.Module):
    """Compute SSD default (prior) boxes in center-offset form.

    A single forward() call yields an (N, 4) tensor of (cx, cy, w, h)
    entries for every source feature-map location, expressed relative to
    the input image (values in [0, 1] when clipping is enabled).
    """

    def __init__(self, cfg):
        super(PriorBox, self).__init__()
        # Copy everything we need out of the config dict up front.
        self.image_size = cfg['min_dim']
        # number of priors per feature-map location (either 4 or 6)
        self.num_priors = len(cfg['aspect_ratios'])
        self.variance = cfg['variance'] or [0.1]
        self.feature_maps = cfg['feature_maps']
        self.min_sizes = cfg['min_sizes']
        self.max_sizes = cfg['max_sizes']
        self.steps = cfg['steps']
        self.aspect_ratios = cfg['aspect_ratios']
        self.clip = cfg['clip']
        self.version = cfg['name']
        if any(v <= 0 for v in self.variance):
            raise ValueError('Variances must be greater than 0')

    def forward(self):
        """Return every default box as one (N, 4) tensor of (cx, cy, w, h)."""
        boxes = []
        for k, fmap in enumerate(self.feature_maps):
            # Per-map constants (hoisted out of the cell loop; identical values):
            # f_k is the map size implied by the stride, s_k the min scale,
            # s_k_prime the intermediate scale sqrt(s_k * s_{k+1}).
            f_k = self.image_size / self.steps[k]
            s_k = self.min_sizes[k] / self.image_size
            s_k_prime = sqrt(s_k * (self.max_sizes[k] / self.image_size))
            for i, j in product(range(fmap), repeat=2):
                # Box center, normalized to (0, 1) relative coordinates.
                cx = (j + 0.5) / f_k
                cy = (i + 0.5) / f_k
                # Square box at the minimum scale for this map.
                boxes.extend([cx, cy, s_k, s_k])
                # Square box at the intermediate scale.
                boxes.extend([cx, cy, s_k_prime, s_k_prime])
                # Rectangles for each aspect ratio and its inverse.
                for ar in self.aspect_ratios[k]:
                    boxes.extend([cx, cy, s_k * sqrt(ar), s_k / sqrt(ar)])
                    boxes.extend([cx, cy, s_k / sqrt(ar), s_k * sqrt(ar)])
        output = torch.Tensor(boxes).view(-1, 4)
        if self.clip:
            output.clamp_(max=1, min=0)
        return output
# Spec for the extra downsampling layers appended after conv7
# ('S' marks a stride-2 conv; see add_extra() above).
extra = [256, 'S', 512,
         128, 'S', 256,
         128, 256,
         128, 256]
conv_extras = add_extra(extra, 1024)
print("conv_extras " , conv_extras)
# Number of prior boxes per location on each source feature map.
mbox = [4, 6, 6, 6, 4, 4]
base_, extra_, head_ = multibox(vgg_base, conv_extras, mbox, num_classes=21)
print("head_ ", head_)
print("==============" * 12)
priorbox = PriorBox(voc)  # instantiate first; the boxes come from calling it
output = priorbox()
print("output ", output, " size ", output.size())
# Step-by-step re-derivation of the prior boxes outside the class, printing
# intermediate values for inspection (mirrors PriorBox.forward above).
mean = []
for k, f in enumerate(voc["feature_maps"]): # 'feature_maps': [38, 19, 10, 5, 3, 1],
    print("k , f ", k , " ", f, " rangf ", range(f))
    productRes = product(range(f), repeat=2)
    print("productRes ", productRes)
    # 300 / 8 , 16, 32.... 300
    f_k = voc["min_dim"] / voc["steps"][k]
    print("f_k ", f_k)
    s_k = voc["min_sizes"][k] / voc["image_size"]
    print("s_k", s_k)
    for i, j in productRes:
        #traverse every cell of feature map
        #print(" i j ", i , " " , j)
        #central point
        cx = (j + 0.5) / f_k
        cy = (i + 0.5) / f_k
        #print(" cx cy ", cx, " ", cy)
        mean += [cx, cy, s_k, s_k]
        # square prior at the intermediate scale sqrt(s_k * s_(k+1))
        s_k_prime = sqrt(s_k * (voc["max_sizes"][k] / voc["image_size"])) # 'max_sizes': [60, 111, 162, 213, 264, 315]
        mean += [cx, cy, s_k_prime, s_k_prime]
        for ar in voc["aspect_ratios"][k]: # 'aspect_ratios': [[2], [2, 3], [2, 3], [2, 3], [2], [2]],
            mean += [cx, cy, s_k * sqrt(ar), s_k / sqrt(ar)]
            mean += [cx, cy, s_k / sqrt(ar), s_k * sqrt(ar)]
# Stack the flat list into an (N, 4) tensor of (cx, cy, w, h) priors.
mean = torch.Tensor(mean).view(-1, 4)
print("mean ", mean)
class SSD(nn.Module):
    """Single Shot Multibox Architecture
    The network is composed of a base VGG network followed by the
    added multibox conv layers.  Each multibox layer branches into
        1) conv2d for class conf scores
        2) conv2d for localization predictions
        3) associated priorbox layer to produce default bounding
           boxes specific to the layer's feature map size.
    See: https://arxiv.org/pdf/1512.02325.pdf for more details.

    Args:
        phase: (string) Can be "test" or "train"
        size (int): input image size
        base (model_object): VGG16 layers for input, size of either 300 or 500
        extras (model_object): extra layers that feed to multibox loc and conf layers
        head: "multibox head" consists of loc and conf conv layers
    """

    def __init__(self, phase, size, base, extras, head, num_classes):
        super(SSD, self).__init__()
        self.phase = phase
        self.num_classes = num_classes
        self.cfg = voc
        self.priorbox = PriorBox(self.cfg)
        self.priors = self.priorbox.forward()
        self.size = size
        # SSD network
        self.vgg = nn.ModuleList(base)
        # Layer learns to scale the l2 normalized features from conv4_3
        self.L2Norm = L2Norm(512, 20)
        self.extras = nn.ModuleList(extras)
        self.loc = nn.ModuleList(head[0])   # head is produced by multibox()
        self.conf = nn.ModuleList(head[1])
        if phase == 'test':
            self.softmax = nn.Softmax(dim=-1)
            self.detect = Detect(num_classes, 0, 200, 0.01, 0.45)

    def forward(self, x):
        """Applies network layers and ops on input image(s) x.
        Args:
            x: input image or batch of images. Shape: [batch,3,300,300].
        Return:
            Depending on phase:
            test:
                Variable(tensor) of output class label predictions,
                confidence score, and corresponding location predictions for
                each object detected. Shape: [batch,topk,7]
            train:
                list of concat outputs from:
                    1: confidence layers, Shape: [batch*num_priors,num_classes]
                    2: localization layers, Shape: [batch,num_priors*4]
                    3: priorbox layers, Shape: [2,num_priors*4]
        """
        # sources collects the detection feature maps; loc/conf collect
        # per-source location and class predictions for every prior box.
        sources = list()
        loc = list()
        conf = list()
        # Run the backbone up to conv4_3 and record its L2-normalized output.
        for k in range(23):
            x = self.vgg[k](x)
        s = self.L2Norm(x)
        sources.append(s)
        # Continue through conv7 and record its output.
        for k in range(23, len(self.vgg)):
            x = self.vgg[k](x)
        sources.append(x)
        # Run the extra layers; every second one is a detection source.
        for k, v in enumerate(self.extras):
            x = F.relu(v(x), inplace=True)
            if k % 2 == 1:
                sources.append(x)
        # Apply the loc/conf heads to every source map; permute to
        # channels-last so the flattened layout groups priors per location.
        for (x, l, c) in zip(sources, self.loc, self.conf):
            loc.append(l(x).permute(0, 2, 3, 1).contiguous())
            conf.append(c(x).permute(0, 2, 3, 1).contiguous())
        loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)
        conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)
        if self.phase == "test":
            output = self.detect(
                loc.view(loc.size(0), -1, 4),  # loc preds
                self.softmax(conf.view(conf.size(0), -1, self.num_classes)),  # conf preds
                self.priors.type(type(x.data))  # default boxes
            )
        else:
            # For training, return raw loc/conf predictions plus the priors.
            output = (
                loc.view(loc.size(0), -1, 4),
                conf.view(conf.size(0), -1, self.num_classes),
                self.priors
            )
        return output

    def load_weights(self, base_file):
        """Load a serialized state dict (.pth or .pkl) from *base_file*."""
        import os  # local import: this module never imports os at the top
        other, ext = os.path.splitext(base_file)
        # Fix: the original `ext == '.pkl' or '.pth'` was always truthy,
        # because the non-empty string '.pth' is true on its own, so the
        # unsupported-extension branch was unreachable.
        if ext in ('.pkl', '.pth'):
            print('Loading weights into state dict...')
            self.load_state_dict(torch.load(base_file,
                                            map_location=lambda storage, loc: storage))
            print('Finished!')
        else:
            print('Sorry only .pth and .pkl files supported.')
def build_ssd(phase, size=300, num_classes=21):
    """Assemble a complete SSD model for the given phase ('train' or 'test')."""
    if phase not in ("test", "train"):
        print("ERROR: Phase: " + phase + " not recognized, it must be either 'test' or 'train'")
        return
    # Build the VGG backbone plus the loc/conf prediction heads, then wire
    # everything together in the SSD module.
    base_, extras_, head_ = multibox(
        vgg(base, 3), conv_extras, mbox, num_classes)
    return SSD(phase, size, base_, extras_, head_, num_classes)
# Smoke test: assemble a training-phase SSD300 and print the module tree.
ssd = build_ssd('train')
print("ssd ", ssd)
|
989,811 | 1940cefd3503f96f2cf67b7609f312f94fc4cff4 | ######################################################################################################################
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance #
# with the License. A copy of the License is located at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# or in the 'license' file accompanying this file. This file is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES #
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions #
# and limitations under the License. #
######################################################################################################################
import config
import subprocess
from flask import request
from flask_restful import Resource, reqparse
import logging
import base64
from decorators import private_api
from requests import get
import json
import ast
import shlex
import sys
import errors
import os
import uuid
import re
import random
import string
import shutil
logger = logging.getLogger("api")
class Job(Resource):
    @private_api
    def get(self):
        """
        Return information for a given job
        ---
        tags:
            - Scheduler
        parameters:
            - in: body
              name: body
              schema:
                optional:
                  - job_id
                properties:
                    job_id:
                       type: string
                       description: ID of the job
        responses:
            200:
                description: List of all jobs
            500:
                description: Backend error
        """
        parser = reqparse.RequestParser()
        parser.add_argument("job_id", type=str, location="args")
        args = parser.parse_args()
        job_id = args["job_id"]
        if job_id is None:
            return errors.all_errors(
                "CLIENT_MISSING_PARAMETER", "job_id (str) parameter is required"
            )
        try:
            qstat_command = config.Config.PBS_QSTAT + " -f " + job_id + " -Fjson"
            try:
                get_job_info = subprocess.check_output(shlex.split(qstat_command))
                try:
                    sanitize_input = get_job_info.decode("utf-8")
                    # Raw string so \d is a regex digit class, not a string escape.
                    for match in re.findall(
                        r'"project":(\d+),', sanitize_input, re.MULTILINE
                    ):
                        # Clear case where project starts with digits to prevent leading zero errors
                        print(
                            f'Detected "project":{match}, > Will be replaced to prevent int leading zero error'
                        )
                        sanitize_input = sanitize_input.replace(
                            f'"project":{match},', f'"project":"{match}",'
                        )
                    job_info = ast.literal_eval(sanitize_input)
                except Exception as err:
                    # Fix: this branch previously formatted str(job_info), but
                    # job_info is unbound when literal_eval fails, so the
                    # resulting NameError masked the intended 210 response.
                    # Report the actual parsing error instead.
                    return {
                        "success": False,
                        "message": "Unable to retrieve this job. Job may have terminated. Error: "
                        + str(err),
                    }, 210
                job_key = list(job_info["Jobs"].keys())[0]
                return {"success": True, "message": job_info["Jobs"][job_key]}, 200
            except Exception as err:
                return {
                    "success": False,
                    "message": "Unable to retrieve Job ID (job may have terminated and is no longer in the queue)",
                }, 210
        except Exception as err:
            return {"success": False, "message": "Unknown error: " + str(err)}, 500

    @private_api
    def post(self):
        """
        Submit a job to the queue
        ---
        tags:
            - Scheduler
        parameters:
            - in: body
              name: body
              schema:
                required:
                  - payload
                optional:
                  - interpreter
                properties:
                    payload:
                        type: string
                        description: Base 64 encoding of a job submission file
                    interpreter:
                        type: string
                        description: Interpreter to use qsub or bash
        responses:
            200:
                description: Job submitted correctly
            500:
                description: Backend error
        """
        parser = reqparse.RequestParser()
        parser.add_argument("payload", type=str, location="form")
        parser.add_argument("interpreter", type=str, location="form")
        parser.add_argument("input_file_path", type=str, location="form")
        args = parser.parse_args()
        try:
            payload = base64.b64decode(args["payload"]).decode()
        except KeyError:
            return errors.all_errors(
                "CLIENT_MISSING_PARAMETER", "payload (base64) parameter is required"
            )
        except UnicodeError:
            return errors.all_errors(
                "UNICODE_ERROR", "payload (str) does not seems to be a valid base64"
            )
        except Exception as err:
            return errors.all_errors(type(err).__name__, err)
        try:
            request_user = request.headers.get("X-SOCA-USER")
            if request_user is None:
                return errors.all_errors("X-SOCA-USER_MISSING")
            # Basic input verification: sanitize job name/project so they
            # cannot inject characters into the rewritten PBS directives.
            check_job_name = re.search(r"#PBS -N (.+)", payload)
            check_job_project = re.search(r"#PBS -P (.+)", payload)
            if check_job_name:
                sanitized_job_name = re.sub(
                    r"\W+", "", check_job_name.group(1)
                )  # remove invalid char,space etc...
                payload = payload.replace(
                    "#PBS -N " + check_job_name.group(1),
                    "#PBS -N " + sanitized_job_name,
                )
            else:
                sanitized_job_name = ""
            if check_job_project:
                sanitized_job_project = re.sub(
                    r"\W+", "", check_job_project.group(1)
                )  # remove invalid char,space etc...
                payload = payload.replace(
                    "#PBS -P " + check_job_project.group(1),
                    "#PBS -P " + sanitized_job_project,
                )
            if args["interpreter"] is None:
                interpreter = config.Config.PBS_QSUB
            else:
                interpreter = args["interpreter"]
            try:
                # Random suffix keeps concurrent submissions from colliding.
                random_id = "".join(
                    random.choice(string.ascii_letters + string.digits)
                    for _ in range(10)
                )
                job_submit_file = "job_submit_" + str(random_id) + ".sh"
                group_ownership = f"{request_user}{config.Config.GROUP_NAME_SUFFIX}"
                if args["input_file_path"]:
                    job_output_path = args["input_file_path"]
                else:
                    # create new job directory if needed
                    job_output_folder = (
                        config.Config.USER_HOME
                        + "/"
                        + request_user
                        + "/soca_job_output/"
                    )
                    job_output_path = (
                        job_output_folder + sanitized_job_name + "_" + str(random_id)
                    )
                    os.makedirs(job_output_path)
                    os.chmod(job_output_folder, 0o700)
                    shutil.chown(
                        job_output_folder, user=request_user, group=group_ownership
                    )
                    shutil.chown(
                        job_output_path, user=request_user, group=group_ownership
                    )
                    os.chmod(job_output_path, 0o700)
                os.chdir(job_output_path)
                with open(job_submit_file, "w") as text_file:
                    text_file.write(payload)
                shutil.chown(
                    job_output_path + "/" + job_submit_file,
                    user=request_user,
                    group=group_ownership,
                )
                os.chmod(job_output_path + "/" + job_submit_file, 0o700)
                # Run the submission as the requesting user, not as the API.
                submit_job_command = interpreter + " " + job_submit_file
                launch_job = subprocess.check_output(
                    ["su", request_user, "-c", submit_job_command],
                    stderr=subprocess.PIPE,
                )
                if interpreter == config.Config.PBS_QSUB:
                    # qsub prints "<id>.<server>"; keep only the numeric id.
                    job_id = ((launch_job.decode("utf-8")).rstrip().lstrip()).split(
                        "."
                    )[0]
                    return {"success": True, "message": str(job_id)}, 200
                else:
                    return {
                        "success": True,
                        "message": "Your Linux command has been executed successfully. Output (if any) can be accessed on <a href='/my_files?path="
                        + job_output_path
                        + "'>"
                        + job_output_path
                        + "</a>",
                    }, 200
            except subprocess.CalledProcessError as e:
                return {
                    "success": False,
                    "message": {
                        "error": "Unable to submit the job. Please verify your script file (eg: malformed inputs, syntax error, extra space in the PBS variables ...) or refer to the 'stderr' message.",
                        "stderr": "{}".format(
                            e.stderr.decode(sys.getfilesystemencoding())
                        ),
                        "stdout": "{}".format(
                            e.output.decode(sys.getfilesystemencoding())
                        ),
                        "job_script": str(payload),
                    },
                }, 500
            except Exception as err:
                return {
                    "success": False,
                    "message": {
                        "error": "Unable to run Qsub command.",
                        "trace": str(err),
                        "job_script": str(payload),
                    },
                }, 500
        except Exception as err:
            return errors.all_errors(type(err).__name__, err)

    @private_api
    def delete(self):
        """
        Delete a job from the queue
        ---
        tags:
            - Scheduler
        parameters:
            - in: body
              name: body
              schema:
                required:
                  - job_id
                properties:
                    job_id:
                        type: string
                        description: ID of the job to remove
        responses:
            200:
                description: Job submitted correctly
            500:
                description: Backend error
        """
        parser = reqparse.RequestParser()
        parser.add_argument("job_id", type=str, location="form")
        args = parser.parse_args()
        job_id = args["job_id"]
        if job_id is None or job_id == "":
            return errors.all_errors(
                "CLIENT_MISSING_PARAMETER", "job_id (str) parameter is required"
            )
        # Look the job up through our own API to learn its owner.
        get_job_info = get(
            config.Config.FLASK_ENDPOINT + "/api/scheduler/job",
            headers={"X-SOCA-TOKEN": config.Config.API_ROOT_KEY},
            params={"job_id": job_id},
            verify=False,
        )  # nosec
        if get_job_info.status_code != 200:
            return {
                "success": False,
                "message": "Unable to retrieve this job. Job may have terminated",
            }, 500
        else:
            job_info = get_job_info.json()["message"]
            job_owner = job_info["Job_Owner"].split("@")[0]
            request_user = request.headers.get("X-SOCA-USER")
            if request_user is None:
                return errors.all_errors("X-SOCA-USER_MISSING")
            # Only the submitting user may delete their own job.
            if request_user != job_owner:
                return errors.all_errors("CLIENT_NOT_OWNER")
            try:
                qdel_command = config.Config.PBS_QDEL + " " + job_id
                try:
                    delete_job = subprocess.check_output(shlex.split(qdel_command))
                    return {"success": True, "message": "Job deleted"}
                except Exception as err:
                    return {
                        "success": False,
                        "message": "Unable to execute qdel command: " + str(err),
                    }, 500
            except Exception as err:
                return {"success": False, "message": "Unknown error: " + str(err)}, 500
|
989,812 | 93616df182566dfd1a6707dc66f711153da347aa | # Generated by Django 3.0.4 on 2021-10-05 09:27
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make the two room sync timestamps nullable.

    NOTE: ``default=datetime.datetime.now`` passes the callable itself, so
    Django evaluates it at row-save time (naive local time, not UTC).
    """
    dependencies = [
        ('hotels', '0001_initial'),
    ]
    operations = [
        # agentSyncTime (verbose_name reads "agent sync time").
        migrations.AlterField(
            model_name='room',
            name='agentSyncTime',
            field=models.DateTimeField(default=datetime.datetime.now, null=True, verbose_name='代理商同步时间'),
        ),
        # roomSyncTime (verbose_name reads "hotel sync time").
        migrations.AlterField(
            model_name='room',
            name='roomSyncTime',
            field=models.DateTimeField(default=datetime.datetime.now, null=True, verbose_name='酒店同步时间'),
        ),
    ]
|
989,813 | 6adcd05dc22f8c201be79f4e9f4caa26bcf2b7cc | '''
Created on Feb 8, 2020
@author: scott-p-lane
'''
from enum import Enum
class GridOrientation(Enum):
    """Facing direction within the grid.

    Values are ordered so that +1 is one right (clockwise) turn and -1 one
    left turn, as used by GridCoordinates.changeOrientation below.
    """
    left = 0
    up = 1
    right = 2
    down = 3
class TurnDirection(Enum):
    """Direction of a single one-step (90-degree) turn."""
    left = 0
    right = 1
class GridCoordinates (object):
    """
    Dictionary based implementation that represents grid coordinates and state for
    each coordinate. For certain types of grid related problems, this can be more
    optimal than a large, multi-dimensional grid. Also useful in situations where
    grid sizes are not known up front.
    """

    def __init__(self, defaultVal=0):
        """
        Parameters
        ----------
        defaultVal: optional
            Default value of any coordinate in the grid that was not explicitly
            assigned a value. (default is 0)
        """
        # Sparse storage: "col,row" key -> value; unset cells read defaultVal.
        self.grid = {}
        self.defaultVal = defaultVal
        # orientation lists the facing directions in clockwise order, so a
        # right turn moves one index forward and a left turn one back.
        self.orientation = [GridOrientation.left, GridOrientation.up,
                            GridOrientation.right, GridOrientation.down]
        self.currentOrientation = GridOrientation.up
        self.currentRow = 0
        self.currentCol = 0
        # Bounding box of every coordinate visited so far.
        self.maxRow = 0
        self.minRow = 0
        self.maxCol = 0
        self.minCol = 0

    def changeOrientation(self, turnDirection: TurnDirection) -> GridOrientation:
        """
        Changes orientation by turning "1 step" in the given direction and
        returns the new orientation.

        Parameters:
        ------------
        turnDirection : TurnDirection
            TurnDirection.left or TurnDirection.right
        """
        orientationIndex = self.currentOrientation.value
        if turnDirection == TurnDirection.left:
            orientationIndex -= 1
        else:
            orientationIndex += 1
        # Wrap around at both ends of the orientation ring.
        if (orientationIndex > len(self.orientation) - 1):
            orientationIndex = 0
        if (orientationIndex < 0):
            orientationIndex = len(self.orientation) - 1
        self.currentOrientation = self.orientation[orientationIndex]
        return self.currentOrientation

    def __createkey__(self):
        """
        Constructs a key for the current coordinate from currentCol/currentRow.
        """
        return str(self.currentCol) + "," + str(self.currentRow)

    def createKey(self, colIndex, rowIndex):
        """Constructs the grid key for an arbitrary coordinate."""
        return str(colIndex) + "," + str(rowIndex)

    def processedCoordinate(self):
        """
        Returns [col, row, processed:bool] for the current coordinate, where
        processed indicates a value was explicitly stored there before.
        """
        vals = self.getCoordinate()
        vals[-1] = False
        gridkey = self.__createkey__()
        if gridkey in self.grid.keys():
            vals[-1] = True
        return vals

    def setCoordinateValue(self, coordVal):
        """
        Sets the current coordinate to the specified value.
        Returns coordinate value (see getCoordinate)
        """
        gridkey = self.__createkey__()
        self.grid[gridkey] = coordVal
        return self.getCoordinate()

    def advance(self, distance=1):
        """
        Advances the specified distance in the current orientation (default 1)
        and returns the new coordinate value (see getCoordinate).  Note that
        "up" increases the row index and "down" decreases it.
        """
        colOffset = 0
        rowOffset = 0
        if self.currentOrientation == GridOrientation.left:
            colOffset = -1 * distance
        if self.currentOrientation == GridOrientation.right:
            colOffset = distance
        if self.currentOrientation == GridOrientation.down:
            rowOffset = -1 * distance
        if self.currentOrientation == GridOrientation.up:
            rowOffset = distance
        self.currentCol += colOffset
        self.currentRow += rowOffset
        # See if we've expanded the grid's bounding box
        if self.currentCol > self.maxCol:
            self.maxCol = self.currentCol
        if self.currentCol < self.minCol:
            self.minCol = self.currentCol
        if self.currentRow > self.maxRow:
            self.maxRow = self.currentRow
        if self.currentRow < self.minRow:
            self.minRow = self.currentRow
        return self.getCoordinate()

    def getCoordinate(self):
        """Returns [col, row, val] for the current coordinate."""
        return self.getCoordinateAt(self.currentCol, self.currentRow)

    def getCoordinateAt(self, colIndex, rowIndex):
        """
        Returns [col, row, val] for the requested coordinate.
        """
        gridval = self.grid.get(self.createKey(colIndex, rowIndex), self.defaultVal)
        # Fix: previously echoed self.currentCol/self.currentRow regardless of
        # the requested indices, so lookups away from the current position
        # reported the wrong location alongside the right value.
        retvals = [colIndex, rowIndex, gridval]
        return retvals

    def rowCount(self):
        """
        Returns absolute number of rows in the grid.

        NOTE(review): abs(min) + abs(max) matches the original behavior but
        omits row 0 and miscounts when all rows share a sign — confirm before
        relying on it for exact sizing.
        """
        return abs(self.minRow) + abs(self.maxRow)

    def columnCount(self):
        """
        Returns absolute number of columns in the grid (same caveat as rowCount).
        """
        return abs(self.minCol) + abs(self.maxCol)

    def renderGridRow(self, rowIndex, whitespaceSet=None):
        """
        Renders the specified row of the grid as a string (first row is 0).
        Any cell value found in whitespaceSet is rendered as a single space.

        NOTE(review): iterates range(minCol, maxCol), which excludes the
        maxCol cell — preserved from the original; confirm intent.
        """
        # Avoid a mutable default argument; None means "no whitespace values".
        if whitespaceSet is None:
            whitespaceSet = []
        rowstr = ""
        internalRowIndex = self.minRow + rowIndex
        for c in range(self.minCol, self.maxCol, 1):
            gridval = self.grid.get(self.createKey(c, internalRowIndex), self.defaultVal)
            if gridval not in whitespaceSet:
                rowstr += str(gridval)
            else:
                rowstr += " "
        return rowstr
|
989,814 | 9cd5e95a976113159479bcdcc231b9f475a6f19f | import cntk as C
import numpy as np
import pandas as pd
# Two 2-dimensional input placeholders with one float32 sample each.
x = C.input_variable(2)
y = C.input_variable(2)
x0 = np.asarray([[2., 1.]], dtype=np.float32)
y0 = np.asarray([[4., 6.]], dtype=np.float32)
# Evaluate the squared-error node on the bound sample pair.
res = C.squared_error(x, y).eval({x:x0, y:y0})
# Fix: the Python 2 `print type(res)` statement is a SyntaxError under
# Python 3; the function form below works on both interpreter lines.
print(type(res))
989,815 | e53cbb6c7022b87dee05376e03a796ecc91ec37a | """
Suite of tools for gridding multibeam bathymetric surveys and calculating
bed form flux.
version: 0.1.3
"""
import os, glob
import pandas as pd
import numpy as np
import statsmodels.api as sm
import matplotlib.pyplot as plt
import datetime as dt
class Raw(object):
    def __init__(self, datadir):
        """Instantiate a multibeam raw-data object.

        Reads ``info.csv`` from *datadir* (columns DATE, TIME, FILENAME),
        parses the survey timestamps and verifies that every listed xyz
        file exists under ``datadir/xyz``.
        """
        print('\rStatus: Verifying Data', end = "\t\t\t\r")
        self.datadir = datadir
        # Load file info and verify data validity.
        # Fix: DataFrame.ix was deprecated and removed from pandas; .loc is
        # the label-based replacement.
        info = pd.read_csv(os.path.join(self.datadir, 'info.csv'))
        nt = len(info)
        self.t = [dt.datetime.strptime(str(info.loc[i, 'DATE'])
                                       + str(info.loc[i, 'TIME']),
                                       '%Y%m%d%H%M')
                  for i in range(nt)]
        self.xyzdir = os.path.join(datadir, 'xyz')
        self.xyznames = info.loc[:, 'FILENAME']
        self.xyzpaths = [os.path.join(self.xyzdir, self.xyznames[i])
                         for i in range(nt)]
        xyzglobpath = os.path.join(self.datadir, 'xyz', '*')
        assert (set(self.xyzpaths).issubset(set(glob.glob(xyzglobpath)))), \
            "FILENAME listed in 'info.csv' not found in datadir"
    # ================================================================
    def _rotate_xy(self, xyz, angle):
        """Rotate every point of an (n, 3) xyz array about the origin by *angle* degrees (z unchanged)."""
        theta = np.deg2rad(angle)
        x = xyz[:, 0].copy()
        y = xyz[:, 1].copy()
        xyz[:, 0] = x * np.cos(theta) - y * np.sin(theta)
        xyz[:, 1] = x * np.sin(theta) + y * np.cos(theta)
        xyz[:, 2] = xyz[:, 2]
        return xyz
    # ================================================================
    def _generate_grid(self, xyz, dx, dy):
        """Generate grid origin and cell boundaries covering *xyz*.

        Returns (origin, xbnds, ybnds, zmax): cell-edge coordinates are
        relative to origin, zmax is the data's z extent.
        """
        origin = np.amin(xyz, 0)
        extent = np.amax(xyz, 0) - origin
        ncells = (np.amax(xyz, 0) - origin) // [dx, dy, 1]
        # Account for remainder: centre the grid by splitting it evenly.
        origin += [(extent[0] % dx) / 2, (extent[1] % dy) / 2, 0]
        # Fix: floor division of arrays yields floats, and np.linspace
        # requires an integer sample count.
        xbnds = np.linspace(0, ncells[0] * dx, int(ncells[0]) + 1)
        ybnds = np.linspace(0, ncells[1] * dy, int(ncells[1]) + 1)
        return origin, xbnds, ybnds, extent[2]
    # ================================================================
    def _gridloc(self, nddata, bnds, axis):
        """Sort *nddata* by column *axis* and return (sorted_data, cell indices into *bnds*)."""
        assert (axis < nddata.ndim), "axis > ndim"
        nddata = nddata[nddata[:, axis].argsort()]
        loc = np.searchsorted(bnds, nddata[:, axis]) - 1
        return nddata, loc
    # ================================================================
    def _smooth_profile(self, x, y, xbnds, usepts, nanrad):
        """Smooth a single 1-D unstructured profile with LOWESS.

        Returns (xnew, znew): cell-centre x positions and smoothed values;
        cells with no raw point within *nanrad* cells are set to NaN.
        """
        xz = np.hstack((x[:, np.newaxis], y[:, np.newaxis]))
        xz, xloc = self._gridloc(xz, xbnds, 0)
        dx = np.diff(xbnds)
        xnew = xbnds[:-1] + 0.5 * dx
        # Mask cells that have no data within nanrad cells on either side.
        nanmask = np.array([np.sum(np.abs(xloc - ix + 1) <= nanrad) == 0
                            for ix in range(len(xnew))])
        frac = usepts / xz.shape[0]
        with np.errstate(invalid='ignore'):
            try:
                w = sm.nonparametric.lowess(xz[:, 1], xz[:, 0], frac=frac,
                                            delta=np.nanmean(dx), it=0)
                znew = np.interp(xnew, w[:, 0], w[:, 1])
            # Fix: was a bare `except:` that set znew to a scalar NaN, which
            # then crashed on `znew[nanmask]` below (floats aren't indexable).
            except Exception:
                znew = np.full_like(xnew, np.nan)
        znew[nanmask] = np.nan
        return xnew, znew
    # ================================================================
    def _grid_xyz(self, xyz, xbnds, ybnds, zmax, usepts, nanrad):
        """Grid an xyz point cloud into an (ny, nx) elevation raster."""
        nx = len(xbnds) - 1
        ny = len(ybnds) - 1
        xyz, yloc = self._gridloc(xyz, ybnds, 1)
        Z = np.empty((ny, nx), 'float')
        for i in range(ny):
            xz = xyz[np.ix_(yloc == i, [0, 2])]
            if len(xz) < usepts:
                # Too few soundings in this swath to smooth reliably.
                Z[i, :] = np.nan
            else:
                with np.errstate(invalid='ignore'):
                    _, Z[i] = self._smooth_profile(xz[:, 0], xz[:, 1], xbnds,
                                                   usepts=usepts,
                                                   nanrad=nanrad)
        # Discard elevations outside the plausible range [0, zmax].
        Z[np.ma.fix_invalid(Z, fill_value=np.inf) > zmax] = np.nan
        Z[np.ma.fix_invalid(Z, fill_value=0) < 0] = np.nan
        return Z
    # ================================================================
    def plot_timestep(self, survey, rot_deg=0, markersize=5):
        """Load a single xyz file and plot datapoints colored by elevation.

        Intended to assist in determining flow azimuth.
        """
        xyz = self._rotate_xy(np.loadtxt(self.xyzpaths[survey]), rot_deg)
        plt.clf()
        fig1 = plt.figure(1)
        ax = fig1.add_subplot(111)
        sc = ax.scatter(xyz[:, 0], xyz[:, 1], c=xyz[:, 2],
                        marker='o',
                        s=markersize, lw=0,
                        cmap='viridis')
        fig1.colorbar(sc)
        ax.axis('equal')
        ax.set_title('Raw data for survey {0}'.format(survey))
        ax.set_xlabel('X coordinate')
        ax.set_ylabel('Y coordinate')
        plt.show()
        return ax
    # ================================================================
    def plot_smooth(self, survey, xsect, rot_deg, dx=1, dy=1, usepts=20, nanrad=2):
        """Plot raw data and one smoothed transect for QC.

        Use this function to test gridding parameters.
        """
        xyz = self._rotate_xy(np.loadtxt(self.xyzpaths[survey]), rot_deg)
        origin, xbnds, ybnds, zmax = self._generate_grid(xyz, dx, dy)
        xyz -= origin
        xyz, yloc = self._gridloc(xyz, ybnds, 1)
        xz = xyz[np.ix_(yloc == xsect, [0, 2])]
        xnew, znew = self._smooth_profile(xz[:, 0], xz[:, 1], xbnds, usepts, nanrad)
        plt.clf()
        fig1 = plt.figure(1)
        ax = fig1.add_subplot(111)
        ax.scatter(xz[:, 0], xz[:, 1])
        ax.plot(xnew, znew, 'r')
        ax.set_title('Transect Smoothing')
        ax.set_xlabel('Streamwise (x) coordinate')
        ax.set_ylabel('Z coordinate')
        plt.show()
        return ax

    def plot_gridded(self, survey, rot_deg, dx=1, dy=1, usepts=20, nanrad=2):
        """Plot a single gridded survey for QC."""
        # load data
        xyz = self._rotate_xy(np.loadtxt(self.xyzpaths[survey]), rot_deg)
        origin, xbnds, ybnds, zmax = self._generate_grid(xyz, dx, dy)
        xyz -= origin
        xyz, yloc = self._gridloc(xyz, ybnds, 1)
        # grid data
        # Fix: forward the caller's nanrad (it was hard-coded to 2).
        zarr = self._grid_xyz(xyz, xbnds, ybnds, zmax, usepts=usepts,
                              nanrad=nanrad)
        xarr, yarr = np.meshgrid(xbnds[:-1], ybnds[:-1])
        # plot
        fig1 = plt.figure(1)
        ax = fig1.add_subplot(111)
        mesh = ax.pcolormesh(xarr, yarr, zarr, cmap='viridis', vmin=0, vmax=zmax)
        # Fix: Axes has no colorbar() method; attach it via the figure.
        fig1.colorbar(mesh)
        ax.axis('tight')
        plt.show()
        return ax
# ============================================================================
class Raster(Raw):
    def __init__(self, datadir, save, ref_xyz, flow_azimuth, dx, dy,
                 usepts, nanrad):
        """Return a gridded data object.

        Builds a grid from the reference survey *ref_xyz* (after rotating all
        point clouds by *flow_azimuth* degrees), grids every survey into
        self.Zarr (nt, ny, nx), and — when *save* is True — caches each
        raster as a .npy file under datadir/raster so later runs can reload
        it instead of re-gridding.
        """
        Raw.__init__(self, datadir)
        self.ref_xyz = ref_xyz
        self.flow_azimuth = flow_azimuth
        self.dx = dx
        self.dy = dy
        # Generate Grid based on reference survey
        refdat = self._rotate_xy(np.loadtxt(self.xyzpaths[ref_xyz]), self.flow_azimuth)
        self.origin, self.xbnds, self.ybnds, self.zmax = \
            self._generate_grid(refdat, self.dx, self.dy)
        nt = len(self.t)
        nx = len(self.xbnds) - 1
        ny = len(self.ybnds) - 1
        # Make raster directory if it doesn't exist
        self.rasterdir = os.path.join(datadir, 'raster')
        if save == True and not os.path.exists(self.rasterdir):
            os.mkdir(self.rasterdir)
        # Generate list of raster pathnames (one 'r<name>.npy' per xyz file)
        rnames = ['r{0}.npy'.format(os.path.splitext(self.xyznames[i])[0]) \
                  for i in range(nt)]
        self.rpaths = [os.path.join(self.rasterdir, rnames[i]) for i in range(nt)]
        # Rasters already on disk from a previous run
        self.completed = glob.glob(os.path.join(self.rasterdir, '*'))
        # For every survey
        self.Zarr = np.empty((nt, ny, nx), 'float')
        for i in range(nt):
            print('\rStatus: Gridding Survey {0}/{1}'.format(i+1, nt), end = "\t\t\t\r")
            # if the raster file already exists:
            if self.rpaths[i] in self.completed:
                # Load completed raster
                self.Zarr[i] = np.load(self.rpaths[i])
            else:
                # Load point clouds, rotate into flow coordinates and shift
                # onto the reference grid's origin
                xyz = self._rotate_xy(np.loadtxt(self.xyzpaths[i]), flow_azimuth)
                xyz -= self.origin
                xyz, yloc = self._gridloc(xyz, self.ybnds, 1)
                # grid pointcloud
                zarr = self._grid_xyz(xyz, self.xbnds, self.ybnds,
                                      self.zmax, usepts=usepts,
                                      nanrad=nanrad)
                # save raster and store in Zarr
                if save == True:
                    np.save(self.rpaths[i], zarr)
                elif save == False:
                    pass
                else:
                    raise Exception('save must be boolean')
                self.Zarr[i] = zarr
                # record progress
                self.completed.append(self.rpaths[i])
# =========================================================
def _nan_helper(self, y):
""" Helper to handle indices and logical indices of NaNs."""
return np.isnan(y), lambda z: z.nonzero()[0]
# =========================================================
def _remove_outliers(self, data, nsigma):
""" Iteratively removes outliers from data defined by nsigma """
while (np.abs(np.ma.fix_invalid(data)-np.nanmean(data)) > nsigma *
np.nanstd(data)).any():
data[np.where(np.abs(np.ma.fix_invalid(data)-np.nanmean(data)) >
nsigma * np.nanstd(data))] = np.nan
return data
# =========================================================
def _div0(self, a, b , val = 0):
""" ignore / 0, div0( [-1, 0, 1], 0 ) -> [0, 0, 0] """
with np.errstate(divide='ignore', invalid='ignore'):
c = np.true_divide( a, b )
c[ ~ np.isfinite( c )] = val # -inf inf NaN
return c
# =========================================================
def _xcorrf(self, profile1, profile2, dx):
""" Find displacement and correlation coefficient of two bed form profiles"""
corrf = np.correlate(profile2, profile1, mode = 'same') \
/np.sum(profile1**2)
if np.isnan(corrf).any():
displ = np.nan
corr = 0
else:
displ = (np.where(corrf == np.max(corrf))[0][0] - len(corrf)//2)*dx
corr = np.max(corrf)
return displ, corr
# =========================================================
def _lowpass(self, signal, dx, Lmax):
"""Return filtered signal according to maximum wavelength specification.
Assumes signal is already zero mean and nans interpolated
"""
W = np.fft.rfftfreq(len(signal), dx)
f_signal = np.fft.rfft(signal)
filtered_signal = f_signal.copy()
filtered_signal[np.where(self._div0(1,W) > Lmax)] = 0+0j
return np.fft.irfft(filtered_signal, n=len(signal)), f_signal
# =========================================================
    def _detrend_xsect(self, xsect, dx, Lmax, mindat):
        """Return filtered transects based on maximum wavelength specification
        This function handles fourier filtering for bed form profiles with missing
        data.

        Mutates ``xsect`` in place (slice assignment) and also returns it,
        together with the Fourier spectrum of the gap-interpolated signal.
        """
        nans,x = self._nan_helper(xsect)
        # Too little valid data: blank the whole transect, zero spectrum.
        if sum(nans) > len(xsect)*(1-mindat):
            xsect[:] = np.nan
            f_signal = np.zeros(len(xsect)//2 + 1, 'complex')
        else:
            # Linearly interpolate gaps so the FFT is well defined.
            xsect[nans] = np.interp(x(nans), x(~nans), xsect[~nans])
            xsect -= np.nanmean(xsect)
            # filter signal (slice-assign so the caller's array is updated)
            xsect[:], f_signal = self._lowpass(xsect, dx, Lmax)
            # Restore NaNs at the originally-missing samples.
            xsect[nans] = np.nan
        return xsect, f_signal
# =========================================================
def _detrend_all(self, Z, dx, Lmax, datmin_grid):
"""Filter all transects based on maximum wavelength specification"""
[nt, ny, nx] = Z.shape
z = np.empty((nt, ny, nx), 'float')
z_f = np.empty((nt, ny, nx//2 + 1), 'complex')
for t in range(nt):
for y in range(ny):
z[t,y], z_f[t,y] = self._detrend_xsect(Z[t,y].copy(), dx, Lmax, datmin_grid)
return z, z_f
# =========================================================
def _calc_Lc(self, signal, dx):
""" Calculate characteristic lengthscale of bed form profile """
Wwin = np.fft.rfftfreq(len(signal), dx)
f_signal = np.fft.rfft(signal)
amplitude = np.abs(f_signal) * dx
power = amplitude ** 2
fc = np.sum(power[1:]*Wwin[1:])/np.sum(power[1:])
return 1/fc
# =========================================================
def _calc_Hc(self, signal):
""" Calculate characteristic height scale of bed form profile"""
return 2.8 * np.nanstd(signal)
# =========================================================
    def _bf_geom(self, Zwin, dx, datmin_geom = 0.25, nsigma = 2.5, maxslope = 0.15):
        """ Calculate nt by ny bedform geometry array
        Replace unrealistic geometries with nan according to
        the following protocol:
            -remove Hc and Lc values that are greater than nsigma standard
            deviations from the mean value of timestep
            -remove values where Hc/Lc > maxslope
            -mask values that are only valid for Hc or Lc, so that Hc and Lc
            are valid simultaneously everywhere
        """
        [nt, ny, _] = Zwin.shape
        Hc = np.empty((nt, ny), 'float')
        Lc = np.empty((nt, ny), 'float')
        for t in range(nt):
            print('\rStatus: Calculating Lc and Hc (Survey {0}/{1})'.format(t+1, nt),
                  end = "\t\t\t\r")
            for y in range(ny):
                signal = Zwin[t,y]
                nans, x = self._nan_helper(signal)
                # Only compute geometry when enough of the transect is valid.
                if sum(nans)/len(nans) < 1-datmin_geom:
                    Hc[t,y] = self._calc_Hc(signal)
                    # Interpolate gaps and remove the mean before the spectral Lc.
                    signal[nans] = np.interp(x(nans), x(~nans), signal[~nans])
                    signal = signal - np.nanmean(signal)
                    Lc[t,y] = self._calc_Lc(signal, dx)
                else:
                    Hc[t,y] = np.nan
                    Lc[t,y] = np.nan
        # Remove outlier values
        for i in range(nt):
            Hc[i] = self._remove_outliers(Hc[i], nsigma)
            Lc[i] = self._remove_outliers(Lc[i], nsigma)
        # Remove unrealistic geometries (steepness Hc/Lc above maxslope)
        ixbad = np.where(np.ma.fix_invalid(Hc/Lc) > maxslope)
        Hc[ixbad] = np.nan
        Lc[ixbad] = np.nan
        # Mask so Hc and Lc are always both valid
        nanmask = np.ma.mask_or(np.isnan(Hc), np.isnan(Lc))
        Hc[nanmask] = np.nan
        Lc[nanmask] = np.nan
        return Hc, Lc
# =========================================================
def _dx_mat(self, z, dx, mincorr):
""" Return displacement matrix. """
[nt, ny, _] = z.shape
disp = np.empty((nt, nt, ny), 'float')
for t1 in range(nt):
print('\rStatus: Calculating Bedform Displacements {0}/{1}'.format(t1+1, nt), end = "\t\t\t\r")
for t2 in range(nt):
for y in range(ny):
d, corr = self._xcorrf(z[t1,y], z[t2, y], dx)
if corr > mincorr:
disp[t1,t2,y] = d
else:
disp[t1, t2, y] = np.nan
return disp
# =========================================================
def _dt_mat(self, t):
""" Return duration matrix. """
nt = len(t)
delta_t = np.empty((nt, nt), 'float')
for t1 in range(nt):
for t2 in range(nt):
delta_t[t1,t2] = (t[t2] - t[t1]).total_seconds()
return delta_t
# =========================================================
def _clean_dx_mat(self, disp, deltat, Vmin, Vmax, Lc, dfracmin, dfracmax):
""" replace unrealistic velocities with nan according to the following
conditions:
- Vmax > Vc > Vmin
- dfracmax > disp/Lc > dfracmin
"""
[nt, _, ny] = disp.shape
for t1 in range(nt):
for t2 in range(nt):
for y in range(ny):
d_x = disp[t1, t2, y]
d_t = deltat[t1,t2]
lc = Lc[t1,y]
## comparison values
disp_max = np.min([lc * dfracmax,
Vmax * abs(d_t)])
disp_min = np.max([lc * dfracmin,
Vmin * abs(d_t)])
# test if valid
valid = disp_min < abs(d_x) < disp_max
# fix if not valid
if not valid:
disp[t1, t2, y] = np.nan
return disp
def _vregress(self, disp, deltat, r2min):
""" perfoms velocity regression on valid displacements """
[nt, _, ny] = disp.shape
Vc = np.empty((nt, ny), 'float')
r2 = np.empty_like(Vc)
for t1 in range(nt):
print('\rStatus: Performing Velocity Regression {0}/{1}'.format(t1+1, nt), end = "\t\t\t\r")
for y in range(ny):
d_x = disp[t1,:,y].flatten()
d_t = deltat[t1,:].flatten()
ival = ~np.isnan(d_x)
d_x = d_x[ival]
d_t = d_t[ival]
d_t = d_t[:,np.newaxis]
if len(d_x) < 3:
Vc[t1, y] = np.nan
else:
Vc[t1,y], resid, _, _ = np.linalg.lstsq(d_t, d_x)
r2[t1,y] = 1 - resid / (len(d_x) * np.var(d_x))
Vc[np.where(r2<r2min)] = np.nan
return Vc
def _bf_vel(self, z, t, dx, mincorr, Vmin, Vmax, Lc, dfracmin, dfracmax,
minR2):
""" Return nt by ny velocity array """
displacement = self._dx_mat(z, dx, mincorr)
duration = self._dt_mat(t)
displacement = self._clean_dx_mat(displacement, duration,
Vmin, Vmax, Lc, dfracmin, dfracmax)
Vc = self._vregress(displacement, duration, minR2)
return Vc
# ===================================================================
class FluxStats(Raster):
    """End-to-end bedform statistics pipeline.

    Grids the surveys (via Raster), windows and detrends each transect,
    then derives characteristic height (Hc), length (Lc) and migration
    velocity (Vc) arrays. The whole analysis runs at construction time.
    """
    def __init__(self, datadir, save, flow_azimuth, dx, dy, istart, iend, Lmax,
                 ref_xyz, datmin_grid, usepts, nanrad, datmin_geom, nsigma,
                 maxslope, mincorr, Vmin, Vmax, dfracmin, dfracmax, minR2):
        # Gridding parameters
        Raster.__init__(self, datadir, save, ref_xyz, flow_azimuth, dx, dy,
                        usepts, nanrad)
        self.istart = istart
        self.iend = iend
        self.Lmax = Lmax
        self.datmin_grid = datmin_grid
        # Geometric calculation parameters
        self.datmin_geom = datmin_geom
        self.nsigma = nsigma
        self.maxslope = maxslope
        # Velocity parameters
        self.mincorr = mincorr
        self.Vmin = Vmin
        self.Vmax = Vmax
        self.dfracmin = dfracmin
        self.dfracmax = dfracmax
        self.minR2 = minR2
        # Analysis window along x, then the full pipeline.
        self.Zwin = self.Zarr[:,:,istart:iend]
        self.z, self.z_f = self._detrend_all(self.Zwin, dx, Lmax, datmin_grid)
        [self.Hc, self.Lc] = self._bf_geom(self.z, self.dx, self.datmin_geom,
                                           self.nsigma, self.maxslope)
        self.Vc = self._bf_vel(self.z, self.t, self.dx, self.mincorr,
                               self.Vmin, self.Vmax, self.Lc, self.dfracmin,
                               self.dfracmax, self.minR2)
def q(datadir, save, flow_azimuth, dx, dy, istart, iend, Lmax, ref_xyz=0,
      datmin_grid=0.25, usepts=20, nanrad=2, datmin_geom=0.8, nsigma=2.5,
      maxslope=0.15, mincorr=0.85, Vmin=0.1/3600, Vmax=3/3600, dfracmin=0.001,
      dfracmax=0.2, minR2=0.8):
    """Convenience factory: build a FluxStats analysis with default tuning."""
    return FluxStats(
        datadir, save, flow_azimuth, dx, dy, istart, iend, Lmax, ref_xyz,
        datmin_grid, usepts, nanrad, datmin_geom, nsigma, maxslope, mincorr,
        Vmin, Vmax, dfracmin, dfracmax, minR2)
|
989,816 | 1b0478c7aaee8c7b0ff5539be819a1b45dda10b2 | # -*- coding: utf-8 -*-
"""
Question 3.5:
Sort Stack: Write a program to sort a stack such that the smallest items are on the top. You can use a additional
temporary stack, but you may not copy the elements into any other data structure(such as array). The
stack supports the following operations: push, pop, peek and isEmpty.
"""
class Stack:
    """Minimal LIFO stack backed by a Python list."""

    def __init__(self):
        self.stack = []

    def pop(self):
        """Remove and return the top item; raise Exception when empty."""
        if self.isEmpty():
            raise Exception('Pop Error!')
        return self.stack.pop()

    def push(self, val):
        """Place val on top of the stack."""
        self.stack.append(val)

    def peek(self):
        """Return the top item without removing it; raise Exception when empty."""
        if self.isEmpty():
            raise Exception('Peek Error!')
        return self.stack[-1]

    def isEmpty(self):
        """Return True when the stack holds no items."""
        # A list is falsy exactly when it is empty; the if/else was redundant.
        return not self.stack
def sort_stack(s):
    """
    Sort stack s so the smallest items end up on top, using one auxiliary
    stack. Takes O(n^2) time and O(n) extra space.
    :param s: Stack to sort (emptied in the process)
    :return: new Stack sorted with the minimum on top
    """
    aux = Stack()
    while not s.isEmpty():
        current = s.pop()
        # Move larger items back onto s until current fits on aux.
        while not aux.isEmpty() and aux.peek() < current:
            s.push(aux.pop())
        aux.push(current)
    return aux
if __name__ == '__main__':
    # Demo: push a small shuffled sequence, sort, and pop everything off
    # (smallest first).
    s = Stack()
    for value in (2, 3, 1, 5, 4):
        s.push(value)
    res = sort_stack(s)
    for _ in range(5):
        print(res.pop())
|
989,817 | a99044c679edec9cc464bff2a0df5b41912b3693 | # GIL 全称 global interpreter lock
# Python 中的一个线程对应于C语言中的一个线程
# GIL 使得, 同一时刻只有一个线程运行在一个CPU上执行字节码, 无法将多个线程映射到多个CPU上执行
# GIL 会根据执行的字节码行数、时间片释放GIL、I/O操作
# import dis
# def add(a):
# a += 1
#
# print(dis.dis(add))
total = 0
def add():
    """Increment the shared counter a million times (+= is not atomic)."""
    global total
    for _ in range(10 ** 6):
        total += 1
def desc():
    """Decrement the shared counter a million times (-= is not atomic)."""
    global total
    for _ in range(10 ** 6):
        total -= 1
if __name__ == '__main__':
    from threading import Thread
    workers = [Thread(target=add), Thread(target=desc)]
    for w in workers:
        w.start()
    for w in workers:
        w.join()
    # Expected 0 — but += / -= are multi-bytecode operations, so with the
    # GIL switching between them mid-update the printed value is
    # nondeterministic.
    print(total)
989,818 | 8a81614e418f9ee2e00b2e14b25335bc7669b107 | #import random
#def rand(start, end, str):
#res = []
#for r in range(str):
#res.append(random.randint(start, end))
#return res
#str = 'r', 'y', 'g', 'b'
#start = 'r'
#end = 'y'
#print(rand(start, end, str))
import tkinter as tk
#import color_blocks
class color_string:
    """Owns the Tk game canvas and (re)draws the board."""

    def __init__(self, parent):
        self.parent = parent
        self.canvas = tk.Canvas(parent)
        self.draw_board()

    def draw_board(self):
        """Replace the canvas with a fresh 800x600 one."""
        # Fix: 'tk.canvas' with 'HEIGHT'/'WIDTH' keywords raised at runtime —
        # the widget class is Canvas and its size options are lowercase.
        self.canvas = tk.Canvas(self.parent, height=600, width=800)
        self.canvas.pack()
        #self.squares()
#self.squares()
class SelfSquares():
    """Simon-style color squares: replays a color pattern, then checks the
    player's clicks against it.

    NOTE(review): relies on attributes (pattern, canvas, squares, light,
    dark, ids, selections, high_score, status) that are never initialized
    here, and on dull_red/dull_green/dull_blue/dull_yellow and random,
    which are not defined or imported in this module — confirm against the
    intended color_blocks module.
    """

    def __init__(self, sprites, input_dict):
        self.sprites = sprites
        self.input = input_dict
        # Show Dull Red
        self.dull_red = dull_red(self.input)
        self.sprites.add(self.dull_red)
        # Show Dull Green
        self.dull_green = dull_green(self.input)
        self.sprites.add(self.dull_green)
        # Show Dull Blue
        self.dull_blue = dull_blue(self.input)
        self.sprites.add(self.dull_blue)
        # Show Dull Yellow
        self.dull_yellow = dull_yellow(self.input)
        self.sprites.add(self.dull_yellow)

    def animate(self, idx=0):
        """Flash pattern[idx] bright, schedule it back to dark, recurse."""
        # Fix: the original 'def animate(self, idx = 0)' lacked the colon
        # (SyntaxError) and used 'self.squares.[c]' (also a SyntaxError).
        c = self.pattern[idx]
        self.canvas.itemconfig(self.squares[c], fill=self.light[c], outline=self.light[c])
        self.parent.after(1000, lambda: self.canvas.itemconfig(
            self.squares[c], fill=self.dark[c], outline=self.dark[c]))
        idx += 1
        if idx < len(self.pattern):
            self.parent.after(1000, lambda: self.animate(idx))
        else:
            self.canvas.bind('<1>', self.select)

    def select(self, event=None):
        """Handle a click: record the color, extend or reset the game."""
        # Fix: 'find_withtage' is not a Canvas method — 'find_withtag' is.
        id = self.canvas.find_withtag("current")[0]
        color = self.ids[id]
        self.selections += color
        # Fix: bracket mismatch 'self.light[color, outline=...' did not parse.
        self.canvas.itemconfig('current', fill=self.light[color], outline=self.light[color])
        self.parent.after(1000, lambda: self.canvas.itemconfig(id, fill=self.dark[color], outline=self.dark[color]))
        if self.pattern == self.selections:
            # Whole pattern matched: lengthen it and replay.
            self.pattern += random.choice('rgby')
            self.selections = ''
            self.high_score = max(self.high_score, len(self.pattern))
            self.animate()
        elif self.pattern[len(self.selections)-1] != color:
            # Fix: Canvas.unbind requires the event sequence to unbind.
            self.canvas.unbind('<1>')
            self.parent.after(2000, lambda: self.status.config(text=''))
            self.parent.after(2000, self.draw_board)
            print(self.pattern, self.selections)

    def score(self, event=None):
        """Flash the high score in the status label for two seconds."""
        self.status.config(text=self.high_score)
        self.parent.after(2000, lambda: self.status.config(text=''))
root = tk.Tk()
# Fix: the instance previously rebound the name 'color_string', shadowing
# its own class; keep a distinct reference so the app object stays alive.
app = color_string(root)
root.mainloop()
989,819 | db5ceb0f712eb7f9542eef570a030affa13f4b01 | from tkinter import *
import random
import time
import numpy as np
import csv
from copy import deepcopy
##########################################################################
############ Save / load the ball-position file #############
# Save ball positions (write the file on the first run only) ####### !!!! From the second run onward set this to None !!!! #######
save_ballloc = 'c:\python\data\pingpong_move.csv'
# Path of the save_ballloc file (must be set; use the same location as save_ballloc)
load_ballloc = 'c:\python\data\pingpong_move.csv'
############ Save / load the regression-weight file #############
# Save weights (set a path to save them, otherwise set to None)
save_weightloc = 'c:\python\data\pingpong_weight.csv'
# Path of the weight file (set to None to skip loading it)
load_weightloc = 'c:\python\data\pingpong_weight.csv'
############ Gradient-descent tuning ###########
# Gradient-descent learning rate (do not change)
learning_rate = 0.0001
# Number of gradient-descent iterations (do not change)
training_cnt= 30000
# Known-good combination: learning_rate = 0.00001, training_cnt = 50000
# Known-good combination: learning_rate = 0.00002, training_cnt = 25000
##########################################################################
class Ball:
    """Pong ball.

    In save mode it records, for every paddle bounce, the launch state
    [x, vx, vy, 1.0] and the virtual (wall-unfolded) landing x-coordinate,
    producing the training set for the regression.
    """
    def __init__(self, canvas, paddle, color, save=False):
        self.canvas = canvas
        self.paddle = paddle
        self.id = canvas.create_oval(10, 10, 25, 25, fill=color) # ball size and colour
        self.canvas.move(self.id, 245, 100) # move the ball to the middle of the canvas
        self.x = random.choice([-4,-3, -2, -1, 1, 2, 3,4]) # random initial horizontal speed when leaving the paddle
        self.y = -3 # initial upward speed when leaving the paddle
        self.canvas_height = self.canvas.winfo_height() # current canvas height (so the ball stays on screen)
        self.canvas_width = self.canvas.winfo_width() # current canvas width (so the ball stays on screen)
        self.hit_bottom = False
        self.save = save
        self.ball_start = [] # recorded launch states
        self.ball_end = [] # recorded virtual landing x-coordinates
        self.convertloc = self.canvas.coords(self.id)[0] # virtual x that ignores wall bounces
        self.leftorright = 0 # -1/+1 after the first left/right wall hit, else 0
    def hit_paddle(self, pos): # bounce the ball off the paddle
        paddle_pos = self.canvas.coords(self.paddle.id)
        if self.save == True:
            # Simulation mode: only the vertical position matters.
            if pos[3] >= paddle_pos[1] and pos[3] <= paddle_pos[3]: # ball at paddle height
                return True
        elif self.save == False:
            if pos[2] >= paddle_pos[0] and pos[0] <= paddle_pos[2]: # horizontal overlap just before landing
                if pos[3] >= paddle_pos[1] and pos[3] <= paddle_pos[3]: # vertical contact with the paddle
                    return True
        return False
    ############# Virtual coordinate where the ball would land ############
    def endloc(self, pos):
        paddle_pos = self.canvas.coords(self.paddle.id)
        if 290 > pos[1] >= 285 and pos[3] <= paddle_pos[3] and self.y > 0: # ball crossing the paddle line
            return pos[0]
    def draw(self):
        self.canvas.move(self.id, self.x, self.y) # advance the ball
        pos = self.canvas.coords(self.id) # current ball box: (west[0], south[1], east[2], north[3])
        paddle_pos = self.canvas.coords(self.paddle.id)
        #############################################################
        # Build the virtual coordinate:
        # leftorright is 0 until the first wall hit, then -1 (left wall)
        # or +1 (right wall); after that the virtual x keeps moving in
        # that direction as if the wall were not there.
        if self.leftorright == 0:
            self.convertloc += float(self.x)
        elif self.leftorright != 0:
            self.convertloc += self.leftorright * abs(float(self.x))
        #############################################################
        if pos[1] <= 0:
            self.y *= -1
        if pos[3] >= self.canvas_height:
            self.x = random.choice([-1,1])
            self.y *= -1
        if pos[0] <= 0:
            self.x *= -1
            ######### first hit on the LEFT wall sets leftorright = -1 ##########
            if self.leftorright == 0:
                self.leftorright = -1
        if pos[2] >= self.canvas_width:
            self.x *= -1 # send the ball back to the left
            ######### first hit on the RIGHT wall sets leftorright = 1 ##########
            if self.leftorright == 0:
                self.leftorright = 1
        if self.hit_paddle(pos) == True:
            self.x = random.choice(range(-11,12,2))
            self.y *= -1
            ######### record (start x, launch vx, vy, bias 1.0) ##########
            self.ball_start.append([pos[0], float(self.x), float(self.y), 1.0])
            ######### record the virtual landing x-coordinate ##########
            self.ball_end.append(self.convertloc)
            ######### reset the virtual coordinate and wall flag for the next launch ########
            self.convertloc = pos[0]
            self.leftorright = 0
class Paddle:
    """Player paddle; can also steer itself toward the regression-predicted
    landing spot of the ball."""
    def __init__(self, canvas, color):
        self.canvas = canvas
        self.id = canvas.create_rectangle(0, 0, 100, 10, fill=color)
        self.canvas.move(self.id, 200, 300)
        self.x = 0
        self.canvas_width = self.canvas.winfo_width()
    def draw(self):
        pos = self.canvas.coords(self.id)
        if pos[0] <= 0 and self.x < 0: # at the left edge and heading left: stay put
            return
        elif pos[2] >= self.canvas_width and self.x > 0: # at the right edge and heading right: stay put
            return
        self.canvas.move(self.id, self.x, 0)
    ############ Convert the virtual landing x into a real on-screen x ##############
    def convertendloc(self, convertloc):
        # "Folds" an unbounded virtual coordinate back into [0, 485] by
        # mirroring it across the walls the right number of times.
        cnt = 0
        if convertloc in range(486):
            return convertloc
        elif convertloc < 0:
            while True:
                if cnt % 2 == 0 and cnt * -485 - convertloc in range(486):
                    return cnt * -485 - convertloc
                elif cnt % 2 == 1 and (cnt + 1) * 485 + convertloc in range(486):
                    return (cnt + 1) * 485 + convertloc
                cnt += 1
        elif convertloc > 485:
            while True:
                if cnt % 2 == 0 and (cnt + 2) * 485 - convertloc in range(486):
                    return (cnt + 2) * 485 - convertloc
                elif cnt % 2 == 1 and (cnt + 1) * -485 + convertloc in range(486):
                    return (cnt + 1) * -485 + convertloc
                cnt += 1
    ############# Predict the virtual landing spot with the regression weights ##############
    def prediction(self, input, weight):
        return weight[0] * input[0] + weight[1] * input[1] + weight[2] * input[2] + weight[3] * input[3]
    ############# Move the paddle toward where the ball will land #############
    def predict_move(self, convertloc):
        loc = self.convertendloc(convertloc)
        pos = self.canvas.coords(self.id)
        if pos[0]+40 <loc-5 and pos[2]-40 > loc+10: # landing spot already under the paddle
            self.x = 0
            print('stop')
        else:
            if pos[2]-40 < loc+10: # landing spot is to the right
                self.x = 3
                print('+3')
            elif pos[0]+40 > loc-5: # landing spot is to the left
                self.x = -3
                print('-3')
        return self.x, 'loc', loc, 'pos', (pos[0],pos[2])
    def move(self, x, y):
        # Only the horizontal speed is used; y is accepted but ignored.
        self.x = x
############# 경사감소법 및 회귀분석 머신러닝 ################
class machine_learning():
########## 비용함수 메소드 ###########
@staticmethod
def Loss(x, y, weight):
loss = np.sum((x.dot(weight) - y.reshape(len(y),1)) ** 2) / (2 * len(x))
print(loss)
return loss
########## 경사감소법 및 회귀분석 가중치 계산 메소드 ##########
@staticmethod
def gradient_descent(x, alpha=0.00001, descent_cnt=1):
X = x[:, 0:4]
Y = x[:, 4]
M = len(x)
minloss = 10 ** 20
WEIGHT = np.zeros((4,1)) # 초기 weight
loss_history = np.zeros((descent_cnt, 1))
for cnt in range(descent_cnt):
predictions = X.dot(WEIGHT).flatten()
errors_x1 = (predictions - Y) * X[:, 0]
errors_x2 = (predictions - Y) * X[:, 1]
errors_x3 = (predictions - Y) * X[:, 2]
errors_w0 = (predictions - Y) * X[:, 3]
WEIGHT_backup = deepcopy(WEIGHT)
# beta = theta - alpha * (X.T.dot(X.dot(beta)-y)/m)
WEIGHT[0][0] = WEIGHT[0][0] - alpha * (1.0 / M) * errors_x1.sum()
WEIGHT[1][0] = WEIGHT[1][0] - alpha * (1.0 / M) * errors_x2.sum()
WEIGHT[2][0] = WEIGHT[2][0] - alpha * (1.0 / M) * errors_x3.sum()
WEIGHT[3][0] = WEIGHT[3][0] - alpha * (1.0 / M) * errors_w0.sum()
loss_history[cnt, 0] = machine_learning.Loss(X, Y, WEIGHT)
########## BOLD DRIVER 방법 #########
if minloss >= loss_history[cnt,0]:
minloss = loss_history[cnt,0]
alpha *= 1.1
elif minloss < loss_history[cnt,0]:
alpha *= 0.5
WEIGHT = WEIGHT_backup
return WEIGHT, loss_history
########### 세이브 로드 관련 클래스 ###########
class SaveLoad():
@staticmethod
def saveCSV(ballloc, weightloc):
try:
if weightloc != None:
f = open((weightloc), 'a')
w = csv.writer(f, delimiter=',', lineterminator='\n')
for key in machine_learning.gradient_descent(np.array(ball_loc_save), learning_rate, training_cnt)[0]:
w.writerow(key)
f.close()
print('weight saved')
if ballloc != None:
f = open((ballloc), 'a')
w = csv.writer(f, delimiter=',', lineterminator='\n')
for key in ball_loc_save:
w.writerow(key)
f.close()
print('ball saved')
except FileNotFoundError and TypeError:
print('No Save')
@staticmethod
def loadCSV(ballloc,weightloc = None):
try:
if weightloc == None :
pingpong = [data for data in csv.reader(open(ballloc, 'r'))]
for pp in range(len(pingpong)):
for p in range(5):
pingpong[pp][p] = float(pingpong[pp][p])
pingpong = np.array(pingpong)
return machine_learning.gradient_descent(pingpong,learning_rate, training_cnt)[0]
else :
weight = [data for data in csv.reader(open(weightloc, 'r'))]
return np.array([weight[-4],weight[-3],weight[-2],weight[-1]],dtype=float)
except FileNotFoundError :
print('파일 로드 위치를 지정해주세요')
if __name__ == '__main__':
    ############# Simulation run used to collect training data ###############
    tk = Tk() # instantiate Tk
    tk.title("Game") # give the game window a title
    tk.resizable(0, 0) # the window may not be resized in either direction
    tk.wm_attributes("-topmost", 1) # keep the canvas window above all others
    canvas = Canvas(tk, width=500, height=400, bd=0, highlightthickness=0)
    canvas.configure(background='black')
    canvas.pack() # size the canvas according to the width/height above
    tk.update() # let tkinter initialize itself for animation
    paddle = Paddle(canvas, 'black')
    ball = Ball(canvas, paddle, 'black', save=True) # invisible training ball
    for i in range(10000):
        if ball.hit_bottom == False:
            ball.draw()
            paddle.move(paddle.x,0)
            paddle.draw()
    # Pair each launch state with the NEXT landing position to build rows
    # of (start x, vx, vy, bias, landing x).
    ball_loc_save = []
    for idx_start in range(0,len(ball.ball_start)-1):
        try:
            ball_loc_save.append(ball.ball_start[idx_start]+[ball.ball_end[idx_start+1]])
        except IndexError:
            continue
    ################ Save files ################
    SaveLoad.saveCSV(save_ballloc,save_weightloc)
    ################ Load files ################
    weight = SaveLoad.loadCSV(load_ballloc, load_weightloc)
    ################# Play using the learned model ##################
    paddle = Paddle(canvas, 'white')
    ball = Ball(canvas, paddle, 'white', save=False)
    while True:
        if ball.hit_bottom == False:
            ball.draw()
            try:
                convertloc = int(paddle.prediction(ball.ball_start[-1], weight)[0])
                print('prediction', paddle.predict_move(convertloc))
                paddle.move(paddle.x, 0)
            except IndexError:
                # Before the first bounce there is no launch record yet.
                #paddle.move(random.choice([-3, 3]), 0) # enable to start with random motion
                paddle.move(ball.x,0) # enable to start by following the ball
            paddle.draw()
        tk.update_idletasks()
        tk.update()
        time.sleep(0.01)
# Print the multiplication table of a user-supplied number.
num = int(input('enter a number between 1 And 12'))
for multiplier in range(1, 13):
    print(num * multiplier)
989,821 | 4417c713eb3cab09946efc87dc44179fbdfcf739 | #! /usr/bin/env python3
from bs4 import BeautifulSoup
import os
import shutil
import argparse
from glob import iglob
import re
# There are 11 columns. (TSV)
#
# 1  word        surface form
# 2  lemma       lemma
# 3  msd         morphosyntactic description
# 4  ctag        (to be ignored)
# 5  ana         detailed morphological analysis
# 6  word_cv     CV skeleton of the surface form
# 7  word_syll   syllable count of the surface form
# 8  lemma_cv    CV skeleton of the lemma
# 9  lemma_syll  syllable count of the lemma
# 10 word_phon   phonetic representation of the surface form
# 11 lemma_phon  phonetic representation of the lemma
# NOTE: the output is still indexed incorrectly.
ANNOTATION_TYPES_ORDERED = (
'word',
'lemma',
'pos',
'ctag',
'msd', # which is ana but it has to be called msd
'word_cv',
'word_syll',
'lemma_cv',
'lemma_syll',
'word_phon',
'lemma_phon')
OPTS = (
'header',
'data',
'sentences',
'paragraphs',
'word',
# 'lemma',
# 'pos',
'msd',
# 'word_cv',
# 'word_syll',
# 'lemma_cv',
# 'lemma_syll',
# 'word_phon',
# 'lemma_phon'
)
OPT_DICT = {
'header': (None, 'header', ''),
'data': (None, 'data', ''),
'sentences': (None, 'sentences', 'base'),
'paragraphs': (None, 'paragraphs', 'base'),
'word': (None, 'tokens', 'noske'),
# 'lemma': (('lemma',), 'lemma', 'noske'),
# 'pos': (('word', 'lemma', 'pos'), 'part-of-speech', 'noske'),
# 'msd': (('lemma', 'pos', 'msd'), 'morpho', 'noske'),
'msd': (('lemma', 'pos', 'msd'), 'morpho', 'hnc'),
# 'word_cv': (('word', 'word_cv'), 'word_cv', 'noske'),
# 'word_syll': (('word', 'word_syll'), 'word_syll', 'noske'),
# 'lemma_cv': (('lemma', 'lemma_cv'), 'lemma_cv', 'noske'),
# 'lemma_syll': (('lemma', 'lemma_syll'), 'lemma_syll', 'noske'),
# 'word_phon': (('word', 'word_phon'), 'word_phon', 'noske'),
# 'lemma_phon': (('lemma', 'lemma_phon'), 'lemma_phon', 'noske')
}
PAT_CUT_SPACE = re.compile(r' ?NoSpace ?')
STOPS = ('SSTOP', 'PSTOP')
BASE = ('paragraphs', 'sentences', 'header', 'data')
FS_ATTRS = {'type': 'lex', 'xmlns': 'http://www.tei-c.org/ns/1.0'}
F2_ATTRS = {'name': 'lex'}
F4_ATTRS = {'name': ''}
ANNOT_SPAN_ATTRS = {'id': '',
'from': '',
'to': ''}
BASE_SPAN_ATTRS = {'from': '', 'to': ''}
LAYER_ATTRS = {'docid': '',
'xmlns': 'http://ids-mannheim.de/ns/KorAP',
'version': 'KorAP-0.4'}
RAW_TEXT_ATTRS = {'docid': '',
'xmlns': 'http://ids-mannheim.de/ns/KorAP'}
PAT_CES_HEADER = re.compile(r'<cesHeader.+?(</cesHeader>)', re.DOTALL | re.MULTILINE)
PAT_SPLITTED_FILES = re.compile(r'(.*?)(?:_\d{3})(\.clean)?\.mxml')
def writing_backup_file(backup_filepath, create_new_backup_file, last_file_infos=None):
    """Create/truncate the backup file, or append one tab-separated record.

    :param backup_filepath: path of the TSV backup file
    :param create_new_backup_file: True -> truncate to an empty file
    :param last_file_infos: iterable of str fields appended as one line
    """
    if create_new_backup_file:
        # Opening in 'w' truncates the file; nothing needs to be written.
        with open(backup_filepath, 'w', encoding='utf-8'):
            pass
    elif last_file_infos is not None:
        # Robustness fix: previously '\t'.join(None) raised TypeError when
        # no record was supplied in append mode.
        with open(backup_filepath, 'a', encoding='utf-8') as outpf:
            print('\t'.join(last_file_infos), file=outpf)
def loading_backup_file(backup_filepath, create_new):
    """Load resume state from the backup TSV.

    Each backup line holds: filename, parent id, child id, finished flag.
    When the last entry is unfinished ('False'), processing resumes one
    parent index earlier at the recorded child index.

    :param backup_filepath: path of the TSV backup file
    :param create_new: True -> (re)create an empty backup and start fresh
    :return: (parent_index, child_index, last_filename, processed_files)
    """
    if create_new:
        writing_backup_file(backup_filepath, create_new)
        return 0, 0, None, set()
    line = ''
    processed_files = set()
    try:
        with open(backup_filepath, encoding='utf-8') as f:
            for line in f:
                processed_files.add(line.split()[0])
    except FileNotFoundError:
        print('FileNotFound: creating new backup file.')
        writing_backup_file(backup_filepath, True)
        return 0, 0, None, set()
    # 'line' still holds the last line of the file here.
    line = line.strip()
    if len(line) > 0:
        values = line.split()
        if len(values) == 4:
            if values[3] == 'False':
                return int(values[1]) - 1, int(values[2]), values[0], processed_files
            else:
                return int(values[1]), 0, values[0], processed_files
    # Fix: an empty or malformed backup previously fell through and returned
    # None, crashing the 4-way unpack at the call site.
    return 0, 0, None, processed_files
def read(noske_clean_files_dict, last_file_index, processed_files):
    """Yield (basename, clean_path, content) for each NoSkE file not yet
    fully processed according to the backup info.

    Files before last_file_index whose stem (without a leading 'source.')
    appears in processed_files are skipped.
    """
    for idx, (noske_file, clean_file) in enumerate(noske_clean_files_dict.items()):
        stem = os.path.splitext(os.path.basename(noske_file.replace('source.', '', 1)))[0]
        if idx < last_file_index and stem in processed_files:
            continue
        with open(noske_file, encoding="iso-8859-2") as fh:
            # NOTE: the yielded triple could perhaps become a namedtuple.
            yield os.path.basename(noske_file), clean_file, fh.read()
def gen_header_xml(header_type, corpora_dir=None, parent_dir=None, clean_xml=None, div=None, docid=None):
    """
    Generate an IDS/KorAP header XML.

    :param docid: text sigle written into the 3rd-level header
    :param div: BeautifulSoup div supplying head/docauthor/date metadata
    :param parent_dir: target directory name for the 2nd-level header file
    :param corpora_dir: corpus root directory
    :param clean_xml: raw XML string whose cesHeader is reused (2nd level)
    :param header_type:
            options: 1. 'corpus_header'
                     2. '2nd_level_header': header of a complete XML
                        (written to disk, returns None)
                     3. '3rd_level_header': header of part of the XML
                        (returned as a soup)
    :return: BeautifulSoup of the header, or None for '2nd_level_header'
    """
    if header_type == '2nd_level_header':
        # Reuse the cesHeader from the clean XML, renamed to idsHeader.
        ces_header = PAT_CES_HEADER.search(clean_xml)
        os.makedirs(os.path.join(corpora_dir, parent_dir), exist_ok=True)
        with open(os.path.join(corpora_dir, parent_dir, 'header.xml'), 'w', encoding='utf-8') as outpf:
            print('<?xml version="1.0" encoding="UTF-8"?>',
                  '<?xml-model href="header.rng" '
                  'type="application/xml" '
                  'schematypens="http://relaxng.org/ns/structure/1.0"?>',
                  '<!DOCTYPE idsCorpus PUBLIC "-//IDS//DTD IDS-XCES 1.0//EN" '
                  '"http://corpora.ids-mannheim.de/idsxces1/DTD/ids.xcesdoc.dtd">',
                  ces_header.group().replace('cesHeader', 'idsHeader'), sep='\n', file=outpf)
        return
    soup = BeautifulSoup(
        '<?xml version="1.0" encoding="UTF-8"?>\n'
        '<?xml-model href="header.rng" '
        'type="application/xml" '
        'schematypens="http://relaxng.org/ns/structure/1.0"?>'
        '<!DOCTYPE idsCorpus PUBLIC "-//IDS//DTD IDS-XCES 1.0//EN" '
        '"http://corpora.ids-mannheim.de/idsxces1/DTD/ids.xcesdoc.dtd">',
        features='lxml')
    if header_type == '3rd_level_header':
        # Build the idsHeader tree top-down; the parent of each new tag is
        # noted in the trailing comments.
        ids_header = soup.new_tag('idsHeader', attrs={'type': 'text'})  # parent: soup
        soup.append(ids_header)
        file_desc = soup.new_tag('fileDesc')  # parent: ids_header
        ids_header.append(file_desc)
        title_stmt = soup.new_tag('titleStmt')  # parent: file_desc
        file_desc.append(title_stmt)
        text_sigle = soup.new_tag('textSigle')  # parent: title_stmt, string = DOC00001.00001
        title_stmt.append(text_sigle)
        text_sigle.string = docid
        t_title = soup.new_tag('t.title')  # parent: title_stmt, string = title
        title_stmt.append(t_title)
        content = div.find('head')
        if content:
            t_title.string = content.text
        publication_stmt = soup.new_tag('publicationStmt')  # parent: file_desc
        file_desc.append(publication_stmt)
        source_desc = soup.new_tag('sourceDesc')  # parent: file_desc
        file_desc.append(source_desc)
        bibl_struct = soup.new_tag('biblStruct')  # parent: source_desc
        source_desc.append(bibl_struct)
        analytic = soup.new_tag('analytic')  # parent: bibl_struct
        bibl_struct.append(analytic)
        h_author = soup.new_tag('h.author')  # parent: analytic, string = author
        analytic.append(h_author)
        # <docAuthor>Addbot</docAuthor>
        content = div.find('docauthor')
        if content:
            h_author.string = content.text
        encoding_desc = soup.new_tag('encodingDesc')  # parent: idsHeader
        ids_header.append(encoding_desc)
        profile_desc = soup.new_tag('profileDesc')  # parent: idsHeader
        ids_header.append(profile_desc)
        creation = soup.new_tag('creation')  # parent: profileDesc
        profile_desc.append(creation)
        creat_date = soup.new_tag('creatDate')  # parent: creation, string = date
        creation.append(creat_date)
        # <date ISO8601="2013-03-02T14:36:02Z"></date>
        content = div.find('date')
        if content:
            # Prefer the machine-readable ISO8601 attribute when present.
            if isinstance(content, dict) and 'iso8601' in content.keys():
                creat_date.string = content['iso8601']
            else:
                creat_date.string = content.text
    return soup
def gen_data_xml(data, docid):
    """Build the KorAP raw-text XML for one document.

    :param data: plain text of the document
    :param docid: document id written to the raw_text element
    :return: BeautifulSoup of the raw-text document
    """
    soup = BeautifulSoup(
        '<?xml version="1.0" encoding="UTF-8"?>\n'
        '<?xml-model href="text.rng" type="application/xml" '
        'schematypens="http://relaxng.org/ns/structure/1.0"?>',
        features='lxml')
    txt = soup.new_tag('text')
    # print(f'"{data}"')
    txt.string = data
    meta = soup.new_tag('metadata', file='metadata.xml')
    # NOTE(review): mutates the shared module-level attribute dict.
    RAW_TEXT_ATTRS['docid'] = docid
    raw_text = soup.new_tag('raw_text', attrs=RAW_TEXT_ATTRS)
    raw_text.append(meta)
    raw_text.append(txt)
    soup.append(raw_text)
    return soup
def gen_annotated_xml(annot_types, docid, annotations_per_line, opt):
    """Build a KorAP span-annotation XML layer.

    Walks the per-token annotation stream, tracking character offsets, and
    emits one <span> per token (or per sentence/paragraph for the base
    layers). SSTOP/PSTOP markers in the stream close sentence/paragraph
    spans.

    :param annot_types: feature names to embed per token (None for base layers)
    :param docid: document id written to the layer element
    :param annotations_per_line: list of (is_space, annotations_dict_or_STOP)
    :param opt: which layer to generate (see OPT_DICT)
    :return: BeautifulSoup of the annotation layer
    """
    soup = BeautifulSoup(
        '<?xml version="1.0" encoding="UTF-8"?>\n'
        '<?xml-model href="span.rng" type="application/xml" '
        'schematypens="http://relaxng.org/ns/structure/1.0"?>',
        features='lxml')
    from_index_sp = 0
    from_index = 0
    to_index = 0
    iden = 0
    # NOTE(review): mutates shared module-level attribute dicts.
    LAYER_ATTRS['docid'] = docid
    soup.append(soup.new_tag('layer', attrs=LAYER_ATTRS))
    span_list = soup.new_tag('spanList')
    for i, (is_space, annotations) in enumerate(annotations_per_line):
        # Tokens glued to the previous one (NoSpace) start one char earlier.
        if not is_space and from_index > 0:
            from_index -= 1
        if annotations == 'SSTOP' or annotations == 'PSTOP':
            if (annotations == 'SSTOP' and opt == 'sentences') or (annotations == 'PSTOP' and opt == 'paragraphs'):
                if from_index_sp == to_index:
                    from_index_sp -= 1
                BASE_SPAN_ATTRS['from'] = f'{from_index_sp}'
                BASE_SPAN_ATTRS['to'] = f'{to_index}'
                span = soup.new_tag('span', attrs=BASE_SPAN_ATTRS)
                span_list.append(span)
                from_index_sp = to_index + 1
            continue
        to_index = from_index + len(annotations['word'])
        # tag+number --> lowest the number the higher in hierarchy it is.
        if opt not in BASE:
            ANNOT_SPAN_ATTRS['id'] = f's_{iden}'
            ANNOT_SPAN_ATTRS['from'] = f'{from_index}'
            ANNOT_SPAN_ATTRS['to'] = f'{to_index}'
            span = soup.new_tag('span', attrs=ANNOT_SPAN_ATTRS)
            if annot_types:
                # level 1
                fs1 = soup.new_tag('fs', attrs=FS_ATTRS)
                # level 2
                f2 = soup.new_tag('f', attrs=F2_ATTRS)
                # level 3
                fs3 = soup.new_tag('fs')
                for annot in annot_types:
                    # level 4: any number of these may follow each other
                    F4_ATTRS['name'] = annot
                    f4 = soup.new_tag('f', attrs=F4_ATTRS)
                    f4.string = annotations[annot]
                    fs3.append(f4)
                span.append(fs1)
                fs1.append(f2)
                f2.append(fs3)
            span_list.append(span)
        from_index = to_index + 1
        iden += 1
    soup.layer.append(span_list)
    return soup
def gen_xml(meta_dict, opt):
    """
    gen. options:
    - header: metadata XML of analyzed text
    - data: XML of raw text
    -(sentences: XML of boundaries of sentences
    - paragraphs): XML of boundaries of paragraphs
    - word: XML of boundaries of words
    - lemma: XML of lemmas
    - pos: XML of lemmas + pos (msd)
    - msd(originally ana): XML of lemmas + pos (msd) + ana
    - word_cv: XML of lemmas + word_cv
    - word_syll: XML of lemmas + word_syll
    - lemma_cv: XML of lemmas + lemma_cv
    - lemma_syll: XML of lemmas + lemma_syll
    - word_phon: XML of lemmas + word_phon
    - lemma_phon: XML of lemmas + lemma_phon
    """
    annot_types, output_xmlname, annot_folder = OPT_DICT[opt]
    corpora = meta_dict["corpora_dir"]
    parent = meta_dict["parent_folder_name"]
    child = meta_dict["child_folder_name"]
    if opt == 'header':
        # Headers use '/' between corpus and document id.
        output_xml = gen_header_xml('3rd_level_header',
                                    docid=f'{corpora}/{parent}.{child}',
                                    div=meta_dict['clean_div'])
    elif opt == 'data':
        output_xml = gen_data_xml(meta_dict['data'],
                                  f'{corpora}_{parent}.{child}')
    else:
        output_xml = gen_annotated_xml(annot_types,
                                       f'{corpora}_{parent}.{child}',
                                       meta_dict['annotations_per_line'], opt)
    return {'output_xml': output_xml, 'output_xmlname': output_xmlname, 'annot_folder': annot_folder}
def gen_docname(num_of_doc, i):
    """Overwrite the tail of the zero-padded counter string with i."""
    suffix = str(i)
    return num_of_doc[:-len(suffix)] + suffix
def get_data(div):
    """Extract the raw text of a div: the first tab field of each non-empty
    line, joined with spaces, with NoSpace placeholders removed together
    with their surrounding spaces."""
    tokens = []
    for raw_line in div.text.split('\n'):
        raw_line = raw_line.strip()
        if not raw_line:
            continue
        if raw_line == '###NOSPACE###':
            tokens.append('NoSpace')
        else:
            tokens.append(raw_line.split('\t')[0])
    return PAT_CUT_SPACE.sub('', ' '.join(tokens))
def get_annotations(tag_to_iterate, annotations_per_line):
    """Recursively collect per-token annotation dicts from a NoSkE tag tree.

    <div> and <sp> containers are recursed into; for every <s> found inside
    any other tag, each tab-separated token line is parsed into a dict keyed
    by ANNOTATION_TYPES_ORDERED. Results are appended to
    *annotations_per_line* (mutated in place) as (is_space, annotations)
    tuples; sentence and paragraph boundaries are marked with the sentinel
    entries (True, 'SSTOP') and (True, 'PSTOP').

    :param tag_to_iterate: BeautifulSoup tag whose children are processed
    :param annotations_per_line: output list, appended to in place
    """
    for tag in tag_to_iterate:
        if tag.name == 'div' or tag.name == 'sp':
            # Container tags: recurse instead of processing directly.
            get_annotations(tag, annotations_per_line)
        elif tag.name is not None:
            for s_tag in tag.find_all('s'):
                if len(s_tag.text.strip()) == 0:
                    continue
                is_space = True
                txt = s_tag.text.strip().split('\n')
                for line in txt:
                    line = line.strip()
                    if len(line) == 0:
                        continue
                    if line == '###NOSPACE###':
                        # Marker: the NEXT token is not preceded by a space.
                        is_space = False
                        continue
                    annotations = {}
                    annotation_count = 0
                    # At most the first 11 tab-separated fields are annotation layers.
                    for k, annotation in enumerate(line.split('\t')[:11]):
                        annotation_count = k
                        annotations[ANNOTATION_TYPES_ORDERED[k]] = annotation
                    if annotation_count < 10:
                        # NoSkE lines sometimes contain fewer tab-separated fields
                        # than there are annotation types; pad the missing ones.
                        start = 11 - (10 - annotation_count)
                        for n in range(start, len(ANNOTATION_TYPES_ORDERED)):
                            annotations[ANNOTATION_TYPES_ORDERED[n]] = '__NA__'
                    annotations_per_line.append((is_space, annotations))
                    if not is_space:
                        # The no-space marker only applies to one token; reset.
                        is_space = True
                # Sentence boundary sentinel.
                annotations_per_line.append((True, 'SSTOP'))
        if tag.name == 'p':
            # Paragraph boundary sentinel.
            annotations_per_line.append((True, 'PSTOP'))
    if tag_to_iterate.find('p') is None:
        # No <p> descendants at all: treat the whole unit as one paragraph.
        annotations_per_line.append((True, 'PSTOP'))
def process_documents(noske_inps, corpora_dir, last_parent_folder_number, last_child_folder_number, backup_filepath):
    """Generator: convert each NoSkE input into per-option KorAP XML outputs.

    For every (filename, clean XML path, NoSkE XML) triple it pairs NoSkE
    <div>s with their clean-XML counterparts (metadata source), generates one
    XML per option in OPTS via gen_xml, and yields
    (gen_xml result dict, parent folder name, child folder name).
    Progress is recorded through writing_backup_file after every sub-document
    so a later run can resume.

    :param noske_inps: iterable of (noske_fname, clean_xml_path, noske_xml)
    :param corpora_dir: root output directory
    :param last_parent_folder_number: resume point for the parent counter
    :param last_child_folder_number: resume point within the first document
    :param backup_filepath: path of the resume/backup file
    """
    parent_folder_name = 'DOC'
    parent_folder_number = '000000'
    last_clean_xml_path = ''
    last_len_of_divs = 0
    start_div_number = 0
    clean_divs = []
    for i, (noske_fname, clean_xml_path, noske_xml) in enumerate(noske_inps, start=last_parent_folder_number + 1):
        parent_folder_number = gen_docname(parent_folder_number, i)
        child_folder_name = '000000'
        # Build the NoSkE soup; <g/> (no-space) markers are preserved as text.
        noske_soup = BeautifulSoup(noske_xml.replace('<g/>', '###NOSPACE###'), 'xml')
        # The filename inside the doc tag --> <doc file="lit_er_ambrus_l.s1.clean" ...>
        noske_doc = noske_soup.find('doc')
        fname_wo_ext = noske_doc['file']
        print(fname_wo_ext)
        # List of NoSkE div tags. One div equals one document.
        noske_divs = noske_soup.find_all('div')
        # Several NoSkE files may share one clean file: offset into its divs.
        if clean_xml_path == last_clean_xml_path:
            start_div_number += last_len_of_divs
        else:
            start_div_number = 0
        # Clean counterpart of the NoSkE file (source of metadata/header XMLs).
        if len(clean_xml_path) > 1:
            clean_xml = open(clean_xml_path, encoding='iso-8859-2').read()
            clean_soup = BeautifulSoup(clean_xml, 'html.parser')
        else:
            continue
        # List of clean div tags.
        clean_divs = clean_soup.find_all('div')
        # Nested divs: only the outermost div counts as the document.
        if len(noske_divs[0].find_all('div')) > 0:
            noske_divs = noske_divs[0:1]
            clean_divs = clean_divs[0:1]
        # Generate and write the metadata (header) of the whole input XML.
        if last_child_folder_number == 0:
            gen_header_xml('2nd_level_header', corpora_dir=corpora_dir,
                           parent_dir=f'{parent_folder_name}{parent_folder_number}',
                           clean_xml=clean_xml)
        for j, div in enumerate(noske_divs):
            # Skip sub-documents already finished in a previous (resumed) run.
            if j < last_child_folder_number:
                continue
            child_folder_number = j + 1
            clean_div = clean_divs[j + start_div_number]
            child_folder_name = gen_docname(child_folder_name, child_folder_number)
            annotations_per_line = []
            data = get_data(div)
            # Collect the analysis of this text section into the annotations list.
            get_annotations(div, annotations_per_line)
            meta_dict = {'fname_wo_ext': fname_wo_ext, 'annotations_per_line': annotations_per_line, 'data': data,
                         'clean_div': clean_div, 'corpora_dir': os.path.basename(corpora_dir),
                         'parent_folder_name': f'{parent_folder_name}{parent_folder_number}',
                         'child_folder_name': child_folder_name}
            for opt in OPTS:
                yield gen_xml(meta_dict, opt), \
                      meta_dict['parent_folder_name'], \
                      meta_dict['child_folder_name']
            is_last_subfile = True if j == len(noske_divs) - 1 else False
            # Record progress so an interrupted conversion can resume here.
            writing_backup_file(backup_filepath, False,
                                (fname_wo_ext, f'{i}', f'{child_folder_number}', f'{is_last_subfile}'))
        last_clean_xml_path = clean_xml_path
        last_len_of_divs = len(noske_divs)
        # Resume offset only applies to the very first document.
        last_child_folder_number = 0
def str2bool(v):
    """Interpret a command-line value as a boolean.

    :param v: the raw argument value (bool or string)
    :return: True/False for recognised values
    :raises argparse.ArgumentTypeError: if the string is not a recognised boolean
    """
    if isinstance(v, bool):
        return v
    lowered = v.lower()
    if lowered in {'yes', 'true', 't', 'y', '1'}:
        return True
    if lowered in {'no', 'false', 'f', 'n', '0'}:
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
def get_args():
    """Parse command-line arguments and pair each NoSkE file with its clean XML.

    :return: dict of the parsed arguments; 'input_noske_filepath' is replaced
        by a {noske_file_path: clean_file_path} mapping. NoSkE files whose
        clean counterpart cannot be located are reported and dropped.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('input_noske_filepath', help='Path to NosKe files.', nargs="+")
    parser.add_argument('-d', '--output_dir', help='Path to output directory', nargs='?')
    parser.add_argument('-m', '--input_clean_iglob_filepath', help='Path of clean files root folder for iglob module.', nargs="?")
    parser.add_argument('-b', '--backup_filepath',
                        help='Path of backup file which contains informations about processed files.',
                        nargs='?', default='./backup.txt')
    parser.add_argument('-c', '--create_new',
                        help='Create whole new output. In case of one would start to convert '
                             'the MNSZ to KorAp format from the beginning.',
                        nargs='?',
                        type=str2bool, const=True, default=False)
    parser.add_argument('-a', '--append', help='Append new annotation xml files to existed output KorAP-XML-s',
                        nargs='?',
                        type=str2bool, const=True, default=False)
    args = parser.parse_args()
    # Index clean files by basename (without extension) for O(1) lookup below.
    input_clean_files = {os.path.splitext(os.path.basename(clean_filepath))[0]: clean_filepath
                         for clean_filepath in iglob(args.input_clean_iglob_filepath, recursive=True)}
    input_noske_files = {}
    for noske_file in sorted(args.input_noske_filepath):
        # Noske filenames start with source., this is the part which is cut down from filename
        noske_to_clean_fname = os.path.basename(noske_file)[7:]
        clean_file = input_clean_files.get(os.path.splitext(noske_to_clean_fname)[0], '')
        if len(clean_file) == 0:
            # Searching for files which were originally one file in clean xml but later splitted in noske
            search_and_match = PAT_SPLITTED_FILES.search(noske_to_clean_fname)
            # PAT_SPLITTED_FILES = re.compile(r'(.*?)(?:_\d{3})(\.clean)?\.mxml')
            if search_and_match:
                group_2 = search_and_match.group(2) or ''
                if f'{search_and_match.group(1)}{group_2}' in input_clean_files:
                    clean_file = input_clean_files[f'{search_and_match.group(1)}{group_2}']
        if len(clean_file) == 0:
            print(f'Failed to find MNSZ clean file for metadata for {noske_to_clean_fname}')
            continue
        input_noske_files[noske_file] = clean_file
    args.input_noske_filepath = input_noske_files
    return vars(args)
def main():
    """Entry point: convert NoSkE-format MNSZ files to the KorAP-XML layout."""
    args = get_args()
    if args['create_new'] and not args['append']:
        # Starting from scratch: remove any previous output tree.
        try:
            shutil.rmtree(args['output_dir'])
        except FileNotFoundError:
            pass
    # Load the serial number of the last file produced by the previous conversion run
    last_parent_folder_number, \
        last_child_folder_number, \
        last_documentum_name, \
        processed_documents = loading_backup_file(args['backup_filepath'], args['create_new'])
    corpora_dir = args['output_dir']
    # NoSkE files supply the annotations
    noske_inp = read(args['input_noske_filepath'], last_parent_folder_number, processed_documents)
    # Clean files supply the metadata (headers)
    outp = process_documents(noske_inp, corpora_dir, last_parent_folder_number, last_child_folder_number,
                             args['backup_filepath'])
    for outpf in outp:
        parent_dir = ''.join(outpf[1])
        child_dir = outpf[2]
        annot_folder = outpf[0]['annot_folder']
        os.makedirs(os.path.join(corpora_dir, parent_dir, child_dir, annot_folder), exist_ok=True)
        with open(os.path.join(corpora_dir, parent_dir, child_dir,
                               annot_folder, os.path.splitext(outpf[0]['output_xmlname'])[0] + '.xml'),
                  "w", encoding="utf-8") as f:
            if 'data' in outpf[0]['output_xmlname']:
                # prettify() prepends two spaces and a newline to the text,
                # which later caused problems during indexing, so the data
                # file is written unprettified.
                f.write(f'{outpf[0]["output_xml"]}')
            else:
                f.write(f'{outpf[0]["output_xml"].prettify()}')


if __name__ == '__main__':
    main()
|
989,822 | 373b0a688dff85d428760f0558f6ed7862eb47dc | from datetime import datetime
# Show the current date and time to the user.
kuupäev_kellaeg = datetime.today()
print("Kuupäev ja kellaeg: " + str(kuupäev_kellaeg))

sissekanne = input('Sisesta sissekande tekst: ')
# Append the entry to the diary file. The context manager guarantees the file
# is closed even if the write fails, and the explicit encoding avoids
# platform-dependent defaults (the original open()/close() pair leaked the
# handle on error and used the locale encoding). A fresh timestamp is taken
# at write time, as before.
with open("paevik.txt", "a", encoding="utf-8") as fail:
    fail.write(str(datetime.today()) + '\n' + sissekanne + '\n' + '\n')
|
989,823 | eedda26166487384031cad07decc867fc2a0faf7 | from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns, static, settings
# URL configuration for the project.
# NOTE(review): django.conf.urls.patterns() is a legacy API that was removed
# in later Django versions — confirm the target Django version before upgrading.
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'genco.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),
    #url(r'^time/$', current_datetime),
    #url(r'^admin/', include(admin.site.urls)),
    #(r'^', include('app.urls')),
    # Active application routes, each mounted under its own namespace.
    url(r'^gencoui/', include('gencoui.urls',namespace="gencoui")),
    url(r'^login/', include('login.urls',namespace="login")),
    #url(r'^login/$', auth_views.login, name='login'),
    #url(r'^logout/$', auth_views.logout, {'next_page': '/login'}, name='logout'),
    #url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework'))
)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) |
989,824 | 53c0af10aec0b013102162b7c0830baef7fb112f | import django_filters.rest_framework
from rest_framework import viewsets, permissions
from app.arts.models import ArtWork
from app.arts.serializers import ArtWorkSerializer
class ArtWorkViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only API endpoint exposing visible (show=True) ArtWork records."""
    permission_classes = []  # open access: no permission checks
    serializer_class = ArtWorkSerializer
    queryset = ArtWork.objects.filter(show=True)
    filter_backends = [django_filters.rest_framework.DjangoFilterBackend]
    # One-element tuple (note the trailing comma): filtering is allowed on 'type' only.
    filterset_fields = 'type',
    pagination_class = None  # return all matching results unpaginated
989,825 | 3ee44c71b6af1c2596c834e70e8323695985007a | # Char creation file
import curses
import modules.menu as menu
from modules.db_functions import db_create_connection, db_return_class_object, db_select_values, \
db_insert_class_in_table, db_insert_inventory_char_creation
from modules.main_game import start_main
from modules.custom_classes import *
from modules.functions import *
# This function lets you pick your race
def char_creation():
    """Interactive race-selection step of character creation.

    Reads the race names from the database, prints them as a numbered menu,
    then loops on user input until a valid race number (continues to
    new_char_race) or 'c' (back to the start menu) is entered.
    """
    clear_screen()
    races_list = []
    conn = db_create_connection('db/dnd.db')
    results = db_select_values(conn, 'races', 'name')
    for row in results:
        races_list.append(row['name'])
    conn.close()
    # Map menu numbers (as strings) to race names for the prompt below.
    item_dict = list_to_num_dict(races_list)
    typed_print("Now let's pick a race!")
    print()
    print_list(item_dict, var_type='dict')
    print()
    typed_print(f'Please choose one of the above or cancel character creation {cb}[?,c]{ce}:{cb} ', new_line=False)
    while True:
        menu_choice = input().lower()
        print(ce, end='')
        if menu_choice in item_dict.keys():
            return new_char_race(item_dict[menu_choice])
        elif menu_choice == 'c':
            return menu.start_menu()
        else:
            typed_print(f'Invalid option! Enter a number or c to return to admin menu! '
                        f'{cb}[?,c]{ce}:{cb} ', new_line=False)
# This function creates the chosen race and displays the results
def new_char_race(race):
    """Roll the stats of a new character of the given race and confirm them.

    Pulls the race definition from the database, rolls a candidate character
    (stats, height, weight, age) adjusted by the race bonuses, prints the
    sheet, then loops until the player (a)ccepts (continues to class choice),
    (r)erolls, or (c)ancels back to the start menu.

    :param race: name of the race picked in char_creation()
    """
    # Fetch the race row (stat bonuses and physical ranges) from the database.
    conn = db_create_connection('db/dnd.db')
    pulled_race: Race = db_return_class_object(conn, 'races', 'name', race, Race)
    conn.close()

    def roll_char():
        # Roll a fresh candidate: 3d6 (rerolling ones) plus the race bonus per stat.
        pre_char_build = Player(Player_race=pulled_race.Name)
        pre_char_build.Str = dice(6, rolls=3, reroll_ones=True) + pulled_race.Str
        pre_char_build.Dex = dice(6, rolls=3, reroll_ones=True) + pulled_race.Dex
        pre_char_build.Con = dice(6, rolls=3, reroll_ones=True) + pulled_race.Con
        pre_char_build.Wis = dice(6, rolls=3, reroll_ones=True) + pulled_race.Wis
        pre_char_build.Int = dice(6, rolls=3, reroll_ones=True) + pulled_race.Int
        pre_char_build.Cha = dice(6, rolls=3, reroll_ones=True) + pulled_race.Cha
        pre_char_build.Height = feet_inch(randint(int(pulled_race.Height[0]),
                                                  int(pulled_race.Height[1])))
        pre_char_build.Weight = randint(pulled_race.Weight[0], pulled_race.Weight[1])
        pre_char_build.Age = randint(pulled_race.Age[0], pulled_race.Age[1])
        # Here we figure out what the modifiers are for the above rolled stats
        str_bonus = stat_bonus(int(pre_char_build.Str), colored=True)
        dex_bonus = stat_bonus(int(pre_char_build.Dex), colored=True)
        con_bonus = stat_bonus(int(pre_char_build.Con), colored=True)
        wis_bonus = stat_bonus(int(pre_char_build.Wis), colored=True)
        int_bonus = stat_bonus(int(pre_char_build.Int), colored=True)
        cha_bonus = stat_bonus(int(pre_char_build.Cha), colored=True)
        clear_screen()
        # Here we start printing out the created character stats
        typed_print('Here are your characters stats:')
        print()
        typed_print(f"Race: {cb}{pre_char_build.Player_race}{ce}")
        typed_print(f"Height: {cb}{pre_char_build.Height}{ce}")
        typed_print(f"Weight: {cb}{pre_char_build.Weight} lbs{ce}")
        typed_print(f"Age: {cb}{pre_char_build.Age}{ce}")
        print()
        typed_print(f"{'Attribute':<14} {'Stat':<4} Mod")
        typed_print('-----------------------')
        typed_print(f"{'Strength:':<14} {cb}{pre_char_build.Str:<4}{ce} {str(str_bonus):>2}")
        typed_print(f"{'Dexterity:':<14} {cb}{pre_char_build.Dex:<4}{ce} {str(dex_bonus):>2}")
        typed_print(f"{'Constitution:':<14} {cb}{pre_char_build.Con:<4}{ce} {str(con_bonus):>2}")
        typed_print(f"{'Wisdom:':<14} {cb}{pre_char_build.Wis:<4}{ce} {str(wis_bonus):>2}")
        typed_print(f"{'Intelligence:':<14} {cb}{pre_char_build.Int:<4}{ce} {str(int_bonus):>2}")
        typed_print(f"{'Charisma:':<14} {cb}{pre_char_build.Cha:<4}{ce} {str(cha_bonus):>2}")
        print()
        typed_print(f"Do you want to {cb}(C){ce}ancel creation, {cb}(R){ce}eroll, "
                    f"or {cb}(A){ce}ccept these stats? {cb}[c,r,a]{ce}:{cb} ", new_line=False)
        return pre_char_build

    # Roll the first candidate right away. (The former `first_run` flag was
    # always True at this point and the pre-initialized empty Player() was
    # immediately overwritten, so both were dead code and have been removed.)
    char_build: Player = roll_char()
    while True:
        reroll = input()
        print(ce, end='')
        if reroll.lower() == "r":
            char_build = roll_char()
            continue
        elif reroll.lower() == 'c':
            return menu.start_menu()
        elif reroll.lower() == 'a':
            return char_class_choice(char_build)
        else:
            typed_print('Invalid choice! Choose (C)ancel creation, (R)eroll, or (A)ccept! [c,r,a]: ')
# This function is for choosing a class. The new_char_stats dictionary was passed to this function that was
# created in the previous function. This is so it can then be passed on and added to by the class creation function
def char_class_choice(char_build: Player):
    """Interactive archetype (class) selection for the rolled character.

    Lists archetypes from the database as a numbered menu and loops on input
    until a valid number (continues to char_class_build) or 'c' is entered.

    NOTE(review): cancelling here returns {'Success': False}, while the other
    creation steps cancel back to menu.start_menu() — confirm this asymmetry
    is intentional.

    :param char_build: the Player built so far (race and stats set)
    """
    clear_screen()
    pulled_archetypes = []
    # NOTE(review): called without an explicit DB path here (other call sites
    # pass 'db/dnd.db') — presumably db_create_connection has that default; verify.
    conn = db_create_connection()
    archetype_returned = db_select_values(conn, 'archetype', 'name')
    for row in archetype_returned:
        pulled_archetypes.append(row['name'])
    conn.close()
    item_dict = list_to_num_dict(pulled_archetypes)
    typed_print(f'Now choose an Archetype for your {char_build.Player_race}!')
    print()
    print_list(item_dict, var_type='dict')
    print()
    typed_print(f'Please choose a class or ({cb}C{ce})ancel character creation {cb}[?,c]{ce}:{cb} ', new_line=False)
    while True:
        menu_choice = input().lower()
        print(ce, end='')
        if menu_choice in item_dict.keys():
            return char_class_build(char_build, item_dict[menu_choice])
        elif menu_choice == 'c':
            return {'Success': False}
        else:
            typed_print(f'Invalid option! Enter a number or ({cb}C{ce})ancel character creation! '
                        f'{cb}[?,c]{ce}:{cb} ', new_line=False)
# Once a class is chosen, here we start building the final aspects of the character, the new_char_stats dictionary
# has been passed down to the function and renamed char_stats
def char_class_build(char_build: Player, player_choice: str) -> dict:
    """Finalize the character for the chosen archetype and persist it.

    Rolls hit points, derives armor class and carry weight, asks for a name,
    shows the final sheet, then loops on input: on (a)ccept the character and
    its starting inventory are written to the database and the curses game
    loop is started; (c)ancel — or any exception — returns to the start menu.

    :param char_build: partially built Player from the race step
    :param player_choice: name of the chosen archetype
    """
    # NOTE(review): this connection is re-bound inside the accept branch below
    # without being closed first, and is never closed on the exception path —
    # consider a try/finally or context manager.
    conn = db_create_connection()
    pulled_archetype: Archetype = db_return_class_object(conn, 'archetype', 'name', player_choice, Archetype)
    char_build.Player_type = pulled_archetype.Name
    # Here we're going to roll for hit points, breaking the processes out into the different parts so we can
    # lay it all out for the user then add the total hit points rolled into the dictionary
    try:
        hit_die = pulled_archetype.Hit_die
        con_mod = stat_bonus(char_build.Con)
        dex_mod = stat_bonus(char_build.Dex)
        hp_roll = dice(hit_die, reroll_ones=True)
        tot_hp = hp_roll + con_mod + 8  # every race starts with 8 base hit points
        this_class = char_build.Player_type
        this_race = char_build.Player_race
        char_build.Max_HP = tot_hp
        # Now well figure out the base AC (10 + Dex mod) and add that to the dataclass
        char_build.AC = 10 + stat_bonus(char_build.Dex)
        # And the carry weight
        char_build.Carry_weight = 10 * char_build.Str
        clear_screen()
        typed_print(f'You have chosen to become a {this_race} {this_class}!')
        print()
        typed_print(f'Every race starts with {cb}8{ce} hit points. ')
        typed_print(f'You rolled a {cb}d{hit_die}{ce} for class hit points getting a roll of {cb}{hp_roll}{ce}.')
        typed_print(f'With your constitution modifier of {cb if con_mod >= 0 else cr}{con_mod}{ce} '
                    f'your total hit points are now {cb}{tot_hp}{ce}')
        print()
        typed_print(f'Your base armor class will be {cb}{char_build.AC}{ce}. '
                    f'(10 + Dexterity modifier of {cb if dex_mod >= 0 else cr}{dex_mod}{ce})')
        print()
        typed_print(f'Your carry weight will be {cb}{char_build.Carry_weight} lbs{ce} This is figured by'
                    f'(10 x Strength)')
        typed_print('Now enter a name for your character, then review character creation: ', new_line=False)
        char_build.Player_name = input()
        clear_screen()
        # Here we figure out what the final stats and modifiers are
        typed_print('Here are your final characters stats:')
        print()
        typed_print(f"You are a {cb}{char_build.Player_race} {char_build.Player_type}{ce}"
                    f" named {cy}{char_build.Player_name}{ce}.")
        print()
        typed_print(f"{'Height:':<14} {cb}{char_build.Height}{ce}")
        typed_print(f"{'Weight:':<14} {cb}{char_build.Weight} lbs{ce}")
        typed_print(f"{'Age:':<14} {cb}{char_build.Age}{ce}")
        typed_print(f"{'Hit points:':<14} {cb}{char_build.Max_HP}{ce}")
        typed_print(f"{'Armor Class:':<14} {cb}{char_build.AC}{ce}")
        typed_print(f"{'Max Load:':14} {cb}{char_build.Carry_weight}{ce}")
        print()
        typed_print(f"{cbol}{lg}{'Attribute':<14} {'Stat':<4} Mod{ce}")
        typed_print('-----------------------')
        typed_print(f"{'Strength:':<14} {cb}{char_build.Str:<4}{ce} {stat_bonus(char_build.Str, colored=True)}")
        typed_print(f"{'Dexterity:':<14} {cb}{char_build.Dex:<4}{ce} {stat_bonus(char_build.Dex, colored=True)}")
        typed_print(f"{'Constitution:':<14} {cb}{char_build.Con:<4}{ce} {stat_bonus(char_build.Con, colored=True)}")
        typed_print(f"{'Wisdom:':<14} {cb}{char_build.Wis:<4}{ce} {stat_bonus(char_build.Wis, colored=True)}")
        typed_print(f"{'Intelligence:':<14} {cb}{char_build.Int:<4}{ce} {stat_bonus(char_build.Int, colored=True)}")
        typed_print(f"{'Charisma:':<14} {cb}{char_build.Cha:<4}{ce} {stat_bonus(char_build.Cha, colored=True)}")
        print()
        typed_print('Choose (A)ccept to continue with this character or (C) to try again [a,c]: ', new_line=False)
        while True:
            final_choice = input()
            if final_choice.lower() == 'a':
                char_build.Current_HP = char_build.Max_HP
                # Figure out what the starting inventory is
                starting_inv = {}
                num = 0
                conn = db_create_connection()
                for each in pulled_archetype.Items:
                    item = db_return_class_object(conn, 'items', 'name', each, Items)
                    starting_inv[num] = map_items_to_inventory(item, char_build.Player_name)
                    num += 1
                # write it all off to the DB
                db_insert_class_in_table(conn, char_build, 'saves')
                db_insert_inventory_char_creation(conn, starting_inv)
                conn.close()
                return curses.wrapper(start_main, char_build)
            elif final_choice.lower() == 'c':
                conn.close()
                return menu.start_menu()
            else:
                typed_print('Choice was not valid. Enter A or C! [a,c]: ', new_line=False)
    except Exception as ex:
        print(f'Something went wrong in final character creation: {ex}')
        input('Press enter to continue to start menu....')
        return menu.start_menu()
|
989,826 | 3c1d5908e083dd49693b8bc93f501cd9a98d4d59 | """add user table
Revision ID: 1478867a872a
Revises: 657669eb5fcb
Create Date: 2020-08-06 00:35:31.088631
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '1478867a872a'
down_revision = '657669eb5fcb'
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``user`` table and a non-unique index on ``username``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('user',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('username', sa.String(length=64), nullable=True),
    sa.Column('password_hash', sa.String(length=128), nullable=True),
    sa.PrimaryKeyConstraint('id', name=op.f('pk_user'))
    )
    # The index is added via a batch operation, as generated by Alembic.
    with op.batch_alter_table('user', schema=None) as batch_op:
        batch_op.create_index(batch_op.f('ix_user_username'), ['username'], unique=False)

    # ### end Alembic commands ###
def downgrade():
    """Drop the ``username`` index and the ``user`` table (reverses upgrade)."""
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('user', schema=None) as batch_op:
        batch_op.drop_index(batch_op.f('ix_user_username'))

    op.drop_table('user')
    # ### end Alembic commands ###
|
989,827 | 90c37716cc70980e2cc8a9b2d54c53253f94a82e | import numbers
import numpy as np
LIMIT = 99999999
# BBoxes are [x1, y1, x2, y2]
def clip_bbox(bboxes, min_clip, max_x_clip, max_y_clip):
    """Clip [x1, y1, x2, y2] coordinates to the given bounds.

    Accepts a 4xn array (rows are coordinates) or a single length-4 vector.
    The clipped values are written back into the array that was passed in.
    """
    clipped = bboxes
    single_box = len(clipped.shape) == 1
    if single_box:
        clipped = clipped[:, np.newaxis]
    # x coordinates (rows 0 and 2) and y coordinates (rows 1 and 3) clip separately.
    clipped[[0, 2]] = np.clip(clipped[[0, 2]], min_clip, max_x_clip)
    clipped[[1, 3]] = np.clip(clipped[[1, 3]], min_clip, max_y_clip)
    return clipped[:, 0] if single_box else clipped
# [x1 y1, x2, y2] to [xMid, yMid, width, height]
def xyxy_to_xywh(bboxes, clip_min=-LIMIT, clip_width=LIMIT, clip_height=LIMIT, round=False):
    """Convert corner-format boxes (4xn array or length-4 vector) to
    center/size format, optionally clipping and rounding the result.

    Rows beyond the first four (e.g. scores) are copied through unchanged.
    """
    if isinstance(bboxes, list):
        bboxes = np.array(bboxes).astype(np.float32)
    squeeze = len(bboxes.shape) == 1
    if squeeze:
        bboxes = bboxes[:, np.newaxis]
    converted = np.zeros(bboxes.shape)
    converted[0] = (bboxes[0] + bboxes[2]) / 2.0
    converted[1] = (bboxes[1] + bboxes[3]) / 2.0
    converted[2] = bboxes[2] - bboxes[0]
    converted[3] = bboxes[3] - bboxes[1]
    # Only clip when a non-default bound was supplied.
    if clip_min != -LIMIT or clip_width != LIMIT or clip_height != LIMIT:
        converted = clip_bbox(converted, clip_min, clip_width, clip_height)
    if converted.shape[0] > 4:
        converted[4:] = bboxes[4:]
    if squeeze:
        converted = converted[:, 0]
    if round:
        converted = np.round(converted).astype(int)
    return converted
# [xMid, yMid, width, height] to [x1 y1, x2, y2]
def xywh_to_xyxy(bboxes, clip_min=-LIMIT, clip_width=LIMIT, clip_height=LIMIT, round=False):
    """Convert center/size boxes (4xn array or length-4 vector) back to
    corner format, optionally clipping and rounding the result.

    Rows beyond the first four (e.g. scores) are copied through unchanged.
    """
    if isinstance(bboxes, list):
        bboxes = np.array(bboxes).astype(np.float32)
    squeeze = len(bboxes.shape) == 1
    if squeeze:
        bboxes = bboxes[:, np.newaxis]
    corners = np.zeros(bboxes.shape)
    half_w = bboxes[2] / 2.0
    half_h = bboxes[3] / 2.0
    corners[0] = bboxes[0] - half_w
    corners[1] = bboxes[1] - half_h
    corners[2] = bboxes[0] + half_w
    corners[3] = bboxes[1] + half_h
    # Only clip when a non-default bound was supplied.
    if clip_min != -LIMIT or clip_width != LIMIT or clip_height != LIMIT:
        corners = clip_bbox(corners, clip_min, clip_width, clip_height)
    if corners.shape[0] > 4:
        corners[4:] = bboxes[4:]
    if squeeze:
        corners = corners[:, 0]
    if round:
        corners = np.round(corners).astype(int)
    return corners
# @bboxes {np.array} 4xn array of boxes to be scaled
# @scalars{number or arraylike} scalars for width and height of boxes
# @in_place{bool} If false, creates new bboxes.
def scale_bbox(bboxes, scalars, clip_min=-LIMIT, clip_width=LIMIT, clip_height=LIMIT, round=False, in_place=False):
    """Scale each box's width/height about its center by per-axis scalars."""
    if isinstance(bboxes, list):
        bboxes = np.array(bboxes, dtype=np.float32)
    squeeze = len(bboxes.shape) == 1
    if squeeze:
        bboxes = bboxes[:, np.newaxis]
    # Normalize scalars to a 2xn float32 array: row 0 scales width, row 1 height.
    if isinstance(scalars, numbers.Number):
        scalars = np.full((2, bboxes.shape[1]), scalars, dtype=np.float32)
    if not isinstance(scalars, np.ndarray):
        scalars = np.array(scalars, dtype=np.float32)
    if len(scalars.shape) == 1:
        scalars = np.tile(scalars[:, np.newaxis], (1, bboxes.shape[1])).astype(np.float32)
    bboxes = bboxes.astype(np.float32)
    width = bboxes[2] - bboxes[0]
    height = bboxes[3] - bboxes[1]
    x_mid = (bboxes[0] + bboxes[2]) / 2.0
    y_mid = (bboxes[1] + bboxes[3]) / 2.0
    scaled = bboxes if in_place else bboxes.copy()
    scaled[0] = x_mid - width * scalars[0] / 2.0
    scaled[1] = y_mid - height * scalars[1] / 2.0
    scaled[2] = x_mid + width * scalars[0] / 2.0
    scaled[3] = y_mid + height * scalars[1] / 2.0
    # Only clip when a non-default bound was supplied.
    if clip_min != -LIMIT or clip_width != LIMIT or clip_height != LIMIT:
        scaled = clip_bbox(scaled, clip_min, clip_width, clip_height)
    if squeeze:
        scaled = scaled[:, 0]
    if round:
        scaled = np.round(scaled).astype(np.int32)
    return scaled
def make_square(bboxes, in_place=False):
    """Expand each box's smaller side so the box becomes square (same center)."""
    if isinstance(bboxes, list):
        bboxes = np.array(bboxes).astype(np.float32)
    # A length-4 vector is a single box; otherwise boxes are columns of a 4xn array.
    num_boxes = 1 if len(bboxes.shape) == 1 else bboxes.shape[1]
    width = bboxes[2] - bboxes[0]
    height = bboxes[3] - bboxes[1]
    side = np.maximum(width, height)
    # Per-axis scale factors that grow the short side up to the long one.
    scalars = np.zeros((2, num_boxes))
    scalars[0] = side * 1.0 / width
    scalars[1] = side * 1.0 / height
    return scale_bbox(bboxes, scalars, in_place=in_place)
def to_crop_coordinate_system(bbox_to_change, crop_location, crop_padding, crop_size):
    """Convert a box from full-image coordinates to a crop's coordinate frame.

    Useful for getting a bounding box from image coordinates to its location
    within the cropped image.

    :param bbox_to_change: xyxy box whose coordinates will be converted
    :param crop_location: xyxy box of the new origin/max points (without padding)
    :param crop_padding: factor to pad crop_location by (1 = unchanged, 2 = doubled)
    :param crop_size: maximum size of the output coordinate frame
    """
    if isinstance(bbox_to_change, list):
        bbox_to_change = np.array(bbox_to_change)
    if isinstance(crop_location, list):
        crop_location = np.array(crop_location)
    bbox_to_change = bbox_to_change.astype(np.float32)
    crop_location = crop_location.astype(np.float32)
    padded_crop = scale_bbox(crop_location, crop_padding)
    padded_crop_xywh = xyxy_to_xywh(padded_crop)
    # Translate to the padded crop's origin, then rescale into the crop_size frame.
    bbox_to_change -= padded_crop[[0, 1, 0, 1]]
    bbox_to_change *= crop_size / padded_crop_xywh[[2, 3, 2, 3]]
    return bbox_to_change
# Inverts the transformation from to_crop_coordinate_system
def from_crop_coordinate_system(bbox_to_change, crop_location, crop_padding, crop_size):
    """Convert a box from a crop's coordinate frame back to full-image coordinates.

    :param crop_size: maximum size of the input coordinate frame
    """
    if isinstance(bbox_to_change, list):
        bbox_to_change = np.array(bbox_to_change)
    if isinstance(crop_location, list):
        crop_location = np.array(crop_location)
    bbox_to_change = bbox_to_change.astype(np.float32)
    crop_location = crop_location.astype(np.float32)
    padded_crop = scale_bbox(crop_location, crop_padding)
    padded_crop_xywh = xyxy_to_xywh(padded_crop)
    # Rescale out of the crop_size frame, then translate back to the image origin.
    bbox_to_change *= padded_crop_xywh[[2, 3, 2, 3]] / crop_size
    bbox_to_change += padded_crop[[0, 1, 0, 1]]
    return bbox_to_change
"""
@rects1 numpy dx4 matrix of bounding boxes
@rect2 single numpy 1x4 matrix of bounding box
@return dx1 IOUs
Rectangles are [x1, y1, x2, y2]
"""
def IOU_numpy(rects1, rect2):
    """Vectorized IOU of every row of rects1 (dx4, [x1, y1, x2, y2]) against
    a single box rect2. Returns a length-d array of IOUs."""
    (num_rects, _) = rects1.shape
    left = np.fmax(rects1[:, 0], rect2[0])
    right = np.fmin(rects1[:, 2], rect2[2])
    top = np.fmax(rects1[:, 1], rect2[1])
    bottom = np.fmin(rects1[:, 3], rect2[3])
    # Negative extents mean no overlap; clamp them to zero.
    overlap = np.fmax(right - left, 0) * np.fmax(bottom - top, 0)
    areas1 = (rects1[:, 2] - rects1[:, 0]) * (rects1[:, 3] - rects1[:, 1])
    area2 = (rect2[2] - rect2[0]) * (rect2[3] - rect2[1])
    # Floor the union to avoid division by zero for degenerate boxes.
    denom = np.fmax(areas1 + area2 - overlap, 0.00001)
    return overlap * 1.0 / denom
def IOU_lists(rects1, rects2):
    """Row-wise IOU between two equally shaped dx4 arrays of [x1, y1, x2, y2]
    boxes. Returns a length-d array of IOUs."""
    (num_rects, _) = rects1.shape
    left = np.fmax(rects1[:, 0], rects2[:, 0])
    right = np.fmin(rects1[:, 2], rects2[:, 2])
    top = np.fmax(rects1[:, 1], rects2[:, 1])
    bottom = np.fmin(rects1[:, 3], rects2[:, 3])
    # Negative extents mean no overlap; clamp them to zero.
    overlap = np.fmax(right - left, 0) * np.fmax(bottom - top, 0)
    areas1 = (rects1[:, 2] - rects1[:, 0]) * (rects1[:, 3] - rects1[:, 1])
    areas2 = (rects2[:, 2] - rects2[:, 0]) * (rects2[:, 3] - rects2[:, 1])
    # Floor the union to avoid division by zero for degenerate boxes.
    denom = np.fmax(areas1 + areas2 - overlap, 0.00001)
    return overlap * 1.0 / denom
# Rectangles are [x1, y1, x2, y2]
def IOU(rect1, rect2):
    """IOU of two single boxes; corner order is normalized first, so
    [x2, y2, x1, y1] inputs are handled too."""
    if not isinstance(rect1, np.ndarray):
        rect1 = np.array(rect1)
    if not isinstance(rect2, np.ndarray):
        rect2 = np.array(rect2)
    # Normalize so index 0/1 hold the min corner and 2/3 the max corner.
    r1 = [min(rect1[[0, 2]]), min(rect1[[1, 3]]), max(rect1[[0, 2]]), max(rect1[[1, 3]])]
    r2 = [min(rect2[[0, 2]]), min(rect2[[1, 3]]), max(rect2[[0, 2]]), max(rect2[[1, 3]])]
    overlap_w = min(r1[2], r2[2]) - max(r1[0], r2[0])
    overlap_h = min(r1[3], r2[3]) - max(r1[1], r2[1])
    inter = max(0, overlap_w) * max(0, overlap_h)
    union = (r1[2] - r1[0]) * (r1[3] - r1[1]) + (r2[2] - r2[0]) * (r2[3] - r2[1]) - inter
    # Floor the union to avoid division by zero for degenerate boxes.
    return inter * 1.0 / max(union, 0.00001)
def intersection(rect1, rect2):
    """Overlap area of two [x1, y1, x2, y2] boxes (0 if they are disjoint)."""
    overlap_w = max(0, min(rect1[2], rect2[2]) - max(rect1[0], rect2[0]))
    overlap_h = max(0, min(rect1[3], rect2[3]) - max(rect1[1], rect2[1]))
    return overlap_w * overlap_h
"""
@rects1 numpy dx5 matrix of bounding boxes
@rect2 single numpy 1x4 matrix of bounding box
@return nx5 rects where n is number of rects over overlapThresh
Rectangles are [x1, y1, x2, y2, 0]
"""
def get_overlapping_boxes(rects1, rect2, overlapThresh=0.001):
    """Return the rows of rects1 whose IOU with rect2 exceeds overlapThresh.

    rects1 is dx5 ([x1, y1, x2, y2, score]); column 4 is overwritten in place
    with each row's IOU before filtering.
    """
    left = np.fmax(rects1[:, 0], rect2[0])
    right = np.fmin(rects1[:, 2], rect2[2])
    top = np.fmax(rects1[:, 1], rect2[1])
    bottom = np.fmin(rects1[:, 3], rect2[3])
    # Negative extents mean no overlap; clamp them to zero.
    overlap = np.fmax(right - left, 0) * np.fmax(bottom - top, 0)
    areas1 = (rects1[:, 2] - rects1[:, 0]) * (rects1[:, 3] - rects1[:, 1])
    area2 = (rect2[2] - rect2[0]) * (rect2[3] - rect2[1])
    ious = overlap * 1.0 / np.fmax(areas1 + area2 - overlap, 0.00001)
    rects1[:, 4] = ious
    return rects1[ious > overlapThresh, :]
"""
@rects1 numpy dx4 matrix of bounding boxes
@rect2 single numpy 1x4 matrix of bounding box
@return number of rects over overlapThresh
Rectangles are [x1, y1, x2, y2]
"""
def count_overlapping_boxes(rects1, rect2, overlapThresh=0.001):
    """Count the boxes in rects1 whose IOU with rect2 exceeds overlapThresh.

    :param rects1: dx4 array of [x1, y1, x2, y2] boxes
    :param rect2: single length-4 box
    :param overlapThresh: IOU threshold
    :return: number of boxes over the threshold
    """
    # Early out when there are no boxes. The original checked shape[1] (the
    # coordinate axis), which never fires for a dx4 array — the box count is
    # along axis 0.
    if rects1.shape[0] == 0:
        return 0
    x1s = np.fmax(rects1[:, 0], rect2[0])
    x2s = np.fmin(rects1[:, 2], rect2[2])
    y1s = np.fmax(rects1[:, 1], rect2[1])
    y2s = np.fmin(rects1[:, 3], rect2[3])
    # Negative extents mean no overlap; clamp them to zero.
    ws = np.fmax(x2s - x1s, 0)
    hs = np.fmax(y2s - y1s, 0)
    intersection = ws * hs
    rects1Area = (rects1[:, 2] - rects1[:, 0]) * (rects1[:, 3] - rects1[:, 1])
    rect2Area = (rect2[2] - rect2[0]) * (rect2[3] - rect2[1])
    # Floor the union to avoid division by zero for degenerate boxes.
    union = np.fmax(rects1Area + rect2Area - intersection, 0.00001)
    ious = intersection * 1.0 / union
    return np.sum(ious > overlapThresh)
|
989,828 | b6d2996e1a9e984742178e55ad93891a8b7f2935 | #!/usr/bin/env python
"""@package DumpMetadataToiRODSXML
@brief Dump EcohydroLib metadata to iRODS XML metadata import format
This software is provided free of charge under the New BSD License. Please see
the following license information:
Copyright (c) 2013, University of North Carolina at Chapel Hill
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the University of North Carolina at Chapel Hill nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF NORTH CAROLINA AT CHAPEL HILL
BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@author Brian Miles <brian_miles@unc.edu>
Pre conditions:
--------------
1. Metadata exist for the specified project directory
Post conditions:
----------------
2. A file named metadata.xml will be written to the specified project directory
Usage:
@code
DumpMetadataToiRODSXML.py -p /path/to/project_dir -c /irods/collection/path
@endcode
"""
import os
import codecs
import argparse
from xml.sax.saxutils import escape
from ecohydrolib.context import Context
from ecohydrolib.metadata import GenericMetadata
from ecohydrolib.metadata import AssetProvenance
PATH_SEP_IRODS = '/'
OUTFILE_NAME = 'metadata.xml'
def writeAVUToXMLFile(outfile, target, attribute, value, unit=None):
    """ Write Attribute, Value, Unit (AVU) element to iRODS metadata XML file

        @param outfile StreamWriter representing the XML file. It is assumed that opening
        <metadata> element has already been written to the file
        @param target String representing the contents of the Target element
        @param attribute String representing the contents of the Attribute element
        @param value String representing the contents of the Value element
        @param unit String representing the contents of the Unit element.
        If None, an empty element will be written
    """
    outfile.write('\t<AVU>\n')
    outfile.write("\t\t<Target>%s</Target>\n" % (escape(target),))
    outfile.write("\t\t<Attribute>%s</Attribute>\n" % (escape(attribute),) )
    outfile.write("\t\t<Value>%s</Value>\n" % (escape(value),) )
    if unit:
        # Bug fix: escape the unit like every other element; the original
        # wrote it verbatim, producing malformed XML for units containing
        # '&', '<' or '>'.
        outfile.write("\t\t<Unit>%s</Unit>\n" % (escape(unit),) )
    else:
        outfile.write('\t\t<Unit />\n')
    outfile.write('\t</AVU>\n')
def writeDictToXMLFile(outfile, target, dict):
    """Emit one AVU element per (key, value) pair of *dict*.

    @param outfile StreamWriter positioned inside an open <metadata> element
    @param target String written as the Target of every AVU
    @param dict Mapping whose keys become Attributes and whose values become
    Values; every AVU gets an empty Unit element.  (Parameter name shadows
    the builtin; kept for interface compatibility.)
    """
    header = '\t<AVU>\n'
    footer = '\t</AVU>\n'
    # The Target line is identical for every AVU, so render it once.
    target_line = "\t\t<Target>%s</Target>\n" % (escape(target),)
    for attribute, value in dict.items():
        outfile.write(header)
        outfile.write(target_line)
        outfile.write("\t\t<Attribute>%s</Attribute>\n" % (escape(attribute),))
        outfile.write("\t\t<Value>%s</Value>\n" % (escape(value),))
        outfile.write('\t\t<Unit />\n')
        outfile.write(footer)
# --- command line -----------------------------------------------------------
parser = argparse.ArgumentParser(description='Dump point climate station information from EcohydroLib metadata to standard output')
parser.add_argument('-p', '--projectDir', dest='projectDir', required=True,
                    help='The directory from which metadata should be read')
parser.add_argument('-c', '--collection', dest='collection', required=True,
                    help='The iRODS collection corresponding to the project directory')
args = parser.parse_args()

context = Context(args.projectDir, None)

# Make sure there's no trailing PATH_SEP_IRODS on the collection
collection = args.collection.rstrip(PATH_SEP_IRODS)

# metadata.xml is written into the project directory itself, UTF-8 encoded.
outfilePath = os.path.join(context.projectDir, OUTFILE_NAME)
outfile = codecs.getwriter('utf-8')(open(outfilePath, 'w'))
outfile.write('<?xml version="1.0" encoding="UTF-8" ?>\n')
outfile.write('<metadata>\n')

# Write study area metadata to collection root
writeDictToXMLFile(outfile, collection, GenericMetadata.readStudyAreaEntries(context))

# Write processing history to collection root, one numbered AVU per step.
history = GenericMetadata.getProcessingHistoryList(context)
i = 1
for entry in history:
    attribute = "processing_step_%d" % (i,); i += 1
    writeAVUToXMLFile(outfile, collection, attribute, entry)

# Write provenance to each item in the manifest; the Target is the iRODS
# path of the individual data object.
provenance = GenericMetadata.readAssetProvenanceObjects(context)
for entry in provenance:
    target = collection + PATH_SEP_IRODS + entry.dcIdentifier
    writeAVUToXMLFile(outfile, target, 'name', entry.name)
    writeAVUToXMLFile(outfile, target, 'dc.source', entry.dcSource)
    writeAVUToXMLFile(outfile, target, 'dc.title', entry.dcTitle)
    writeAVUToXMLFile(outfile, target, 'dc.date', entry.dcDate.strftime(AssetProvenance.FMT_DATE))
    writeAVUToXMLFile(outfile, target, 'dc.publisher', entry.dcPublisher)
    writeAVUToXMLFile(outfile, target, 'dc.description', entry.dcDescription)

# Write point climate station metadata to the data file for that station
stations = GenericMetadata.readClimatePointStations(context)
for station in stations:
    target = collection + PATH_SEP_IRODS + station.data
    writeAVUToXMLFile(outfile, target, 'id', station.id)
    writeAVUToXMLFile(outfile, target, 'name', station.name)
    writeAVUToXMLFile(outfile, target, 'longitude', str(station.longitude), 'WGS84 degrees')
    writeAVUToXMLFile(outfile, target, 'latitude', str(station.latitude), 'WGS84 degrees')
    writeAVUToXMLFile(outfile, target, 'elevation', str(station.elevation), 'meters')

outfile.write('</metadata>\n')
outfile.close()
989,829 | 21e9c74551fc5775d2bdfd5391a717d1ae1d4ed6 | # -*- coding: utf-8 -*-
"""
Created on Thu Dec 3 16:26:33 2020
@author: MT_Eleus
"""
import urllib.request

# Look up this machine's public IP via the ident.me service and persist it.
# Bug fix: use the response as a context manager so the HTTP connection is
# closed deterministically (the original never closed it).
with urllib.request.urlopen('https://ident.me') as response:
    public_ip = response.read().decode('utf8')

with open('foundip.txt', 'w') as f:
    f.write(public_ip)
|
989,830 | df70a681f80509e93e5e927cb1e78f48bb836e91 | def content(function):
function.write("from pynput.keyboard import Key, Listener\nimport logging\n\nlogging.basicConfig(filename='your log file path', level=logging.DEBUG, format=' %(asctime)s - %(message)s')\n\ndef on_press(key):\n logging.info(str(key))\n\nwith Listener(on_press=on_press) as listener:\n listener.join()")
|
989,831 | 77dea882fc052531943982a456233c495b30fa20 | from collections import defaultdict
from re import compile

# name -> (speed km/s, fly time s, rest time s), parsed from the puzzle
# input (Advent of Code 2015 day 14).  Python 2 module: print statements
# and list-returning map() below.
reindeer = dict()
regex = compile(r"^([^ ]+) can fly (\d+) km/s for (\d+) seconds, but then must rest for (\d+) seconds.$")
fh = open('14.txt', 'r')
for line in fh.readlines():
    m = regex.match(line.strip())
    if m:
        # Python 2: map() yields a list [speed, run, rest] here.
        reindeer[m.group(1)] = map(int, (m.group(2), m.group(3), m.group(4)))
    else:
        print "ERROR : " + line
#Test:
#reindeer = {"Comet" : [14,10,127], "Dancer": [16, 11, 162]}
class State:
    """Per-reindeer race state: distance covered, points earned, and the
    current fly/rest phase with the second at which it next flips."""

    def __init__(self, data):
        # data holds (speed km/s, fly duration s, rest duration s).
        speed, run, rest = data[0], data[1], data[2]
        self.distance = 0
        self.points = 0
        self.state = "Running"
        # First phase switch happens when the initial fly window ends.
        self.stateChange = run
        self.speed = speed
        self.run = run
        self.rest = rest
results = dict()
for name, data in reindeer.iteritems():
    results[name] = State(data)

sec = 0
# Simulate 2503 seconds; each second every "Running" reindeer advances by
# its speed, and any reindeer whose phase timer expires flips between
# Running and Resting, scheduling the next flip.
while sec < 2503:
    sec += 1
    for name, data in reindeer.iteritems():
        state = results[name]
        if state.state == "Running":
            state.distance += state.speed
        if sec == state.stateChange:
            if state.state == "Running":
                state.state = "Resting"
                state.stateChange = sec + state.rest
            else:
                state.state = "Running"
                state.stateChange = sec + state.run
    # Part-2 scoring: every current leader (ties included) earns one point
    # per elapsed second.
    maxValue = max([r.distance for r in results.values()])
    top = [r for r in results.keys() if results[r].distance == maxValue]
    for r in top:
        results[r].points += 1

# Python 2 print statements: final distances (part 1) and points (part 2).
print [r + " " + str(results[r].distance) for r in results]
print [r + " " + str(results[r].points) for r in results]
989,832 | 56a013d7e315af6e743d7613f6b5483c1910b1eb | # Generated by Django 3.1.5 on 2021-01-21 15:35
import datetime
from django.db import migrations, models
import django.db.models.deletion
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Auto-generated: add Task.task_status FK and re-default Job.job_date."""

    dependencies = [
        ('tasks', '0004_auto_20210121_1504'),
    ]

    operations = [
        # New required FK from Task to Status; default=1 backfills existing
        # rows, then preserve_default=False drops the default afterwards.
        migrations.AddField(
            model_name='task',
            name='task_status',
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='tasks.status'),
            preserve_default=False,
        ),
        # NOTE(review): the default below is the fixed timestamp captured at
        # makemigrations time, and it is a datetime stored on a TimeField —
        # the model probably wanted a callable such as timezone.now; confirm
        # upstream in the model definition.
        migrations.AlterField(
            model_name='job',
            name='job_date',
            field=models.TimeField(default=datetime.datetime(2021, 1, 21, 15, 34, 56, 785909, tzinfo=utc)),
        ),
    ]
|
989,833 | 8a00d1f46dee9e93f73a4638a791256e37ded7af | import argparse
from datetime import datetime, timedelta
import glob
from pathlib import Path
import re
# Regex patterns: fragments of one SRT timestamp "HH:MM:SS,mmm".
# Hours are any two digits (00-99); minutes/seconds are 00-59.
HOURS = "[0-9]{2}"
MINUTES = "[0-5][0-9]"
SECONDS = "[0-5][0-9]"
MILLIS = "[0-9]{3}"
def convert_to_sec(hour, minute, sec, ms):
    """Return the total seconds (float) represented by the components."""
    total = 3600 * hour
    total += 60 * minute
    total += sec
    return total + ms / 1000
def split_seconds(sec):
    """Decompose *sec* into (hours, minutes, seconds, milliseconds)."""
    hours, remainder = divmod(sec, 3600)
    hours = int(hours)
    # SRT timestamps only have two hour digits.
    assert hours < 100
    mins, remainder = divmod(remainder, 60)
    secs = int(remainder)
    ms = int((remainder - secs) * 1000)
    return hours, int(mins), secs, ms
def extract_time(time_str):
    """Parse an 'HH:MM:SS,mmm' timestamp into total seconds (float).

    Raises RuntimeError when *time_str* does not start with a timestamp.
    """
    match = re.match(f"({HOURS}):({MINUTES}):({SECONDS}),({MILLIS})", time_str)
    if match is None:
        raise RuntimeError(f"Time is not in the valid format: arg = {time_str}")
    hours, mins, secs, ms = (int(group) for group in match.groups())
    return convert_to_sec(hours, mins, secs, ms)
def secs_to_time_str(secs):
    """Format *secs* as an SRT timestamp 'HH:MM:SS,mmm'."""
    hours, mins, whole_secs, ms = split_seconds(secs)
    return f"{hours:0>2}:{mins:0>2}:{whole_secs:0>2},{ms:0>3}"
def get_output_fname(filename):
    """Return the output name '<stem>_synched<ext>' for *filename*.

    Note: the directory part is dropped (Path.stem), so the output file is
    created relative to the current working directory.

    Bug fix: Path.suffix already includes the leading dot; the original
    f-string inserted a second dot, producing 'movie_synched..srt'.
    """
    p = Path(filename)
    return f"{p.stem}_synched{p.suffix}"
def main(filename, sec_to_add, from_sec=0):
    """Shift every subtitle timestamp in *filename* by *sec_to_add* seconds.

    Timestamp lines starting before *from_sec* are copied unchanged, as are
    all non-timestamp lines.  Output goes to a new '<stem>_synched' file.
    Raises RuntimeError when a shift would yield a negative start time.
    """
    time_str = f"{HOURS}:{MINUTES}:{SECONDS},{MILLIS}"
    pattern = f"({time_str}) --> ({time_str})"
    output_filename = get_output_fname(filename)
    # errors='replace' tolerates subtitle files with a mis-declared encoding.
    with open(filename, 'r', errors='replace') as f, open(output_filename, 'w') as output_f:
        for line in f:
            if m := re.match(pattern, line):
                beg, end = map(extract_time, m.groups())
                if beg < from_sec:
                    output_f.write(line)
                    continue
                beg_sec, end_sec = sec_to_add + beg, sec_to_add + end
                if beg_sec < 0:
                    # Bug fix: the original string lacked the f prefix and
                    # printed the literal text '{beg_sec}'.
                    raise RuntimeError(f"Shift results in negative time of {beg_sec}s")
                new_beg, new_end = map(secs_to_time_str, [beg_sec, end_sec])
                output_f.write(f"{new_beg} --> {new_end}\n")
            else:
                output_f.write(line)
    print(f"Output file: {output_filename}")
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Synchronize subtitle time")
    parser.add_argument('filename', type=str, help='<filename>.srt subtitle file')
    parser.add_argument('sec_to_add', type=float, help='Number of seconds to shift subtitle time')
    # 'from' is a Python keyword, hence the explicit dest='from_sec'.
    parser.add_argument('--from', dest='from_sec', type=float, default=0., help='Time from which to apply time shift')
    args = parser.parse_args()
    main(args.filename, args.sec_to_add, args.from_sec)
|
989,834 | 32d2d74e18995c6330e4416ba7c85d1045763112 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 31 12:58:01 2017
@author: jabong
"""
from collections import namedtuple
def apriori(dataset, minsupport, maxsize):
    """Frequent-itemset mining with the Apriori algorithm.

    dataset    : iterable of transactions (each an iterable of items)
    minsupport : minimum number of transactions an itemset must occur in
    maxsize    : largest itemset size to grow towards

    Returns (freqsets, support): freqsets lists the frequent itemsets of
    size < maxsize; support maps every candidate itemset seen (frequent or
    not) to its transaction count as a float.
    """
    from collections import defaultdict

    # Transaction-id lists per single item and per singleton itemset.
    baskets = defaultdict(list)
    pointers = defaultdict(list)
    for tid, transaction in enumerate(dataset):
        for item in transaction:
            pointers[item].append(tid)
            baskets[frozenset([item])].append(tid)

    # Keep only items meeting the threshold; freeze their tid sets.
    pointers = {item: frozenset(tids)
                for item, tids in pointers.items()
                if len(tids) >= minsupport}
    for key in baskets:
        baskets[key] = frozenset(baskets[key])

    # Seed the search with the frequent single items.
    valid = set()
    for itemset, tids in baskets.items():
        if len(tids) >= minsupport:
            valid.update(itemset)
    itemsets = [frozenset([item]) for item in valid]

    freqsets = []
    for level in range(maxsize - 1):
        print("At iteration {}, number of frequent baskets: {}".format(level, len(itemsets)))
        candidates = []
        for itemset in itemsets:
            base_tids = baskets[itemset]
            # Grow each frequent itemset by one frequent item at a time;
            # the intersection of tid sets gives the grown itemset's support.
            for item, item_tids in pointers.items():
                if item in itemset:
                    continue
                joined_tids = base_tids & item_tids
                if len(joined_tids) >= minsupport:
                    grown = frozenset(itemset | frozenset([item]))
                    if grown not in baskets:
                        candidates.append(grown)
                        baskets[grown] = joined_tids
        freqsets.extend(itemsets)
        itemsets = candidates
        if not itemsets:
            break

    support = {itemset: float(len(tids)) for itemset, tids in baskets.items()}
    return freqsets, support
AssociationRule = namedtuple('AssociationRule', ['antecendent', 'consequent', 'base', 'py_x', 'lift'])


def association_rules(dataset, freqsets, support, minlift):
    """Yield AssociationRule tuples whose lift exceeds *minlift*.

    For each frequent itemset of size >= 2 and each single-item consequent
    drawn from it: py_x estimates P(consequent | antecedent) from supports,
    base is P(consequent) over all transactions, and lift = py_x / base.
    (The field name 'antecendent' [sic] is kept for compatibility.)
    """
    n_transactions = float(len(dataset))
    for itemset in (fs for fs in freqsets if len(fs) > 1):
        for item in itemset:
            consequent = frozenset([item])
            antecendent = itemset - consequent
            confidence = support[itemset] / support[antecendent]
            base_rate = support[consequent] / n_transactions
            lift = confidence / base_rate
            if lift > minlift:
                yield AssociationRule(antecendent, consequent, base_rate, confidence, lift)
989,835 | b4bc98e1fa3068aebdd86741709c5e0ca9259542 | from django.http import HttpResponse
from django.views.generic import TemplateView, ListView
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.core.urlresolvers import reverse_lazy
from django.forms import ModelForm, CharField, DateField, TextInput, IntegerField, DateInput, Select, ChoiceField
from students.models import Student
from django.shortcuts import render, redirect, get_object_or_404
# Choices for the gender form field: (stored value, display label).
# NOTE(review): the 'Female.' display label carries a stray trailing period;
# left unchanged here because the labels may already be user-visible.
GENDER = (
    ('Male', 'Male'),
    ('Female', 'Female.')
)
# Class Based Views
class StudentList(ListView):
    """Generic list view: renders the default template with all students."""
    model = Student
class StudentCreate(CreateView):
    """Generic create view for Student; redirects to the roster on success."""
    model = Student
    fields = ['firstname', 'lastname', 'age', 'gender', 'joining_date']
    success_url = reverse_lazy('students:student_list')
class StudentUpdate(UpdateView):
    """Generic update view for Student; redirects to the roster on success."""
    model = Student
    fields = ['firstname', 'lastname', 'age', 'gender', 'joining_date']
    success_url = reverse_lazy('students:student_list')
class StudentDelete(DeleteView):
    """Generic delete view (confirmation template) for Student."""
    model = Student
    success_url = reverse_lazy('students:student_list')
# Function Based Views
class StudentForm(ModelForm):
    """Bootstrap-styled Student form shared by the function-based views."""
    firstname = CharField(widget=TextInput(attrs={'class': 'form-control', 'placeholder': 'First Name'}))
    lastname = CharField(widget=TextInput(attrs={'class': 'form-control', 'placeholder': 'Last Name'}))
    # TextInput on an IntegerField keeps the Bootstrap styling; Django still
    # validates the submitted value as an integer.
    age = IntegerField(widget=TextInput(attrs={'class': 'form-control', 'placeholder': 'Age'}))
    joining_date = DateField(widget=DateInput(attrs={'class': 'form-control', 'placeholder': 'Joining Date'}))
    gender = ChoiceField(choices=GENDER, widget=Select(attrs={'class':'form-control'}))

    class Meta:
        model = Student
        fields = ['firstname', 'lastname', 'age', 'gender', 'joining_date']
def student_list(request, template_name='students/student_list.html'):
    """Render the full student roster (function-based twin of StudentList)."""
    context = {}
    context['object_list'] = Student.objects.all()
    return render(request, template_name, context)
def student_create(request, template_name='students/student_form.html'):
    """Create a Student; re-render the bound form when validation fails."""
    form = StudentForm(request.POST or None)
    if not form.is_valid():
        return render(request, template_name, {'form': form})
    form.save()
    return redirect('students:student_list')
def student_edit(request, pk, template_name='students/student_form.html'):
    """Edit the Student identified by *pk* (404 when absent)."""
    student = get_object_or_404(Student, pk=pk)
    form = StudentForm(request.POST or None, instance=student)
    if not form.is_valid():
        return render(request, template_name, {'form': form})
    form.save()
    return redirect('students:student_list')
def student_delete(request, pk, template_name='students/student_confirm_delete.html'):
    """Confirm (GET) and perform (POST) deletion of one Student."""
    student = get_object_or_404(Student, pk=pk)
    if request.method != 'POST':
        return render(request, template_name, {'object': student})
    student.delete()
    return redirect('students:student_list')
|
989,836 | e3846dcedaddcc5e12ba295103b8f7c73c5d0237 | """Get any necessary data for given symbols"""
import os
import numpy as np
import pandas as pd
import pycharts
class CompanyClient(pycharts.CompanyClient):
    """Child Class of pycharts Company Client that adds additional methods
    to make it more convenient to get data from the YCharts API.

    Python 2 code: relies on `basestring` and list-returning `map`.
    Requires the YCHARTS_API_KEY environment variable to be set.
    """

    def __init__(self, symbol_list):
        # API key comes from the environment; KeyError here means it is unset.
        YCHARTS_API_KEY = os.environ['YCHARTS_API_KEY']
        super(CompanyClient, self).__init__(YCHARTS_API_KEY)
        # Accept a bare symbol string as well as a list of symbols.
        if isinstance(symbol_list, basestring):
            symbol_list = [symbol_list]
        # Python 2: map returns a list of upper-cased tickers.
        self.symbol_list = map(str.upper, symbol_list)

    def fetch_data(self, fields):
        """Fetch data for given fields

        Parameters
        ----------
        fields: list
            Field(s) for which to fetch data

        Returns
        -------
        data_dict: dictionary
            {symbol_1: {field1: data1, field2: data2},
             symbol_2: {field1: data1, field2: data2}
            }
        """
        n_symbols = len(self.symbol_list)
        # The API caps each request at 100 symbols, so query in batches.
        n_iters = n_symbols // 100 + 1
        # data_dict keyed by symbol
        data_dict = {}
        for i in range(0, n_iters):
            start = i*100
            end = (i+1) * 100
            response = self.get_points(self.symbol_list[start:end],
                                       fields)['response']
            for symbol in response:
                if response[symbol]['meta']['status'] == 'ok':
                    symbol_data = response[symbol]['results']
                    data_dict[symbol] = {}
                    for data_point in symbol_data:
                        # ['data'] holds a (date, value) pair; keep the value.
                        data_dict[symbol][data_point] = \
                            symbol_data[data_point]['data'][1]
                else:
                    # Unknown or failed symbols become all-NaN rows.
                    data_dict[symbol] = {field:np.nan for field in fields}
        return data_dict

    def to_dataframe(self, data_dict):
        """Convert a data_dict from fetch_data to a DataFrame (symbols as index)."""
        return pd.DataFrame.from_dict(data_dict, orient='index')
if __name__ == '__main__':
    # Smoke test: one valid ticker and one bogus symbol; the bogus one
    # should come back as an all-NaN row.  Python 2 print statement below.
    symbol_list = ['AAPL', 'xyz']
    field_list = ['price', 'average_volume_30']
    cc = CompanyClient(symbol_list)
    from pprint import pprint  # imported but unused; kept as-is
    data = cc.fetch_data(field_list)
    print cc.to_dataframe(data)
|
989,837 | 44ec4a158679824bcac70d41603ca9ab8dc53083 | from csv import DictReader
# Count how many rows of the tweet dump mention "corona" or "covid"
# (case-insensitive substring match on the 'text' column).
KEYWORDS = ('corona', 'covid')  # hoisted: loop-invariant in the original

with open('12MCoronaTweets.csv', 'r', encoding="utf-8") as csv_obj:
    # NUL bytes crash the csv module, so strip them while streaming.
    csv = DictReader(x.replace('\0', '') for x in csv_obj)
    rowCount = 0
    rowCountCorona = 0
    for row in csv:
        print(rowCount)  # progress indicator, one line per row
        if any(keyword in row['text'].lower() for keyword in KEYWORDS):
            rowCountCorona += 1
        rowCount += 1

# Bug fix: the original divided by rowCount/100 unconditionally and raised
# ZeroDivisionError on an empty input file.
porcentaje = rowCountCorona / (rowCount / 100) if rowCount else 0.0
print(f'Numero de filas leidas: {rowCount}')
print(f'Numero de filas que presentan la palabra corona o covid: {rowCountCorona}')
print(f'Porcentaje respectivo de filas leidas: {porcentaje}')
|
989,838 | adf9eb216550485a9e71cc763414609d76317188 | import math
import matplotlib.pyplot as plt
def plot_stats_by_name(exp, stat_list, labels=None, ncols=4, figsize=(5, 3), titlesize=12):
    """Plot one summary statistic per subplot on a grid.

    exp       : experiment object exposing `summary_keys` and `plot_stat`
    stat_list : non-empty list of stat names, each present in exp.summary_keys
    labels    : optional per-stat titles (defaults to the stat names)
    """
    assert len(stat_list) > 0
    for name in stat_list:
        assert name in exp.summary_keys, "{} is not a valid stat key".format(name)

    n_stats = len(stat_list)
    ncols = min(ncols, n_stats)
    nrows = math.ceil(n_stats / ncols)
    fig, axes = plt.subplots(ncols=ncols, nrows=nrows,
                             figsize=(figsize[0] * ncols, figsize[1] * nrows))
    # plt.subplots returns a bare Axes for a 1x1 grid; normalise to a flat
    # indexable sequence either way.
    axes = [axes] if (ncols * nrows) == 1 else axes.flatten()

    for idx, name in enumerate(stat_list):
        ax = axes[idx]
        exp.plot_stat(stat_idx=exp.summary_keys.index(name), ax=ax)
        ax.set_title(name if labels is None else labels[idx], fontsize=titlesize)

    # Blank out any grid cells beyond the last plotted stat.
    for idx in range(n_stats, len(axes)):
        axes[idx].axis('off')
|
989,839 | 6357fdbe6f5576225d2bbae74e8e8d05489724fe | import pandas as pd
import numpy as np
import os
from datetime import datetime
class Input:
    """All user-configurable parameters of the BESS sizing model.

    Holds defaults for the planning horizon, battery characteristics, costs
    and financial assumptions; the set_*/get_* methods mutate and expose
    them.  Percentages are passed to setters as whole numbers (4.5 == 4.5%).
    (Cleanup: the original __init__ assigned an unused local
    `additional_hour_conversion`; removed.)
    """

    def __init__(self):
        # Ramp-up window (populated later by set_ramp_up_time_selection).
        self.ramp_up_start_time = []
        self.ramp_up_end_time = []
        self.FMT = '%H:%M'  # clock format used to parse ramp-up times
        self.ramp_up_time_in_hours = 0
        # Planning horizon and battery duration.
        self.years = 15
        self.life = 15   # battery life in years
        self.hours = 2   # storage duration in hours (1 or 2)
        # Renewable installed capacity: planned and current.
        self.solar_ic = 8020
        self.wind_ic = 8800
        self.solar_ic_cur = 1680.74
        self.wind_ic_cur = 59557
        # Annual growth multipliers (1 + rate).
        self.load_growth = 1.045  # Set to input
        self.solar_growth = 1.2  # Set to input
        self.wind_growth = 1.2  # Set to input
        # Cycle budget tied to battery life.  NOTE(review): a life of 11 or
        # 13 years leaves total_cycles unset — confirm the allowed values.
        if self.life >= 14:
            self.total_cycles = 7000
        elif self.life == 12:
            self.total_cycles = 5000
        elif self.life == 10:
            self.total_cycles = 4000
        self.rt_efficiency = 0.85  # round-trip efficiency
        self.dod = 0.94            # usable depth of discharge
        # Degradation factor depends on the cycle budget.
        if self.total_cycles == 4000:
            self.degr = 0.8
        if self.total_cycles == 5000:
            self.degr = 0.7
        if self.total_cycles == 7000:
            self.degr = 0.7
        self.scrap_percent = 0.1
        # Battery capital cost for 1h / 2h durations, by cycle budget.
        if self.total_cycles == 4000:
            self.cost_1hr = 45500000
            self.cost_2hr = 62525000
        if self.total_cycles == 5000:
            self.cost_1hr = 50000000
            self.cost_2hr = 66000000
        if self.total_cycles == 7000:
            self.cost_1hr = 53000000
            self.cost_2hr = 70276036.47
        self.constant_throughput = 0  # 0 = model degradation, 1 = ignore it
        # BESS size sweep.
        self.bess_size_start = 10  # Input
        self.bess_size_end = 50  # Input
        self.bess_size_incr = 5  # Input
        self.discount_rate = 0.089
        # costs[0] dsm price, costs[1] vc_peak, costs[2] vc_min,
        # costs[3] vc_exc (all for year 1).
        self.costs = [8, 7.03, 3.63, 2.81]
        self.eci = 0.03  # annual energy cost increase
        # Financing assumptions.
        self.loan_percent = 0.7
        self.interest_rate = 0.09
        self.tax_rate = 0.25
        self.roe = 0.14 / (1 - self.tax_rate)  # pre-tax return on equity
        self.op_ex = 0.01
        # Avoided transmission investment by storage duration.
        self.transmission_reduction_1hr = 1000000
        self.transmission_reduction_2hr = 2000000
        self.bess_cost_reduction_rate = 0.1
        # Transformer alternative and land costs.
        self.transformer_cost = 1300000
        self.transformer_interest = 0.11
        self.land_cost = 215278
        self.trans_land_req = 20
        # Outage valuation.
        self.average_tariff = 7
        self.total_outage = 20

    def set_ramp_up_time_selection(self, start_time, end_time):
        """Parse 'HH:MM' strings and store the ramp-up window as float hours
        since midnight (e.g. '01:30' -> 1.5)."""
        midnight = datetime.strptime("00:00", self.FMT)
        self.ramp_up_start_time = float(
            (datetime.strptime(start_time, self.FMT) - midnight).total_seconds() / 3600)
        self.ramp_up_end_time = float(
            (datetime.strptime(end_time, self.FMT) - midnight).total_seconds() / 3600)

    def set_years(self, years):
        self.years = years

    def set_life(self, life):
        self.life = life

    def set_hours(self, hours):
        self.hours = hours

    def set_re_ic(self, solar, wind, solar_cur, wind_cur):
        """Set planned and current installed capacities for solar and wind."""
        self.solar_ic = solar
        self.wind_ic = wind
        self.solar_ic_cur = solar_cur
        self.wind_ic_cur = wind_cur

    def set_growth_rates(self, input_values):
        """input_values: [load %, solar %, wind %] annual growth rates."""
        self.load_growth = 1 + input_values[0] / 100
        self.solar_growth = 1 + input_values[1] / 100
        self.wind_growth = 1 + input_values[2] / 100

    def set_bess_param(self, cycles, efficiency, dod_up, dod_low, degr, scrap_percent):
        """Set battery parameters; percentage arguments are whole numbers."""
        self.total_cycles = cycles
        self.rt_efficiency = efficiency / 100
        self.dod = (dod_up - dod_low) / 100
        self.degr = degr / 100
        self.scrap_percent = scrap_percent / 100

    def set_bess_cost(self, hour, cost):
        if hour == 1:
            self.cost_1hr = cost
        if hour == 2:
            self.cost_2hr = cost

    def set_constant_throughput(self, throughput):
        self.constant_throughput = throughput  # 0 - degradation, 1 - no degradation

    def set_bess_range(self, start, end, incr):
        self.bess_size_start = start  # Input
        self.bess_size_end = end  # Input
        self.bess_size_incr = incr  # Input

    def set_discount_rate(self, discount):
        self.discount_rate = discount / 100

    def set_costs(self, costs):
        # costs: costs[0] - dsm price, costs[1] - vc_peak, costs[2] - vc_min,
        # costs[3] - vc_exc, (All for year 1)
        self.costs = costs

    def set_energy_cost_increase(self, eci):
        self.eci = eci / 100

    def set_financial_param(self, loan_percent, interest_rate, tax_rate, post_tax_roe, op_ex):
        """Set financing assumptions; roe is stored pre-tax."""
        self.loan_percent = loan_percent / 100
        self.interest_rate = interest_rate / 100
        self.tax_rate = tax_rate / 100
        self.roe = (post_tax_roe / 100) / (1 - tax_rate / 100)
        self.op_ex = op_ex / 100

    def set_roe(self, post_tax_roe):
        # Here post_tax_roe is a fraction (not a percentage), matching the
        # original behaviour of this overload-style setter.
        self.roe = post_tax_roe / (1 - self.tax_rate)

    def set_transmission_reduction(self, transmission_reduction):
        # Applies to the currently configured storage duration.
        hour = self.hours
        if hour == 1:
            self.transmission_reduction_1hr = transmission_reduction
        if hour == 2:
            self.transmission_reduction_2hr = transmission_reduction

    def set_bess_cost_reduction(self, bess_cost_reduction_rate):
        self.bess_cost_reduction_rate = bess_cost_reduction_rate / 100

    def set_transformer_param(self, transformer_cost, transformer_interest, land_cost, trans_land_req):
        self.transformer_cost = transformer_cost
        self.transformer_interest = transformer_interest / 100
        self.land_cost = land_cost
        self.trans_land_req = trans_land_req

    def set_outage_param(self, average_tariff, total_outage):
        self.average_tariff = average_tariff
        self.total_outage = total_outage

    def get_years(self):
        return self.years

    def get_life(self):
        return self.life

    def get_hours(self):
        return self.hours

    def get_re_ic(self):
        return self.solar_ic, self.wind_ic, self.solar_ic_cur, self.wind_ic_cur

    def get_growth_rates(self):
        return self.load_growth, self.solar_growth, self.wind_growth

    def get_bess_param(self):
        return self.life, self.total_cycles, self.rt_efficiency, self.degr, self.scrap_percent, self.dod

    def get_bess_cost(self, hour):
        # Returns None for unsupported durations (kept from the original).
        if hour == 1:
            return self.cost_1hr
        if hour == 2:
            return self.cost_2hr

    def get_constant_throughput(self):
        return self.constant_throughput

    def get_bess_range(self):
        return self.bess_size_start, self.bess_size_end, self.bess_size_incr

    def get_discount_rate(self):
        return self.discount_rate

    def get_costs(self):
        # costs: costs[0] - dsm price, costs[1] - vc_peak, costs[2] - vc_min,
        # costs[3] - vc_exc, (All for year 1)
        return self.costs

    def get_energy_cost_increase(self):
        # Returned as a multiplier (1 + rate).
        return 1 + self.eci

    def get_ramp_up_time_selection(self):
        return self.ramp_up_start_time, self.ramp_up_end_time

    def get_financial_param(self):
        return self.loan_percent, self.interest_rate, self.roe, self.op_ex

    def get_roe_tax(self):
        """Return (post-tax ROE, tax rate) derived from the stored pre-tax ROE."""
        post_tax_roe = self.roe * (1 - self.tax_rate)
        return post_tax_roe, self.tax_rate

    def get_transmission_reduction(self, hour):
        # Returns None for unsupported durations (kept from the original).
        if hour == 1:
            return self.transmission_reduction_1hr
        if hour == 2:
            return self.transmission_reduction_2hr

    def get_bess_cost_reduction(self):
        return self.bess_cost_reduction_rate

    def get_transformer_param(self):
        return self.transformer_cost, self.transformer_interest, self.land_cost, self.trans_land_req

    def get_outage_param(self):
        return self.average_tariff, self.total_outage
|
989,840 | 9b7c5fe6a146f0b21642df5ce62d9fbf5d1dab39 | from django import forms
from studygroups.models import Application
from studygroups.models import Reminder
from studygroups.models import StudyGroup
from studygroups.models import StudyGroupMeeting
from studygroups.models import Feedback
from localflavor.us.forms import USPhoneNumberField
class ApplicationForm(forms.ModelForm):
    """Signup form for a learning-circle Application.

    Cross-validates that the field matching the chosen contact method
    (email address or US-format mobile number) is actually filled in.
    """
    mobile = USPhoneNumberField(required=False)

    def clean(self):
        # Cross-field validation: the preferred contact method must have a
        # corresponding value; attach the error to the empty field.
        cleaned_data = super(ApplicationForm, self).clean()
        contact_method = cleaned_data.get("contact_method")
        if contact_method == Application.EMAIL and not cleaned_data.get('email'):
            self.add_error('email', "Please enter your email address or change your preferred contact method.")
        elif contact_method == Application.TEXT and not cleaned_data.get('mobile'):
            self.add_error('mobile', "Please enter your mobile number or change your preferred contact method.")

    class Meta:
        model = Application
        labels = {
            'mobile': 'What is your mobile number?',
            'contact_method': 'Preferred Method of Contact.',
            'computer_access': 'Can you bring your own laptop to the Learning Circle?',
            'goals': 'In one sentence, please explain your goals for taking this course.',
            'support': 'A successful study group requires the support of all of its members. How will you help your peers achieve their goals?',
        }
        exclude = ['accepted_at']
        widgets = {'study_group': forms.HiddenInput}
class MessageForm(forms.ModelForm):
    """Compose a Reminder; scheduling/sent bookkeeping fields are set in code."""
    class Meta:
        model = Reminder
        exclude = ['study_group_meeting', 'created_at', 'sent_at']
        widgets = {'study_group': forms.HiddenInput}
class StudyGroupForm(forms.ModelForm):
    """Edit the schedulable details of a StudyGroup."""
    class Meta:
        model = StudyGroup
        fields = ['location_details', 'start_date', 'end_date', 'duration']
class StudyGroupMeetingForm(forms.ModelForm):
    """Schedule one meeting; the owning study group is fixed via hidden input."""
    class Meta:
        model = StudyGroupMeeting
        fields = ['meeting_time', 'study_group']
        widgets = {'study_group': forms.HiddenInput}
class FeedbackForm(forms.ModelForm):
    """Collect feedback for one meeting; the meeting is fixed via hidden input.

    NOTE(review): exclude = [''] names an empty-string field — almost
    certainly meant to be [] — left unchanged pending confirmation.
    """
    class Meta:
        model = Feedback
        exclude = ['']
        widgets = {'study_group_meeting': forms.HiddenInput}
|
989,841 | 5c4e35cc40030d343bc1cd5504da985827569aa7 | # use argv to get a filename
from sys import argv

# unpack: the script name and the file to read (fails unless exactly one
# command-line argument is supplied)
script, filename = argv

# open a read handle on the named file (default mode 'r')
txt = open(filename)

# print a string with format characters
print "Here's your file %r:" % filename

# print the contents of the txt file, then release the handle
print txt.read()
txt.close()

# prompt for the filename a second time (exercise requirement), then open
# and dump that file too
print "Type the filename again:"
file_again = raw_input("> ")
txt_again = open(file_again)
print txt_again.read()
txt_again.close()
# study drills 1:Above each line, write out in English what that line does.
# study drills 4:Get rid of the part from lines 10- 15 where you use raw_input and try the script then.
# just print the txt once
# study drills 5:Use only raw_input and try the script that way. Think of why one way of getting the filename would be better than another.
# print "Type the filename again:"
# file_again = raw_input("> ")
# txt_again = open(file_again)
# print txt_again.read()
# if don't use raw_input I needn't to print the filename in python just in command line.
# study drills 7: Start python again and use open from the prompt. Notice how you can open files and run read on them right there?
# read=open("C:/Users/18402/lpthw/ex15/ex15_sample.txt")
# read.read()
# read.close()
# study drills 8: Have your script also do a close() on the txt and txt_again variables. It’s important to close files when you are done with them.
# ok |
989,842 | c3397a77aa67ad487a21106b0b1f3deb62f0b8ca | # -*- coding: utf-8 -*-
# Copyright (c) 2014, Niklas Hauser
# All rights reserved.
#
# The file is part of my bachelor thesis and is released under the 3-clause BSD
# license. See the file `LICENSE` for the full license governing this code.
# -----------------------------------------------------------------------------
import threading
import functools
class PeriodicTimer(object):
    """Run *callback* every *interval* seconds on a threading.Timer.

    The callback is rescheduled only while it keeps returning a truthy
    value; returning something falsy ends the cycle.
    """

    def __init__(self, interval, callback):
        self.interval = interval
        # Bug fix: initialise the timer handle so cancel() is safe before
        # start() (the original raised AttributeError in that case).
        self.thread = None

        @functools.wraps(callback)
        def wrapper(*args, **kwargs):
            result = callback(*args, **kwargs)
            if result:
                # Truthy result: schedule the next tick.
                self.thread = threading.Timer(self.interval,
                                              self.callback)
                self.thread.start()

        self.callback = wrapper

    def start(self):
        """Schedule the first tick after *interval* seconds."""
        self.thread = threading.Timer(self.interval, self.callback)
        self.thread.start()

    def cancel(self):
        """Cancel the pending tick, if any.  Safe to call before start()."""
        if self.thread is not None:
            self.thread.cancel()
|
989,843 | a36e397cf4ca39a6f23b71f7f9edcda840903bc8 | import string, sys, random
# Extend the punctuation set with typographic quotes and the em dash so they
# are stripped along with ASCII punctuation.
string.punctuation += "”“’‘—"

# argv[1] is the text file to build the word histogram from.
f = open(str(sys.argv[1:2][0]), "r")
word_list = f.readlines()
f.close()

words_list = []
# Translation table mapping every punctuation character to None (deletion).
table = str.maketrans({key: None for key in string.punctuation})
for strings in word_list:
    words_list += strings.split()
for word in range(len(words_list)):
    words_list[word] = words_list[word].translate(table).lower()
# histogram: list of [word, count] pairs built with a linear scan
# (O(words * distinct words); fine for small inputs).
histogram = []
for j in range(len(words_list)):
    contains = False
    if len(histogram) == 0:
        histogram.append([words_list[j], 1])
    else:
        for i in range(len(histogram)):
            if words_list[j] == histogram[i][0]:
                histogram[i][1] += 1
                contains = True
        if not contains:
            histogram.append([words_list[j], 1])

# Number of samples to draw: argv[2], defaulting to 1000.
try:
    times = int(sys.argv[2:3][0])
except IndexError:
    times = 1000

# Convert counts to cumulative probabilities in place, so each entry's
# second element holds the upper edge of its bucket in (0, 1].
total = 0
sampling = []
for sets in histogram:
    total += sets[1]
for index, sets in enumerate(histogram):
    if index - 1 >= 0:
        sets[1] = (sets[1] / total) + histogram[index - 1][1]
    else:
        sets[1] = sets[1] / total
def sample():
    """Draw one word from the cumulative distribution in `histogram`.

    Relies on the module-level `histogram` (and `random`): each entry is
    [word, cumulative probability].  Linear scan picks the bucket whose
    (previous, current] interval contains a uniform draw.  Returns "" in
    the measure-zero case where the draw exactly equals a bucket boundary.
    """
    chance = random.random()
    choice = ""
    for index, sets in enumerate(histogram):
        if index - 1 >= 0:
            # strictly between the previous and current cumulative values
            if chance < sets[1] and chance > histogram[index - 1][1]:
                choice = sets[0]
        else:
            if chance < sets[1]:
                choice = sets[0]
    return choice
# Draw `times` samples and tally them using the same linear-scan
# [word, count] structure as the histogram above.
for _ in range(times):
    selected = sample()
    contains = False
    if len(sampling) == 0:
        sampling.append([selected, 1])
    else:
        for i in range(len(sampling)):
            if selected == sampling[i][0]:
                sampling[i][1] += 1
                contains = True
        if not contains:
            sampling.append([selected, 1])

# Dump the cumulative distribution and the empirical sample counts.
print(histogram)
print(sampling)
|
989,844 | 6ba906f96f6b1b49dd704441165dd8ba988f8090 | import FWCore.ParameterSet.Config as cms
##
## Set standard binning for the residual histograms in both, standalone and DQM mode
##
from Alignment.OfflineValidation.TrackerOfflineValidation_cfi import *
# do the parameter setting before cloning, so the clone gets these values
TrackerOfflineValidation.TH1NormXprimeResStripModules.Nbinx = 120
TrackerOfflineValidation.TH1NormXprimeResStripModules.xmin = -3.0
TrackerOfflineValidation.TH1NormXprimeResStripModules.xmax = 3.0
#TrackerOfflineValidation.TH1NormXResStripModules.Nbinx = 120
#TrackerOfflineValidation.TH1NormXResStripModules.xmin = -3.0
#TrackerOfflineValidation.TH1NormXResStripModules.xmax = 3.0
TrackerOfflineValidation.TH1XprimeResStripModules.Nbinx = 5000
TrackerOfflineValidation.TH1XprimeResStripModules.xmin = -0.05 #-0.5
TrackerOfflineValidation.TH1XprimeResStripModules.xmax = 0.05 #0.5
#TrackerOfflineValidation.TH1XResStripModules.Nbinx = 5000
#TrackerOfflineValidation.TH1XResStripModules.xmin = -0.5
#TrackerOfflineValidation.TH1XResStripModules.xmax = 0.5
TrackerOfflineValidation.TH1NormYResStripModules.Nbinx = 120
TrackerOfflineValidation.TH1NormYResStripModules.xmin = -3.0
TrackerOfflineValidation.TH1NormYResStripModules.xmax = 3.0
TrackerOfflineValidation.TH1YResStripModules.Nbinx = 5000
TrackerOfflineValidation.TH1YResStripModules.xmin = -11.0
TrackerOfflineValidation.TH1YResStripModules.xmax = 11.0
TrackerOfflineValidation.TH1NormXprimeResPixelModules.Nbinx = 120
TrackerOfflineValidation.TH1NormXprimeResPixelModules.xmin = -3.0
TrackerOfflineValidation.TH1NormXprimeResPixelModules.xmax = 3.0
#TrackerOfflineValidation.TH1NormXResPixelModules.Nbinx = 120
#TrackerOfflineValidation.TH1NormXResPixelModules.xmin = -3.0
#TrackerOfflineValidation.TH1NormXResPixelModules.xmax = 3.0
TrackerOfflineValidation.TH1XprimeResPixelModules.Nbinx = 5000
TrackerOfflineValidation.TH1XprimeResPixelModules.xmin = -0.05 #-0.5
TrackerOfflineValidation.TH1XprimeResPixelModules.xmax = 0.05 #0.5
#TrackerOfflineValidation.TH1XResPixelModules.Nbinx = 5000
#TrackerOfflineValidation.TH1XResPixelModules.xmin = -0.5
#TrackerOfflineValidation.TH1XResPixelModules.xmax = 0.5
TrackerOfflineValidation.TH1NormYResPixelModules.Nbinx = 120
TrackerOfflineValidation.TH1NormYResPixelModules.xmin = -3.0
TrackerOfflineValidation.TH1NormYResPixelModules.xmax = 3.0
TrackerOfflineValidation.TH1YResPixelModules.Nbinx = 5000
TrackerOfflineValidation.TH1YResPixelModules.xmin = -0.05 #-0.5
TrackerOfflineValidation.TH1YResPixelModules.xmax = 0.05 #0.5
TrackerOfflineValidation.TProfileXResStripModules.Nbinx = 34
TrackerOfflineValidation.TProfileXResStripModules.xmin = -1.02
TrackerOfflineValidation.TProfileXResStripModules.xmax = 1.02
TrackerOfflineValidation.TProfileXResPixelModules.Nbinx = 17
TrackerOfflineValidation.TProfileXResPixelModules.xmin = -1.02
TrackerOfflineValidation.TProfileXResPixelModules.xmax = 1.02
TrackerOfflineValidation.TProfileYResStripModules.Nbinx = 34
TrackerOfflineValidation.TProfileYResStripModules.xmin = -1.02
TrackerOfflineValidation.TProfileYResStripModules.xmax = 1.02
TrackerOfflineValidation.TProfileYResPixelModules.Nbinx = 17
TrackerOfflineValidation.TProfileYResPixelModules.xmin = -1.02
TrackerOfflineValidation.TProfileYResPixelModules.xmax = 1.02
# First clone contains the standard histogram binning for both, Standalone and DQMmode
TrackerOfflineValidationBinned = TrackerOfflineValidation.clone()
##
## TrackerOfflineValidation (standalone mode)
##
# Second clone
TrackerOfflineValidationStandalone = TrackerOfflineValidationBinned.clone(
Tracks = 'TrackRefitterForOfflineValidation',
moduleLevelHistsTransient = cms.bool(True),
moduleLevelProfiles = cms.bool(False)
)
##
## Output File Configuration
##
# use TFileService
from PhysicsTools.UtilAlgos.TFileService_cfi import *
TFileService = cms.Service("TFileService",
fileName = cms.string('$TMPDIR/trackerOfflineValidation.root'),
closeFileFast = cms.untracked.bool(True)
)
##
## Sequence
##
seqTrackerOfflineValidationStandalone = cms.Sequence(TrackerOfflineValidationStandalone)
|
989,845 | fcf098d4198fc19b37fecf5ff9656a6095e0dd21 | import turtle as t
r = 10
dr = 40
head = 90
for i in range(4):
t.pendown()
t.circle(r)
r += dr
t.pu()
t.seth(-head)
t.fd(dr)
t.seth(0)
t.done()
|
989,846 | 772d99dd8597b3b60b42f915eb26deab9bea5386 | #Set up environment Import neccessary packages.
import matplotlib.pyplot as plt
import pandas as pd
import datetime as dt
from scipy.stats import gaussian_kde
import os
import numpy as np
import tensorflow as tf # This code has been tested with TensorFlow 1.6
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
%matplotlib inline
|
989,847 | cd4f15104aad19f28a766be9feb8a2f1c9913592 | from django.shortcuts import render
from django.views.generic import FormView, RedirectView
from django.contrib.auth import authenticate, login, logout
from .forms import LoginForm
from .models import User
class LoginView(FormView):
    """Form-based login view: renders `login.html` and authenticates on POST."""
    form_class = LoginForm
    template_name = 'login.html'

    def form_valid(self, form, *args, **kwargs):
        """Authenticate the submitted credentials.

        On success, log the user in and redirect via get_success_url();
        on failure (bad credentials or inactive account) re-render the
        form with an error message under the `errors` context key.
        """
        username = form.cleaned_data['username']
        password = form.cleaned_data['password']
        user = authenticate(username=username, password=password)
        if user is None:
            context = self.get_context_data(**kwargs)
            context['errors'] = ['Usuario y/o contraseña incorrectos.']
        else:
            if user.is_active:
                login(self.request, user)
                # success path: delegate the redirect to FormView
                return super(LoginView, self).form_valid(form)
            else:
                context = self.get_context_data(**kwargs)
                context['errors'] = ['Usuario inactivo.']
        # only reached on the two error branches above
        return render(self.request, self.template_name, context)

    def get_success_url(self):
        """Redirect to the ?next= query parameter when present, else to '/'."""
        print('success')  # NOTE(review): leftover debug print — consider removing
        next_url = self.request.GET.get('next', '/')
        if next_url is not None and next_url != '':
            return next_url
        return '/'
class LogoutView(RedirectView):
    """Log the current user out, then redirect to the site root."""
    url = '/'

    def get_redirect_url(self, *args, **kwargs):
        # side effect first: terminate the session, then fall through to
        # RedirectView's normal URL resolution (the class-level url = '/')
        logout(self.request)
        return super(LogoutView, self).get_redirect_url(*args, **kwargs)
|
989,848 | 36612802b2fb13dd835df019e76eb7a92cd5f92a | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
import time
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from tqdm import tqdm
import math
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.model_selection import GridSearchCV, StratifiedKFold
from sklearn.metrics import f1_score, roc_auc_score
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.layers import Dense, Input, CuDNNLSTM, Embedding, Dropout, Activation, CuDNNGRU, Conv1D
from tensorflow.keras.layers import Bidirectional, GlobalMaxPool1D, GlobalMaxPooling1D, GlobalAveragePooling1D
from tensorflow.keras.layers import Input, Embedding, Dense, Conv2D, MaxPool2D, concatenate
from tensorflow.keras.layers import Reshape, Flatten, Concatenate, Dropout, SpatialDropout1D
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.models import Model
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Layer
from tensorflow.keras import initializers, regularizers, constraints, optimizers, layers
from tensorflow.keras.layers import concatenate
from tensorflow.keras.callbacks import *
def load_and_prec(max_features, maxlen):
    """Load the train/test CSVs and preprocess the question text.

    Tokenizes with a vocabulary capped at *max_features*, pads/truncates
    every sequence to *maxlen*, and shuffles the training set with a
    fixed seed for reproducibility.

    Returns (train_X, test_X, train_y, word_index).
    Assumes train.csv/test.csv live in the working directory with
    'question_text' and (train only) 'target' columns — TODO confirm.
    """
    train_df = pd.read_csv("train.csv")
    test_df = pd.read_csv("test.csv")
    print("Train shape : ",train_df.shape)
    print("Test shape : ",test_df.shape)
    ## fill up the missing values
    train_X = train_df["question_text"].fillna("_##_").values
    test_X = test_df["question_text"].fillna("_##_").values
    ## Tokenize the sentences
    # NOTE: the tokenizer vocabulary is fit on the training text only
    tokenizer = Tokenizer(num_words=max_features)
    tokenizer.fit_on_texts(list(train_X))
    train_X = tokenizer.texts_to_sequences(train_X)
    test_X = tokenizer.texts_to_sequences(test_X)
    ## Pad the sentences
    train_X = pad_sequences(train_X, maxlen=maxlen)
    test_X = pad_sequences(test_X, maxlen=maxlen)
    ## Get the target values
    train_y = train_df['target'].values
    #shuffling the data
    # fixed seed so every run produces the same permutation
    np.random.seed(2018)
    trn_idx = np.random.permutation(len(train_X))
    train_X = train_X[trn_idx]
    train_y = train_y[trn_idx]
    return train_X, test_X, train_y, tokenizer.word_index
989,849 | b822f0ad3b66039117b2417e01ddb3f7dd7102b8 | # Average Percentage Calculator
print("Enter 'x' to go back. ")
print("Enter marks obtained in 5 subjects: ")
m1 = input()
if m1 == 'x':
import MHOME
else:
m2 = input()
m3 = input()
m4 = input()
m5 = input()
mark1 = int(m1)
mark2 = int(m2)
mark3 = int(m3)
mark4 = int(m4)
mark5 = int(m5)
sum = mark1 + mark2 + mark3 + mark4 + mark5
average = sum/5
percentage = (sum/500)*100
print("Average Marks = ", average)
print("Percentage Marks = ", percentage,"%")
|
989,850 | 50d72d935460a3883806345ca4e1c3baa8225217 | from cocos.director import director
from cocos.sprite import Sprite
from cocos.euclid import Vector2
from cocos.collision_model import CircleShape, AARectShape
from cocos.actions import IntervalAction, Delay, CallFunc, MoveBy
from cocos.text import Label
from pyglet.image import ImageGrid, Animation, load
import math
# load the sprite sheet image
raw = load("assets/explosion.png")
# it has 1 row and 8 columns
seq = ImageGrid(raw, 1, 8) # one row eight columns
# create an animation that cycles through the frames, playing
# each for 0.07 seconds and do NOT loop it
explosion_img = Animation.from_image_sequence(seq, 0.07, False) # no loop
class Actor(Sprite):
def __init__(self, image, x, y):
# like the Actor class in our other game, initialize
# with image and starting coordinates
super().__init__(image)
pos = Vector2(x, y)
self.position = pos
# underscore to make this a private property
self._cshape = CircleShape(pos, self.width * 0.5)
@property
def cshape(self):
# now, every time the collider shape is accessed,
# its position will be updated to match the Actor's
self._cshape.center = Vector2(self.x, self.y)
return self._cshape
# an action that tanks can perform to turn red when hit
class Hit(IntervalAction):
# if no duration specified, will be half a second
def init(self, duration=0.5): # FIXME: this was just init before
self.duration = duration
# receives the percent of the action's duration that
# has elapsed, from 0.0 - 1.0
def update(self, pct_elapsed):
self.target.color = (255, 255 * pct_elapsed, 255 * pct_elapsed)
class Explosion(Sprite):
def __init__(self, pos):
super().__init__(explosion_img, pos)
# the do() method is how sprites perform actions
# wait one second, then destroy yourself
self.do(Delay(0.7) + CallFunc(self.kill)) # FIXME: used * instead of +
class Enemy(Actor):
def __init__(self, x, y, actions, game):
super().__init__("tank.png", x, y)
self.game = game
self.max_health = 100
# starts with 100 health
self.health = 100
# worth 20 points when destroyed
self.points = 20
# points aren't awarded if the tank crashes into the bunker
self.destroyed_by_player = False # points if True
self.health_bar = TankHealthLabel(self.max_health / 20, 1)
self.health_bar.position = (self.x, self.y - 20)
self.game.add(self.health_bar)
self.schedule(self.manage_bar)
# do the action chain that came from the scenario
self.do(actions) # move, turn, move, whatever
# called when a tank is destroyed
def explode(self):
# add an Explosion sprite to the game at tank's current position
self.parent.add(Explosion(self.position))
# remove itself from game
self.health_bar.kill()
self.kill()
def manage_bar(self, _):
# rotation = self.rotation
self.health_bar.position = (self.x, self.y - 20)
# called when the tank is hit by a turret
def hit(self):
# lose 25 health points
self.health -= 25
# perform the action to turn red
self.do(Hit())
# self.health_bar.element.text = ''.join(['▋' for _ in range(round(self.health / 20))])
self.health_bar.set_percent(self.health / self.max_health)
# check if out of health and still in the game
if self.health <= 0 and self.is_running:
# health was reduced by a turret hit
self.destroyed_by_player = True
# destroy itself
self.explode()
class TankHealthLabel(Label):
def __init__(self, bars, prc, font_size=5):
super().__init__('', font_size=font_size, anchor_x='center',
anchor_y='top', color=(255, 80, 0, 255))
self.percent = prc
self.bars = bars
self.set_percent(self.percent)
def set_percent(self, prc: float):
bars_display = ''.join(['▋' for _ in range(round(self.bars * prc))])
self.element.text = bars_display
# class HealthBar(Sprite):
# def __init__(self, pos):
# super().__init__('assets/health_bar.png', pos)
#
# self.percent = 1.0
#
# def set_percent(self, prc: float):
# self.percent = 1.0
class Bunker(Actor):
def __init__(self, x, y, game=None):
super().__init__("bunker.png", x, y)
# the bunker has 100 health to start
self.max_health = 100
self.health = self.max_health
w, h = director.get_window_size()
self.health_bar = TankHealthLabel(self.max_health / 5, 1, 18)
# health_pos = Vector2(300, 470) # the desired position
health_pos = Vector2(w / 2, h - 10) # the desired position
self.health_bar.position = health_pos - (self.x, self.y) # adjusting desired position to be relative the bunker
# game.add(self.health_bar)
self.add(self.health_bar)
def collide(self, other):
# did bunker collide with an Enemy object?
if isinstance(other, Enemy):
# reduce health by 10
self.health -= 10
self.health_bar.set_percent(self.health / self.max_health)
# explode the Enemy object
other.explode()
# check for bunker death
if self.health <= 0 and self.is_running:
# self.health_bar.kill()
self.kill()
# turret missiles aren't Actors because they don't collide
class Shoot(Sprite):
def __init__(self, pos, travel_path, enemy):
super().__init__("shoot.png", position=pos)
# perform a chain of actions:
# move toward enemy very quickly,
# remove itself from game,
# call the Enemy's hit() function
self.do(MoveBy(travel_path, 0.1) +
CallFunc(self.kill) +
CallFunc(enemy.hit))
# turret slot images are part of the background image, so they
# are not sprites
class TurretSlot:
def __init__(self, pos, side):
self.available = True
# use the "splat" operator to unpack position vector into x and y
self.cshape = AARectShape(
Vector2(*pos),
side * 0.5,
side * 0.5
)
class Turret(Actor):
def __init__(self, x, y):
super().__init__("turret.png", x, y)
# contains a second sprite - the white range indicator circle
self.add(Sprite("range.png", opacity=50, scale=5))
# the collider is the same size as the range circle, which has
# been scaled to 5 times its normal size
self.cshape.r = self.width * 5 / 2
# no tank targeted... yet
self.target = None
# turrets reload every 2 seconds
self.period = 2.0
# track time elapsed since last shot fired
self.elapsed = 0.0
# call the _shoot function every frame to see if eligible to fire
self.schedule(self._shoot)
def _shoot(self, dt):
# not enough time elapsed since last shot fired
if self.elapsed < self.period:
# keep accumulating time
self.elapsed += dt
elif self.target is not None:
# otherwise, if it has a target, fire!
# reset the reload timer
self.elapsed = 0.0
# calculate difference between turret and tank positions
target_path = Vector2(
self.target.x - self.x,
self.target.y - self.y
)
# normalize the vector so we can adjust it by the length of
# the turret barrels
pos = self.cshape.center + target_path.normalized() * 20
# create a missile at the tip of the barrels
self.parent.add(Shoot(pos, target_path, self.target)) # slide 106
# called if a tank intersects the turret's firing range circle
def collide(self, other):
# uh oh, pal... you're a target now
self.target = other
# if it's a real target
if self.target is not None:
# find the angle of rotation for the turret to point at tank
x, y = other.x - self.x, other.y - self.y
# use arc tangent to find the angle
angle = -math.atan2(y, x)
# convert radians to degrees
self.rotation = math.degrees(angle)
|
989,851 | 0e0c675fa1b9528a5aaf9524936a3db200dec0d2 | import datetime
import re
from src.utils import valid_date_string
def extract_cnh_number(data: dict, text: str, ratio: float) -> None:
    """Extract the 11-digit CNH registration number from OCR'd text.

    Only fires for text whose box aspect *ratio* falls in the window the
    number is printed in, and only while ``data['numero']`` is unset.
    Mutates *data* in place; returns None.
    """
    # presumably `ratio` is the OCR bounding-box width/height — confirm upstream
    if 5.1 <= ratio <= 7.1 and data['numero'] is None:
        numbers = re.findall(r"\d{11}", text)
        if numbers:
            # every findall hit is exactly 11 digits, so the original
            # `len(number) == 11` re-check was redundant; the original
            # loop kept overwriting, i.e. the LAST match won — keep that.
            data['numero'] = numbers[-1]
def extract_cpf_cnh(data: dict, text: str, text_size: int, ratio: float) -> None:
    """Extract a formatted CPF (XXX.XXX.XXX-XX) from OCR'd text.

    Annotation fixed: *text_size* is compared numerically (`> 12`), so it
    must be an int (character count), not a str as previously annotated.
    Mutates *data* in place; returns None.
    """
    # raw string: '\.' in a plain literal is an invalid escape sequence
    cpf_pattern = r'[0-9]{3}\.[0-9]{3}\.[0-9]{3}\-[0-9]{2}'
    if (3.3 <= ratio <= 5.3) and (data["cpf"] is None) and (text_size > 12):
        # OCR often reads '.' as ',' and inserts stray spaces — normalize first
        text = text.replace(',', '.').replace(' ', '')
        cpf_search = re.findall(cpf_pattern, text)
        if cpf_search:
            data["cpf"] = cpf_search[0]
def extract_rg_cnh(data: dict, text: str, words: int, ratio: float) -> None:
    """Extract RG number, issuing body and state (UF) from OCR'd text.

    Tokens are classified purely by length: exactly 2 chars -> UF,
    3-6 chars -> issuing body, 7+ chars -> the RG number itself.
    Mutates *data* in place; returns None.
    """
    if not (7.2 <= ratio <= 10):
        return
    if data["rg"] is not None or words < 1:
        return
    for token in re.split(r"([\w\/]+)", text):
        length = len(token)
        if length == 2:
            data["rg_uf"] = token
        elif 2 <= length <= 6:
            data["rg_emissor"] = token
        elif length > 6:
            data["rg"] = "".join(token.split())
def extract_dates_cnh(data: dict, text: str, text_size: int, ratio: float) -> None:
    """Classify a dd/mm/yyyy date from OCR'd text as birth date or expiry.

    A date more than ~18 years in the past is taken as the birth date; a
    date in the future as the licence expiry.  Mutates *data* in place.
    """
    # BUG FIX: the original used bare `text.find("/")`, which is truthy
    # for -1 (not found!) and falsy when "/" sits at index 0 — inverted
    # in both interesting cases.  Use a proper containment test.
    if (3.3 <= ratio <= 5) and (text_size == 10 and "/" in text):
        date = valid_date_string(text.strip())
        if not date:
            return
        if isinstance(date, datetime.datetime):
            now = datetime.datetime.now()
            # approximate adulthood cutoff (ignores leap days)
            legalAge = datetime.datetime.now() - datetime.timedelta(days=365*18)
            if (data["dt_nasc"] is None and date < legalAge):
                data["dt_nasc"] = "".join(text.split())
            if (data["validade"] is None and date > now):
                data["validade"] = "".join(text.split())
def extract_name_cnh(data: dict, text: str, words: int, ratio: float) -> None:
    """Take a wide text box containing 2-6 words as the holder's name."""
    in_band = 12.5 <= ratio <= 17
    plausible_word_count = 1 < words < 7
    if in_band and data["nome"] is None and plausible_word_count:
        data["nome"] = text
|
989,852 | 1adf3953075bdf7a0ea2aa86035a2b3a354953ec | from django.db import models
from modelcluster.fields import ParentalKey
from wagtail.core.models import Page, Orderable
from wagtail.core.fields import RichTextField
from wagtail.admin.edit_handlers import FieldPanel, InlinePanel
from wagtail.images.edit_handlers import ImageChooserPanel
from wagtail.search import index
class HomePage(Page):
body = RichTextField(blank=True)
content_panels = Page.content_panels + [
InlinePanel('hero_images', label='Hero Images'),
FieldPanel('body', classname="full"),
]
class HeroGalleryImage(Orderable):
page = ParentalKey(HomePage, on_delete=models.CASCADE, related_name='hero_images')
hero_image = models.ForeignKey(
'wagtailimages.Image', on_delete=models.CASCADE, related_name='+'
)
hero_slogan = models.CharField(blank=True, max_length=250)
panels = [
ImageChooserPanel('hero_image'),
FieldPanel('hero_slogan'),
]
# Keep the definition of BlogIndexPage, and add:
class WebPage(Page):
body = RichTextField(blank=True)
search_fields = Page.search_fields + [
index.SearchField('body'),
]
content_panels = Page.content_panels + [
InlinePanel('slider_images', label="Slider images"),
FieldPanel('body', classname="full"),
]
class WebPageGalleryImage(Orderable):
page = ParentalKey(WebPage, on_delete=models.CASCADE, related_name='slider_images')
image = models.ForeignKey(
'wagtailimages.Image', on_delete=models.CASCADE, related_name='+'
)
caption = models.CharField(blank=True, max_length=250)
panels = [
ImageChooserPanel('image'),
FieldPanel('caption'),
]
|
989,853 | 42fc8b92a73e47ce1698d2f4061ccb6f8805b0a2 | # 26. Write a Python program to create a histogram from a given list of integers.
def histogram(a):
    """Print a star histogram for the integers in *a*.

    Generalized to also return the list of rendered rows so callers can
    test or reuse the output; the printing behaviour is unchanged
    (previously the function returned None, which no caller used).
    """
    rows = ["*" * i for i in a]
    for row in rows:
        print(row)
    return rows
a = (1, 2, 10, 15, 3, 4, 5, 18)
histogram (a)
|
989,854 | b75da5b753a29256d23c1f4f109fc12c457bd640 | from django.conf.urls import url
from . import views
app_name = 'chat'
urlpatterns = [
url(r'chat/$', views.create_message, name='chat'),
url(r'messages/(?P<pk>\d+)$', views.message_list, name='message_list')
]
|
989,855 | 05682fd58e32f6589415407a01bb75175e58df7e | import os
import logging
import face_recognition
class FaceRecognizer():
    """Face recognition module for package theft detection system"""

    def __init__(self):
        self._load_known_face()

    def _load_known_face(self):
        """Loads known faces from the 'faces' directory next to this module."""
        faces_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'faces')
        faces = [os.path.join(faces_dir, f) for f in os.listdir(faces_dir) if f.endswith('.jpeg')]
        known_images = [face_recognition.load_image_file(i) for i in faces]
        self.known_faces = []
        for image in known_images:
            encoding = face_recognition.face_encodings(image)
            if len(encoding) > 0:
                logging.debug('Adding known face')
                # only the first face found in each reference image is kept
                self.known_faces.append(encoding[0])

    def known_face_detected(self, frame):
        """Returns bool if a known face is detected in *frame*."""
        faces_detected = face_recognition.face_encodings(frame)
        if len(faces_detected) > 0:
            # PERF FIX: reuse the encodings computed above instead of
            # running the expensive face_encodings() pass a second time.
            # NOTE(review): only the first detected face is compared —
            # a frame with several faces ignores all but one.
            unknown = faces_detected[0]
            results = face_recognition.compare_faces(self.known_faces, unknown)
            if True in results:
                logging.info('Known face detected')
                return True
        logging.info('Unknown face detected')
        return False
|
989,856 | b4c989ed44ba28f5d197c58faf64742185a44af3 | # 4.14 Flattening a Nested Sequence
from collections import Iterable
def flatten(items, ignore_types=(str, bytes)):
    """Recursively yield the leaves of an arbitrarily nested iterable.

    Strings and bytes are treated as atoms (not re-iterated); override
    via *ignore_types*.
    """
    # BUG FIX: `from collections import Iterable` was deprecated in 3.3
    # and removed in Python 3.10 — import the ABC from its canonical home.
    from collections.abc import Iterable
    for x in items:
        if isinstance(x, Iterable) and not isinstance(x, ignore_types):
            yield from flatten(x)
        else:
            yield x
items = [1, 2, 3, [4, 5, [7, 8, [9, 0] ] ] ]
print(list(x for x in flatten(items)))
# [1, 2, 3, 4, 5, 7, 8, 9, 0]
|
989,857 | 73943c5ea49b330f57ef9dd37d34f9fa3de1619e | from collections import deque
def solution(n, edge):
    """Count the vertices of an n-node graph at maximum BFS distance from node 1.

    *edge* is a list of undirected [a, b] pairs using 1-based vertex ids.
    """
    adjacency = [[] for _ in range(n + 1)]
    for u, v in edge:
        adjacency[u].append(v)
        adjacency[v].append(u)

    distance = [0] * (n + 1)
    seen = [False] * (n + 1)
    seen[1] = True

    # breadth-first search from vertex 1
    frontier = deque([1])
    while frontier:
        current = frontier.popleft()
        for neighbour in adjacency[current]:
            if seen[neighbour]:
                continue
            seen[neighbour] = True
            distance[neighbour] = distance[current] + 1
            frontier.append(neighbour)

    # how many vertices share the greatest distance
    return distance.count(max(distance))
print(solution(6, [[3, 6], [4, 3], [3, 2], [1, 3], [1, 2], [2, 4], [5, 2]])) |
989,858 | 842f3c7af5be09aca334fc399460098b77505331 | command = input("> ")
command = str(command)
command = command.split()
if command[0] in ["GET", "SET", "UNSET", "NUMEQUALTO", "END"]:
print ("Seems to work!")
|
989,859 | 48dbd793bf38fe3c4bcf78d7fd3171aba91a8e05 | import functools
import json
from rest_framework import status
from django.http import JsonResponse
from core import models, serializers
MODEL_SERIALIZERS = {
models.Degree.__name__: serializers.degree_to_dict
}
def serialize_django_model(model_instance):
    """Serialize *model_instance* using the registered serializer.

    Looks the serializer up by class name in MODEL_SERIALIZERS and raises
    NotImplementedError when no serializer is registered for the class.
    """
    key = model_instance.__class__.__name__
    serializer = MODEL_SERIALIZERS.get(key)
    if serializer is None:
        raise NotImplementedError('No serializer registered for {}'.format(key))
    return serializer(model_instance)
def api_success(data, status_code=status.HTTP_200_OK):
dict_data = serialize_django_model(data) if models.is_django_model(data) else data
return JsonResponse({'data': dict_data}, status=status_code)
def api_error(message, status_code=status.HTTP_200_OK):
return JsonResponse({'error': message}, status=status_code)
def parse_api_json_body(func):
    """Decorator: parse the request body as JSON and pass it as ``parsed_body``.

    An absent/empty body yields ``parsed_body=None``.  Malformed JSON will
    raise json.JSONDecodeError (and non-UTF-8 bytes UnicodeDecodeError) —
    callers/middleware are expected to handle those.
    """
    @functools.wraps(func)
    def wrap(request, *args, **kwargs):
        parsed_request_body = None
        # request.body is bytes; an absent body decodes to ""
        decoded = request.body.decode('utf-8')
        if decoded is not None and not (decoded == ""):
            parsed_request_body = json.loads(decoded)
        return func(request, *args, **kwargs, parsed_body=parsed_request_body)
    return wrap
989,860 | 61ad2b77765761df06d16e51c1b8ac792975a894 | def print_odds():
i=1
while True:
s = i+2
i=s
print(s)
|
989,861 | d085d6078090ca3568cbb9440825442cd6f110ea | # -*- coding: utf-8 -*-
'''
Truncatable primes
Problem 37
The number 3797 has an interesting property. Being prime itself, it is possible to continuously remove digits from left to right, and remain prime at each stage: 3797, 797, 97, and 7. Similarly we can work from right to left: 3797, 379, 37, and 3.
Find the sum of the only eleven primes that are both truncatable from left to right and right to left.
NOTE: 2, 3, 5, and 7 are not considered to be truncatable primes.
Answer: 748317 Completed on Mon, 20 Oct 2014, 16:33
https://projecteuler.net/problem=37
@author Botu Sun
'''
# Cannot have even digits.
# First digit can be 2, 3, 5, 7
# Middle digits can be 3, 5, 7, 9
# Last digit can be 3, 5, 7
import math
# Memoized primality results; pre-seeded: 1 is not prime, 2 is.
# NOTE: the name shadows the `map` builtin — kept because sibling
# functions in this file reference it.
map = {2: True, 1: False}

def IsPrime(n):
    """Trial-division primality test with global memoization.

    FIXES for Python 3 (still valid Python 2): dict.has_key() was removed
    (use `in`), and xrange() was renamed to range().
    """
    global map
    if n in map:
        return map[n]
    for i in range(2, int(math.sqrt(n)) + 1):
        if n % i == 0:
            map[n] = False
            return False
    map[n] = True
    return True
def ValidNumberForRecursion(n):
    """Check that *n* and every left-truncation of it are prime.

    E.g. 3797 -> 797 -> 97 -> 7 must all pass IsPrime.
    """
    remaining = n
    digits = len(str(n))
    while remaining != 0:
        if not IsPrime(remaining):
            return False
        # drop the leading digit
        remaining = remaining % int(math.pow(10, digits - 1))
        digits -= 1
    return True
def ValidNumberFromRight(n):
    """Check that *n* and every right-truncation of it are prime.

    E.g. 3797 -> 379 -> 37 -> 3 must all pass IsPrime.
    """
    while n != 0:
        if not IsPrime(n):
            return False
        # BUG FIX: `n /= 10` is float division on Python 3 (the loop
        # would then feed floats to IsPrime and never reach exactly 0);
        # floor division keeps the original Python 2 integer semantics.
        n //= 10
    return True
def Recursion(n, digits):
    """Depth-first search over left-extensions of the truncatable prime *n*.

    *n* currently has *digits* digits.  Only numbers whose every
    left-truncation is prime are extended (this pruning is what makes the
    search terminate); those that are also right-truncatable are printed
    and added to the global accumulator `sum`.

    FIX: `print n` (Python 2 statement syntax) replaced with print(n),
    which behaves identically on Python 2 and 3 for a single argument.
    """
    global sum
    if ValidNumberForRecursion(n):
        if ValidNumberFromRight(n):
            print(n)
            sum += n
        # extend with each digit that may legally appear as a new leading
        # digit (even digits and 0/4/6/8 can never start a prime > 2)
        Recursion(n + 1 * int(math.pow(10, digits)), digits + 1)
        Recursion(n + 2 * int(math.pow(10, digits)), digits + 1)
        Recursion(n + 3 * int(math.pow(10, digits)), digits + 1)
        Recursion(n + 5 * int(math.pow(10, digits)), digits + 1)
        Recursion(n + 7 * int(math.pow(10, digits)), digits + 1)
        Recursion(n + 9 * int(math.pow(10, digits)), digits + 1)
# Accumulator for the answer.  Seeded at -(2+3+5+7) because the
# single-digit primes are used as recursion roots but are excluded from
# the answer by the problem statement.  (Shadows the `sum` builtin —
# kept for compatibility with Recursion(), which mutates this global.)
sum = 0 - 2 - 3 - 5 - 7
Recursion(2, 1)
Recursion(3, 1)
Recursion(5, 1)
Recursion(7, 1)
# FIX: `print sum` was Python 2 statement syntax; print(sum) works on both.
print(sum)
|
989,862 | b570a16ddc3a97a5cae42b9bd20a90fd9c3d712d | import unittest
import os
import os.path
from programy.storage.stores.file.store.licensekeys import FileLicenseStore
from programy.storage.stores.file.engine import FileStorageEngine
from programy.storage.stores.file.config import FileStorageConfiguration
from programy.utils.license.keys import LicenseKeys
from programy.storage.stores.file.config import FileStoreConfiguration
class FileLicenseKeysStoreTests(unittest.TestCase):
    """Unit tests for FileLicenseStore backed by a file storage engine."""

    def test_initialise(self):
        """The store keeps a reference to the engine it was created with."""
        config = FileStorageConfiguration()
        engine = FileStorageEngine(config)
        engine.initialise()
        store = FileLicenseStore(engine)
        self.assertEqual(store.storage_engine, engine)

    def test_load_license_key(self):
        """Keys from the fixture file are loaded into a LicenseKeys object."""
        config = FileStorageConfiguration()
        # point the license store at the fixture under ./data/licenses
        config._license_storage = FileStoreConfiguration(file=os.path.dirname(__file__) + os.sep + "data" + os.sep + "licenses" + os.sep + "test_license.keys", format="text", encoding="utf-8", delete_on_start=False)
        engine = FileStorageEngine(config)
        engine.initialise()
        store = FileLicenseStore(engine)
        store.empty()
        license_keys = LicenseKeys()
        store.load(license_keys)
        # both fixture keys must round-trip exactly
        self.assertTrue(license_keys.has_key("TESTKEY1"))
        self.assertEqual("VALUE1", license_keys.get_key("TESTKEY1"))
        self.assertTrue(license_keys.has_key("TESTKEY2"))
        self.assertEqual("VERY LONG VALUE 2", license_keys.get_key("TESTKEY2"))
|
989,863 | 34a5e7d5dd43324c4f45a1205259e0e41c2ad566 | # There is a brick wall in front of you. The wall is rectangular and has several rows of bricks. The bricks have the same height but different width. You want to draw a vertical line from the top to the bottom and cross the least bricks.
# The brick wall is represented by a list of rows. Each row is a list of integers representing the width of each brick in this row from left to right.
# If your line go through the edge of a brick, then the brick is not considered as crossed. You need to find out how to draw the line to cross the least bricks and return the number of crossed bricks.
# You cannot draw a line just along one of the two vertical edges of the wall, in which case the line will obviously cross no bricks.
# Input: [[1,2,2,1], - [1, 1, 1, 1, 1, 1]
# [3,1,2],
# [1,3,2],
# [2,4],
# [3,1,2],
# [1,3,1,1]]
class Solution:
    def leastBricks(self, wall) -> int:
        """Return the minimum number of bricks a vertical line must cross.

        *wall* is a list of rows; each row lists brick widths left to
        right.  For every interior edge position we count how many rows
        have a brick boundary there; the best line passes through the
        position shared by the most rows, crossing the remaining rows.

        Fixes/improvements over the original:
        - guards against an empty wall (was an IndexError on wall[0]);
        - uses a sparse dict keyed by edge position instead of a dense
          list of length sum(wall[0]), which wasted memory on wide walls.
        """
        if not wall:
            return 0
        edge_counts = {}
        best = 0
        for row in wall:
            position = 0
            # skip the last brick: the wall's outer right edge never counts
            for brick in row[:-1]:
                position += brick
                count = edge_counts.get(position, 0) + 1
                edge_counts[position] = count
                best = max(best, count)
        return len(wall) - best
|
989,864 | 54467dbb7c704e6a1548bb3a32690f5ae8599903 | import locale
locale.setlocale(locale.LC_ALL, '')
import matplotlib as mpl
mpl.rcParams.update({'mathtext.fontset':'dejavusans'})
mpl.rcParams['axes.formatter.use_locale'] = True
mpl.rcParams.update({'font.size': 26, 'text.usetex':False})
import os
import matplotlib.pyplot as plt
import numpy as np
def gamma(gamma0, omega, t):
    """Oscillatory strain: amplitude *gamma0* at angular frequency *omega*."""
    return gamma0 * np.cos(omega * t)
def tau(tau0, omega, theta, t):
    """Oscillatory stress with phase lag *theta* relative to the strain."""
    return tau0 * np.cos(omega * t - theta)
omega = 5
gamma0 = 5
tau0 = 1
t = np.linspace(3 * np.pi/(2 * omega), 7 * np.pi/(2 * omega), 1000)
theta = np.pi/6
y_gamma = gamma(gamma0, omega, t)
fig, ax4 = plt.subplots(nrows=1, ncols=1, figsize=(10.8, 7.2))
ax1 = ax4.twinx()
valid_is = [0, 25, 50, 75, 100]
for i in range(0, 201, 1):
if i < 100:
perc = i/100
tau_elast = tau0 - perc * tau0
tau_visc = tau0 - tau_elast
title = '{0}% elástico, {1}% viscoso'.format(100-i, i)
if i >= 100:
perc = (i-100)/100
tau_visc = tau0 - perc * tau0
tau_elast = tau0 - tau_visc
title = '{0}% elástico, {1}% viscoso'.format(i-100, 100 - (i - 100))
# continua ... |
989,865 | 952926cc9c641abc2141214dc5b555c99a873340 | from enum import Enum
from typing import Dict, Optional
import turing.generated.models
from turing.generated.model_utils import OpenApiModel
from .source import EnsemblingJobSource, EnsemblingJobPredictionSource
from .sink import EnsemblingJobSink
ResourceRequest = turing.generated.models.EnsemblingResources
class ResultType(Enum):
    """Data type of the ensembling result column (mirrors the Turing API enum)."""
    DOUBLE = 0
    FLOAT = 1
    INTEGER = 2
    LONG = 3
    STRING = 4
    # ARRAY is a container type; its element type goes in ResultConfig.item_type
    ARRAY = 10

    def to_open_api(self) -> OpenApiModel:
        # the generated OpenAPI enum is keyed by the member NAME, not the value
        return turing.generated.models.EnsemblingJobResultType(self.name)
class ResultConfig:
    """Configuration of the ensembling result column.

    :param type: data type of the result column
    :param column_name: name of the column holding the ensembling result
    :param item_type: element type; only meaningful when ``type`` is ARRAY
    """
    def __init__(
            self,
            type: ResultType,
            column_name: str,
            item_type: Optional[ResultType] = None):
        self._type, self._column_name, self._item_type = type, column_name, item_type

    def to_open_api(self) -> OpenApiModel:
        """Convert to the generated OpenAPI result-spec model."""
        kwargs = {
            'type': self._type.to_open_api(),
            'column_name': self._column_name
        }
        # item_type is only sent when set (i.e. for ARRAY results)
        if self._item_type:
            kwargs['item_type'] = self._item_type.to_open_api()
        return turing.generated.models.EnsemblingJobEnsemblerSpecResult(**kwargs)
class EnsemblingJobConfig:
"""
Configuration of the batch ensembling job
"""
def __init__(self,
source: EnsemblingJobSource,
predictions: Dict[str, EnsemblingJobPredictionSource],
result_config: ResultConfig,
sink: EnsemblingJobSink,
service_account: str,
resource_request: ResourceRequest = None,
env_vars: Dict[str, str] = None):
"""
Create new instance of batch ensembling job configuration
:param source: source configuration
:param predictions: dictionary with configuration of model predictions
:param result_config: configuration of ensembling results
:param sink: sink configuration
:param service_account: secret name containing the service account for executing the ensembling job
:param resource_request: optional resource request for starting the ensembling job.
If not given the system default will be used.
:param env_vars: optional environment variables in the form of a key value pair in a list.
"""
self._source = source
self._predictions = predictions
self._result_config = result_config
self._sink = sink
self._service_account = service_account
self._resource_request = resource_request
self._env_vars = env_vars
@property
def source(self) -> 'EnsemblingJobSource':
return self._source
@property
def predictions(self) -> Dict[str, EnsemblingJobPredictionSource]:
return self._predictions
@property
def sink(self) -> 'EnsemblingJobSink':
return self._sink
@property
def result_config(self) -> 'ResultConfig':
return self._result_config
@property
def service_account(self) -> str:
return self._service_account
@property
def resource_request(self) -> Optional['ResourceRequest']:
return self._resource_request
def job_spec(self) -> turing.generated.models.EnsemblingJobSpec:
return turing.generated.models.EnsemblingJobSpec(
source=self.source.to_open_api(),
predictions={name: source.to_open_api() for name, source in self.predictions.items()},
ensembler=turing.generated.models.EnsemblingJobEnsemblerSpec(
result=self.result_config.to_open_api()
),
sink=self.sink.to_open_api()
)
def infra_spec(self) -> turing.generated.models.EnsemblerInfraConfig:
return turing.generated.models.EnsemblerInfraConfig(
service_account_name=self.service_account,
resources=self.resource_request
)
|
989,866 | cab2a4dd3998fc70691d109fa6b2b4287318849f | # #breadth_first_search #easy
from math import sqrt
from queue import Queue
primes = [2]
for i in range(3, 10000, 2):
f, r = None, int(sqrt(i))
for p in primes:
if i % p == 0:
f = p
break
elif p > r:
break
if f == None:
primes.append(i)
primes = list(filter(lambda n: n >= 1000, primes))
outs = [ [] for _ in range(len(primes))]
for i in range(len(primes)):
for j in range(i + 1, len(primes)):
p, q = primes[i], primes[j]
while p % 10 == q % 10:
p, q = p//10, q//10
if p // 10 == q // 10:
outs[i].append(j)
outs[j].append(i)
for _ in range(int(input())):
m, n = map(int, input().split())
m = primes.index(m)
n = primes.index(n)
change_counts = [0 if i == m else None for i in range(len(primes))]
q = Queue()
q.put(m)
while (not q.empty()) and change_counts[n] == None:
i = q.get()
for j in filter(lambda j: change_counts[j] == None, outs[i]):
change_counts[j] = change_counts[i] + 1
q.put(j)
print('Impossible' if change_counts[n] == None else change_counts[n])
|
989,867 | 58ae9d644ad00373672d0f2ee33da73efe7a4699 | """Cascade delete added
Revision ID: cdd41796aa24
Revises: ff65b93addb9
Create Date: 2021-04-22 16:24:22.519825
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'cdd41796aa24'
down_revision = 'ff65b93addb9'
branch_labels = None
depends_on = None
def upgrade():
    """Recreate the alias/xref composite foreign keys with ON DELETE CASCADE.

    Each child table's (<parent>_id, version) FK previously restricted parent
    deletion; after this migration, deleting a parent row cascades to its
    aliases and service cross-references.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_constraint('city_alias_city_id_version_fkey', 'city_alias', type_='foreignkey')
    op.create_foreign_key(None, 'city_alias', 'city', ['city_id', 'version'], ['self_id', 'version'], ondelete='CASCADE')
    op.drop_constraint('city_xref_service_city_id_version_fkey', 'city_xref_service', type_='foreignkey')
    op.create_foreign_key(None, 'city_xref_service', 'city', ['city_id', 'version'], ['self_id', 'version'], ondelete='CASCADE')
    op.drop_constraint('operation_type_alias_operation_type_id_version_fkey', 'operation_type_alias', type_='foreignkey')
    op.create_foreign_key(None, 'operation_type_alias', 'operation_type', ['operation_type_id', 'version'], ['self_id', 'version'], ondelete='CASCADE')
    op.drop_constraint('operation_type_xref_service_operation_type_id_version_fkey', 'operation_type_xref_service', type_='foreignkey')
    op.create_foreign_key(None, 'operation_type_xref_service', 'operation_type', ['operation_type_id', 'version'], ['self_id', 'version'], ondelete='CASCADE')
    op.drop_constraint('realty_type_alias_realty_type_id_version_fkey', 'realty_type_alias', type_='foreignkey')
    op.create_foreign_key(None, 'realty_type_alias', 'realty_type', ['realty_type_id', 'version'], ['self_id', 'version'], ondelete='CASCADE')
    op.drop_constraint('realty_type_xref_service_realty_type_id_version_fkey', 'realty_type_xref_service', type_='foreignkey')
    op.create_foreign_key(None, 'realty_type_xref_service', 'realty_type', ['realty_type_id', 'version'], ['self_id', 'version'], ondelete='CASCADE')
    op.drop_constraint('state_alias_state_id_version_fkey', 'state_alias', type_='foreignkey')
    op.create_foreign_key(None, 'state_alias', 'state', ['state_id', 'version'], ['self_id', 'version'], ondelete='CASCADE')
    op.drop_constraint('state_xref_service_state_id_version_fkey', 'state_xref_service', type_='foreignkey')
    op.create_foreign_key(None, 'state_xref_service', 'state', ['state_id', 'version'], ['self_id', 'version'], ondelete='CASCADE')
    # ### end Alembic commands ###
def downgrade():
    """Restore the composite foreign keys with ON DELETE RESTRICT.

    NOTE(review): the pre-migration FKs may have been created without an
    explicit ``ondelete``; RESTRICT matches the database default behaviour,
    so the restored constraints behave the same — confirm if exact DDL
    round-tripping matters.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_constraint(None, 'state_xref_service', type_='foreignkey')
    op.create_foreign_key('state_xref_service_state_id_version_fkey', 'state_xref_service', 'state', ['state_id', 'version'], ['self_id', 'version'], ondelete='RESTRICT')
    op.drop_constraint(None, 'state_alias', type_='foreignkey')
    op.create_foreign_key('state_alias_state_id_version_fkey', 'state_alias', 'state', ['state_id', 'version'], ['self_id', 'version'], ondelete='RESTRICT')
    op.drop_constraint(None, 'realty_type_xref_service', type_='foreignkey')
    op.create_foreign_key('realty_type_xref_service_realty_type_id_version_fkey', 'realty_type_xref_service', 'realty_type', ['realty_type_id', 'version'], ['self_id', 'version'], ondelete='RESTRICT')
    op.drop_constraint(None, 'realty_type_alias', type_='foreignkey')
    op.create_foreign_key('realty_type_alias_realty_type_id_version_fkey', 'realty_type_alias', 'realty_type', ['realty_type_id', 'version'], ['self_id', 'version'], ondelete='RESTRICT')
    op.drop_constraint(None, 'operation_type_xref_service', type_='foreignkey')
    op.create_foreign_key('operation_type_xref_service_operation_type_id_version_fkey', 'operation_type_xref_service', 'operation_type', ['operation_type_id', 'version'], ['self_id', 'version'], ondelete='RESTRICT')
    op.drop_constraint(None, 'operation_type_alias', type_='foreignkey')
    op.create_foreign_key('operation_type_alias_operation_type_id_version_fkey', 'operation_type_alias', 'operation_type', ['operation_type_id', 'version'], ['self_id', 'version'], ondelete='RESTRICT')
    op.drop_constraint(None, 'city_xref_service', type_='foreignkey')
    op.create_foreign_key('city_xref_service_city_id_version_fkey', 'city_xref_service', 'city', ['city_id', 'version'], ['self_id', 'version'], ondelete='RESTRICT')
    op.drop_constraint(None, 'city_alias', type_='foreignkey')
    op.create_foreign_key('city_alias_city_id_version_fkey', 'city_alias', 'city', ['city_id', 'version'], ['self_id', 'version'], ondelete='RESTRICT')
    # ### end Alembic commands ###
|
989,868 | 1e8a266ab9f23cc52822d28ca3115941291b8a44 | '''Если мы возьмем 47, перевернем его и сложим,
получится 47 + 74 = 121 — число-палиндром.
Если взять 349 и проделать над ним эту операцию три раза,
то тоже получится палиндром:
349 + 943 = 1292
1292 + 2921 = 4213
4213 + 3124 = 7337
Найдите количество положительных натуральных чисел меньших 12296 таких,
что из них нельзя получить палиндром за 50 или менее применений описанной
операции (операция должна быть применена хотя бы один раз).'''
def a1(n):
    """Return 1 if *n* cannot be turned into a palindrome within 50
    reverse-and-add operations, else 0.

    The operation must be applied at least once, so the starting value of
    *n* is never itself tested for being a palindrome.
    """
    if n == 0:
        return 0
    for _ in range(50):
        n += int(str(n)[::-1])
        sn = str(n)
        # Fix: the original skipped the palindrome test for single-digit
        # results (`int(len(sn) / 2) == 0: continue`), failing to recognise
        # them as palindromes. A string equals its reverse iff it is one.
        if sn == sn[::-1]:
            return 0
    return 1
# Count how many numbers below 12296 never reach a palindrome.
total = 0
for candidate in range(12296):
    total += a1(candidate)
    if candidate % 100 == 0:
        # progress report every 100 numbers
        print(candidate, ' ', total)
print('answer = {}'.format(total))  # answer 319
989,869 | 19362a2d0a3c13a27e936a97ad38920a21986672 | import os
import sys
import numpy as np
def shuffle_data(data, labels):
    """ Shuffle data and labels in unison.
        Input:
          data: B,N,... numpy array
          label: B,... numpy array
        Return:
          shuffled data, label and shuffle indices
    """
    order = np.arange(len(labels))
    np.random.shuffle(order)
    shuffled_data = data[order, ...]
    shuffled_labels = labels[order]
    return shuffled_data, shuffled_labels, order
def rotate_point_cloud_z(batch_data):
    """ Randomly rotate the point clouds to augment the dataset.
        Rotation is per shape, about the up (z) axis.
        Input:
          BxNx3 array, original batch of point clouds
        Return:
          BxNx3 array, rotated batch of point clouds
    """
    rotated = np.zeros(batch_data.shape, dtype=np.float32)
    for idx in range(batch_data.shape[0]):
        # independent random angle for every cloud in the batch
        angle = np.random.uniform() * 2 * np.pi
        c = np.cos(angle)
        s = np.sin(angle)
        # z-axis rotation applied as (points @ R); z coordinates pass through
        rot = np.array([[c, s, 0.0],
                        [-s, c, 0.0],
                        [0.0, 0.0, 1.0]])
        cloud = batch_data[idx, ...].reshape((-1, 3))
        rotated[idx, ...] = np.dot(cloud, rot)
    return rotated
|
989,870 | 0fe0d274dae075788c6812034263a7e6c39dc87b | #!/usr/bin/env python3
import astropy.units as u
import datetime
from matplotlib import pyplot as plt
import numpy as np
import os
from scipy import ndimage
import shutil
import sunpy.map
from sunpy.net import attrs
from sunpy.net import Fido
import sys
################################################################################
# the directory in which downloaded images will be saved
dldir = 'downloaded/'
################################################################################
def sun_data_request(start_time, end_time):
    '''
    Download all SOHO/EIT data recorded in the given time interval.
    Save the downloaded images in the working directory.
    Data is stored at intervals of 12 minutes. Hence, straddle a 12-minute mark for each file you need.
    For instance, use the following
        start_time = '2005-01-01 00:10'
        end_time   = '2005-01-01 01:10'
    to get 5 images captured at 00:12, 00:24, 00:36, 00:48 and 01:00 on 1st January 2005.

    Args:
        start_time: string indicating start of time interval
        end_time: string indicating end of time interval

    Returns:
        None
    '''
    # read the time interval
    interval = attrs.Time(start_time, end_time)

    # query the images captured in the time interval
    # download the images to the current directory (network I/O via Fido)
    result = Fido.search(interval, attrs.Instrument('eit'))
    Fido.fetch(result, path = dldir)
################################################################################
def download_multiple_images(s_date, number_of_days):
    '''
    Download all SOHO/EIT images for each day in the period specified.

    Args:
        s_date: datetime.date object indicating the date of the first image to download
        number_of_days: number of days for which images have to be downloaded

    Returns:
        None
    '''
    one_day = datetime.timedelta(days = 1)
    current = s_date
    # grab the image captured at 00:12 on each day by straddling that mark
    for _ in range(number_of_days):
        sun_data_request(str(current) + ' 00:10', str(current) + ' 00:15')
        current = current + one_day
################################################################################
if __name__ == '__main__':
    # require the image count as the sole command-line argument
    if len(sys.argv) < 2:
        print('usage:')
        print('\t./query_images.py <number of images to download>')
        raise SystemExit

    try:
        number_of_days = int(sys.argv[1])
    except ValueError:
        print('Invalid number of images specified.')
        raise SystemExit

    # download one image per day starting from this fixed date
    s_date = datetime.date(2005, 1, 1)
    download_multiple_images(s_date, number_of_days)
|
989,871 | c364011f9925c9ea968f48427350e677df867c44 | from sys import path
from os.path import dirname as dir
path.append(dir(path[0]) + '\\generic_searches')
from state_repr import MAX_NUM, MC_state
from generic_searches import bfs, node_to_path
from generic_data_structures import Node
from typing import List, Optional
def display_solution(path: List[MC_state]) -> None:
    """Print a human-readable narration of a missionaries-and-cannibals
    solution path, one boat crossing at a time."""
    if not path:
        return
    previous = path[0]
    print(previous)
    for state in path[1:]:
        # boat flag indicates which bank the boat ended up on after the move
        if state.boat:
            print(f'{previous.right_m - state.right_m} missionaries and {previous.right_c - state.right_c} cannibals moved from left to right bank')
        else:
            print(f'{previous.left_m - state.left_m} missionaries and {previous.left_c - state.left_c} cannibals moved from right to left bank')
        print(state)
        previous = state
if __name__ == "__main__":
    # Solve the classic 3-missionaries / 3-cannibals river crossing with BFS.
    start: MC_state = MC_state(3, 3, True)
    solution: Optional[Node[MC_state]] = bfs(start, MC_state.goal_test, MC_state.successors)
    if solution is None:
        print("No solution found.")
    else:
        # walk back from the goal node to recover the full state sequence
        path = node_to_path(solution)
        display_solution(path)
|
989,872 | d22adca4c34874802b57626b9b91733fdfc79131 | from queue import PriorityQueue
class Solution:
    # LeetCode-style "coordinate with maximum network quality" problem.
    # NOTE(review): only tower positions are evaluated as candidate
    # coordinates; the usual problem statement allows any integral
    # coordinate in range — confirm this restriction is intended.
    def bestCoordinate(self, towers: List[List[int]], radius: int) -> List[int]:
        q = PriorityQueue()
        for current in towers:
            s = 0
            for other in towers:
                if current is other:
                    continue
                d = dist(current[:2], other[:2])
                if d <= radius:
                    # negated so the queue's minimum is the best quality;
                    # floor-division implements the problem's floor()
                    s -= other[-1] // (1 + d)
            # the current tower always contributes its full power (d == 0)
            q.put([s - current[-1]] + current[:2])
        # min entry is [-best_quality, x, y]; list comparison breaks quality
        # ties on the smaller x, then the smaller y
        return q.get()[-2:]
def dist(a, b):
    """Euclidean distance between 2-D points *a* and *b*."""
    dx = a[0] - b[0]
    dy = a[1] - b[1]
    return (dx * dx + dy * dy) ** 0.5
989,873 | 153a91ff300f3a2482faf3bbc935543fa436b5da | import nltk
from bs4 import BeautifulSoup
import urllib.request
import csv
from tqdm import tqdm
# One-off scraper: fetch article bodies from the URLs below, sentence-split
# and POS-tag them with NLTK, and dump tokens to a tab-separated CSV in a
# CoNLL-like "Sentence N / token / tag" layout.
nltk.download('punkt')
nltk.download('averaged_perceptron_tagger')
tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
# Articles to scrape; earlier batches are kept commented out for reference.
urlpages = [
    #'https://esportsobserver.com/echo-fox-opseat-sponsor/',
    #'https://esportsobserver.com/lol-eu-minor-leagues/',
    #'https://esportsobserver.com/china-recap-feb6-2019/',
    #'https://esportsobserver.com/overwatch-league-brazil-broadcast/',
    #'https://esportsobserver.com/slg-best-buy-logitechg-challenge/'
    #'https://esportsobserver.com/hyperx-marco-reus-brand-ambassador/',
    #'https://esportsobserver.com/bird-bird-legal-support-esic/',
    #'https://esportsobserver.com/single-player-games-twitch/',
    #'https://esportsobserver.com/fortnite-marshmello-concert/'
    #'https://esportsobserver.com/globe-telecom-mineski-team-liyab/',
    #'https://esportsobserver.com/blizzard-owl-coca-cola/',
    #'https://esportsobserver.com/newzoo-ceo-esports-phase-two/'
    #'https://esportsobserver.com/psyonix-eleague-rocket-league/',
    #'https://esportsobserver.com/esl-paysafecard-through-2019/',
    #'https://esportsobserver.com/hyperx-pittsburgh-knights-partner/',
    #'https://esportsobserver.com/psg-mobile-legends-team/'
    #'https://www.forbes.com/sites/mattperez/2019/02/26/g2-esports-raises-17-3-million-in-series-a-funding',
    #'https://esportsobserver.com/rainbow-six-pro-league-rev-sharing/',
    #'https://esportsobserver.com/nissan-faze-clan-optic-gaming/',
    #'https://esportsobserver.com/csgo-pro-league-france/',
    #'https://esportsobserver.com/t1-faceit-apex-legends/',
    #'https://esportsobserver.com/aquilini-funding-luminosity-gaming/',
    #'https://www.forbes.com/sites/mikeozanian/2018/10/23/the-worlds-most-valuable-esports-companies-1/#48a2b6a06a6e',
    #'https://www.forbes.com/sites/forbessanfranciscocouncil/2019/03/11/five-esports-predictions-what-does-the-year-hold-for-companies-and-developers/#18d3edde395b',
    'http://www.espn.com/esports/story/_/id/26130966/astralis-build-counter-strike-legacy-iem-katowice-title',
    'http://www.espn.com/esports/story/_/id/26136799/where-rainbow-six-siege-goes-g2-pengu-follow',
    'http://www.espn.com/esports/story/_/id/26171562/checking-jensen-team-liquid',
    'http://www.espn.com/esports/story/_/id/26230906/take-look-fortnite-500000-secret-skirmish',
    'http://www.espn.com/esports/story/_/id/26249076/call-duty-franchise-spots-sell-25-million-per-team',
    'http://www.espn.com/esports/story/_/id/26265839/team-solomid-zven-smoothie-chat-mistakes-improvements',
    'http://www.espn.com/esports/story/_/id/26275659/from-scrims-stage-100-thieves-searches-same-page',
    'http://www.espn.com/esports/story/_/id/26298615/looking-griffin-surprising-loss-geng',
    'http://www.espn.com/esports/story/_/id/26352540/vancouver-titans-crush-overwatch-league-stage-1'
]
rows = []
# Sentence numbering continues from where a previous run left off.
sentenceIdx = 131
for urlpage in tqdm(urlpages):
    page = urllib.request.urlopen(urlpage)
    soup = BeautifulSoup(page, 'html.parser',)
    soup.prettify('UTF-8')
    #print(soup)
    # Site-specific content selectors; exactly one should be uncommented to
    # match the URLs listed above.
    text = ' '.join( x.getText() for x in soup.find('div', {'class': 'article-body'}).find_all('p')) #espn
    #text = ' '.join( x.getText() for x in soup.find('div', {'class': 'article-container'}).find_all('p')) #forbes
    #text = ' '.join( x.getText() for x in soup.find('div', {'class': 'entry-content'}).find_all('p')) #teo
    # Normalize typographic quotes/dashes so the tagger sees plain ASCII.
    text = text.replace('“', '"').replace('”', '"').replace("‘", "'").replace("’", "'").replace("…", "...").replace("–", "-")
    #print(text)
    print('Processing sentences for {}'.format(urlpage))
    sentences = tokenizer.tokenize(text)
    for sentence in tqdm(sentences):
        tokens = nltk.word_tokenize(sentence)
        tagged = nltk.pos_tag(tokens)
        # First token of a sentence carries the "Sentence N" label; the rest
        # leave that column empty.
        first_row_sentence = [f'Sentence {sentenceIdx}']
        for val in tagged[0]:
            first_row_sentence.append(val)
        rows.append(first_row_sentence)
        for row in tagged[1:]:
            a_row = ['']
            for val in row:
                a_row.append(val)
            rows.append(a_row)
        sentenceIdx += 1
        #print(tagged)
        #print('\n-----\n')
#print('\n-----\n'.join('\t'.join(x) for x in rows))
with open('teo_tagged_clean.csv', 'w', encoding='UTF-8') as output:
    csv_output = csv.writer(output, delimiter='\t')
    csv_output.writerows(rows)
989,874 | a6f98cc080a4ff1dfc9c6d804d1e8008b68cb808 | '''
Created on Feb 24, 2014
@author: Jason C Rodriguez
'''
if __name__ == '__main__':
    # Placeholder module — intentionally does nothing when run directly.
    pass
989,875 | cc4d0550cd4124a1b3b1caacead5c3d321955d07 | import pandas as pd
import numpy as np
countries = pd.read_csv('reference/countries.csv',
header=0,
dtype=str,
encoding='utf-8')[['alpha-2']]
currencies = pd.read_csv('reference/currencies.csv',
header=0,
dtype=str,
encoding='utf-8')[['AlphabeticCode']]
companies = pd.read_csv('reference/companies.csv',
header=0,
dtype=str,
encoding='utf-8')[['source_id']]
base_df = pd.DataFrame.from_dict({'open': [1.0],
'high': [2.0],
'low': [3.0],
'close': [4.0],
'volume': [5.0],
'P/E': [6.0],
'EPS': [7.0]})
countries['key'] = 1
currencies['key'] = 1
companies['key'] = 1
base_df['key'] = 1
big_df = pd.merge(pd.merge(pd.merge(base_df, currencies), countries), companies).drop(['key'], axis=1)
big_df = big_df.rename(columns={'alpha-2': 'country_code', 'AlphabeticCode': 'currency_code', 'source_id': 'company_source_id'})
nrows = big_df.shape[0]
big_df['open'] = np.random.randint(10, 90, nrows)/10.0
big_df['high'] = np.random.randint(20, 150, nrows)/10.0
big_df['low'] = np.random.randint(1, 70, nrows)/10.0
big_df['close'] = np.random.randint(20, 100, nrows)/10.0
big_df['volume'] = np.random.randint(100, 10000, nrows)
big_df['P/E'] = np.random.randint(30, 300, nrows)/10.0
big_df['EPS'] = np.random.randint(100, 10000, nrows)/10.0
print(big_df.head(100))
big_df.to_csv('input_root/big/end_of_day.csv',
index=False,
header=True,
encoding='utf8')
|
989,876 | f489e5fb30b36e955b7e8b31cc937c56a714a38b | # -*- coding: utf-8 -*-
"""
samsaraapi
This file was automatically generated by APIMATIC v2.0 ( https://apimatic.io ).
"""
class Data(object):
    """Implementation of the 'data' model.

    Attributes:
        external_ids (dict<object, string>): external identifier mapping.
        harsh_accel_setting (HarshAccelSettingEnum): Harsh Event Detection
            Setting * 0: Passenger * 1: Light Truck * 2: Heavy * 3: Off
            * 4: Automatic
        name (string): Name
    """

    # Mapping from Model property names to API property names.
    _names = {
        "external_ids": 'externalIds',
        "harsh_accel_setting": 'harsh_accel_setting',
        "name": 'name'
    }

    def __init__(self, external_ids=None, harsh_accel_setting=None, name=None):
        """Constructor for the Data class"""
        self.external_ids = external_ids
        self.harsh_accel_setting = harsh_accel_setting
        self.name = name

    @classmethod
    def from_dictionary(cls, dictionary):
        """Create an instance of this model from a dictionary.

        Args:
            dictionary (dictionary): deserialized server response whose keys
                MUST match property names in the API description.

        Returns:
            object: an instance of this class, or None for None input.
        """
        if dictionary is None:
            return None
        return cls(
            external_ids=dictionary.get('externalIds'),
            harsh_accel_setting=dictionary.get('harsh_accel_setting'),
            name=dictionary.get('name'),
        )
|
989,877 | 2e73dcffee32c89fff51b7cd3d45109ade9c4016 | import json
import re
import requests
class APIException(Exception):
    # Raised (chained from the original error) when the external
    # exchange-rate API request or its response parsing fails.
    pass
# currency converter
class CurrencyConverter:
    """
    Converter class whose main method takes a string of the form
        <переведи/перевод/сконвертируй> <amount> <currency from> <currency to>
    and returns a string.

    The returned string either reports an error (bad input, unknown currency,
    API failure) or contains the ready result text:
        Если перевести {amount} {value_from} в {value_to}, будет: {conversion_result}
    """
    # supported currencies (Russian name -> ISO-like code)
    CURRENCIES = {
        'доллар': 'USD',
        'евро': 'EUR',
        'биткоин': 'BTC',
        'эфириум': 'ETH',
        'рубль': 'RUR',
        'тенге': 'KZT'
    }

    # Handle a conversion request; takes the message text as str.
    def conversion(self, message_text: str) -> str:
        # parse the input into its components
        amount, value_from, value_to = self.parse_convert_request(message_text)
        # match what the user typed against the supported currencies (the
        # input may be inflected — "долларов", "рублей"); get back the
        # canonical spelling ("доллар", "рубль")
        base = self.search_value(value_from, self.CURRENCIES)
        quote = self.search_value(value_to, self.CURRENCIES)
        # if a currency is unknown, say so
        # (exceptions could be used here, but this is shorter)
        if not base:
            return f'Валюты такой нет у меня: {value_from}. \n' \
                   f'Список доступных валют можно узнать через команду /values'
        if not quote:
            return f'Валюты такой нет у меня: {value_to}.\n' \
                   f'Список доступных валют можно узнать через команду /values'
        # converting a currency into itself is pointless
        if base == quote:
            return f'Смысла в переводе таком не вижу я. \n' \
                   f'Однако, знай, что {value_from} в {value_to} - один к одному будет переводиться'
        try:
            # conversion request: pass the amount and the currency codes
            conversion_result = self.get_price(amount, self.CURRENCIES[base], self.CURRENCIES[quote])
        except APIException as e:
            return f'Волнения в силе ощущаю я. Пошло не так что-то: \n{e.__cause__}'
        else:
            # send the result back to the chat
            return f'Если перевести {amount} {value_from} в {value_to}, будет: {conversion_result}'

    # The actual exchange-rate lookup via the API plus the arithmetic.
    @staticmethod
    def get_price(amount: int, base: str, quote: str) -> float:
        """
        Simple currency converter backed by the cryptocompare.com API
        :param amount: amount to convert: int or float
        :param base: currency to convert from, e.g. USD, EUR, RUR
        :param quote: currency to convert to, e.g. USD, EUR, RUR
        :return: conversion result as float
        """
        # request the exchange rate through the API (network I/O)
        try:
            r = requests.get(f'https://min-api.cryptocompare.com/data/price?'
                             f'fsym={base}'
                             f'&tsyms={quote}')
            # the JSON payload is a dict, so pull the value out by key
            conversion_result = json.loads(r.content)[quote]
        except Exception as e:
            # chain the root cause so the chat message can show it
            raise APIException() from e
        else:
            # all good:
            return float(conversion_result) * amount

    @staticmethod
    def parse_convert_request(text: str) -> tuple:
        """
        Simple parser for the converter; understands the format
            <amount> <currency> <currency>
        with an optional "в" between the currencies.
        :param text: the text to parse
        :return:
            a 3-tuple:
            amount - the amount as float (or int when whole),
            value_from - the currency to convert from, str
            value_to - the currency to convert to, str
        """
        # first isolate the relevant group of text
        text = re.search(r'(?:переведи|перевод|c?конвертируй|(?:сколько будет))'
                         r'.*\s-?(\d+[.,]?\d*\s[A-Za-zА-Яа-яёЁ]{3,}\sв?\s?[A-Za-zА-Яа-яёЁ]{3,})', text)
        text = text.group(1)
        # pick out the number
        amount = re.search(r'\d+[.,]?\d*', text)
        # float when it has a decimal separator, int otherwise
        amount = float(amount.group().replace(',', '.')) if re.search(r'[.,]', text) else int(amount.group())
        # extract the currency to convert from
        value_from = re.search(r'([A-Za-zА-Яа-яёЁ]{3,})(?=\s?в?\s[A-Za-zА-Яа-яёЁ]{3,})', text)
        value_from = value_from.group(1)
        # extract the currency to convert to
        value_to = re.search(r'(?:[A-Za-zА-Яа-яёЁ]{3,}\sв?\s?)([A-Za-zА-Яа-яёЁ]{3,})', text)
        value_to = value_to.group(1)
        return amount, value_from, value_to

    @staticmethod
    def search_value(value: str, values: dict) -> str:
        """
        Small helper for working out which currency the user typed
        (the input may be inflected — e.g. "рублей" instead of "рубль").
        Matches on the first three letters against the supported currencies.
        :param value: the currency as originally typed
        :param values: dict of supported currencies
        :return: the currency in its canonical dictionary spelling, or None
        """
        # take the first three letters of the input
        short_value = value[:3]
        # look for a match in the dictionary and return the matching key
        for key in values.keys():
            if short_value in key:
                return key
|
989,878 | 5039d23c7298c18c58a5869d4ff5cb53c40d522a | from bottle import route, run, static_file, request, response
import os
import shutil
import releaseJS
import docs
import json
# NOTE: this file is Python 2 (print statements); routes below serve the
# editor's static assets and basic workspace operations.
@route('/style/<filename:path>')
def send_style(filename):
    # serve stylesheets
    return static_file(filename, root='../style')
@route('/script/<filename:path>')
def send_script(filename):
    # serve client-side scripts
    return static_file(filename, root='../script')
@route('/data/<filename:path>')
def send_data(filename):
    # serve data assets
    return static_file(filename, root='../data')
@route('/dialog/<filename:path>')
def send_dialog(filename):
    # dialogs are addressed without their .html extension
    return static_file(filename+'.html', root='../dialog')
@route('/demo/<filename:path>')
def send_demo(filename):
    return static_file(filename, root='../demo')
@route('/workspace/<filename:path>')
def send_file(filename):
    return static_file(filename, root='../workspace')
@route('/docs/<filename:path>')
def searchdocs(filename):
    # look up API documentation for a class/arguments pair
    cls = request.query.get('cls')
    args = request.query.get('args')
    return docs.search(cls,args)
@route('/main')
def hello():
    print 'test'
    return static_file('index.html' , root="../")
@route('/createTemplate')
def createTemplate():
    # copy a template file into the workspace
    # NOTE(review): `path` comes from the query string and is spliced into a
    # filesystem path — no traversal sanitization; confirm trusted use only.
    path = request.query.get('path')
    src = request.query.get('src')
    dest = request.query.get('dest')
    print 'create %s from template %s'%('%s/%s'%(path,dest),src)
    shutil.copy2('../template/'+src,'%s/%s'%('../'+path[1:],dest))
    return 'sucess'#static_file(filename, root='./',mimetype='text')
@route('/mkdir')
def mkdir():
    # create a directory inside the workspace
    path = request.query.get('path')
    print 'create directory %s'%path
    os.mkdir('../'+path[1:])
    return 'sucess'#static_file(filename, root='./',mimetype='text')
@route('/read/<filename:path>')
def readFile(filename):
    #response.content_type = 'text/html; charset=UTF-8'
    print 'read from %s success'%filename
    return static_file(filename, root='../',mimetype='text')
@route('/remove/<filename:path>')
def removeFile(filename):
    #response.content_type = 'text/html; charset=UTF-8'
    os.remove('../'+filename)
    print 'remove from %s success'%filename
    return 'sucess'
@route('/exist/<filename:path>')
def removeFile(filename):
    # NOTE(review): this second `removeFile` shadows the one above. Bottle
    # bound both routes at decoration time so both URLs still work, but the
    # function should be renamed (e.g. existFile) — confirm nothing refers
    # to the module-level name before renaming.
    #response.content_type = 'text/html; charset=UTF-8'
    if(os.path.exists('../'+filename)):
        return 'true'
    else:
        return 'false'
@route('/write', method='POST')
def writeFile():
    # write posted data into a workspace file
    path = request.forms.get('path')
    data = request.forms.get('data')
    #print 'post',path,data
    # NOTE(review): file handle is never closed; a `with` block would
    # guarantee the flush even on error.
    f=open('../'+path[1:],'w+')
    f.write(data)
    print 'write to %s success'%path
    return 'success'
@route('/files/<filepath:path>', method='POST')
def uploadFile(filepath):
    '''print filepath
    upload = request.files.get('upload')
    upload.save('../'+filepath,True)
    filename = upload.filename
    print filename
    return filename'''
    # Save one or more uploaded files under the workspace; the form carries
    # `num_files`, a `multiple` flag, and inputs named image0..imageN-1.
    uploadfiles=[];
    num_files=int(request.forms.get('num_files'))
    multiple=request.forms.get('multiple')[0].upper()=='T'
    if not multiple:
        upload = request.files.get('image0')
        upload.save('../'+filepath,True)
        print upload.filename
        # single upload returns the bare filename
        return upload.filename
    else:
        for i in range(0,num_files):
            upload = request.files.get('image%d'%i)
            upload.save('../'+filepath,True)
            uploadfiles.append(upload.filename)
            print upload.filename
        # multiple uploads return a JSON list of filenames
        return json.dumps(uploadfiles)
@route('/temp', method='POST')
def readTempFile():
    # echo an uploaded file's content back without saving it
    upload = request.files.get('upload')
    return upload.file.read()
    #return f.read()
@route('/release/<filepath:path>')
def releaseFile(filepath):
    # trigger a JS release build for the given project directory
    releaseJS.releaseProject('../'+filepath+'/')
    return 'success'
run(host='localhost', port=8080, debug=True)
|
989,879 | 33dc30c281ad5853dc8faee7c24c31583559db7b | N = int(input())
As = list(map(int, input().split(" ")))
IDs = range(1, len(As)+1)
result_dict = dict(zip(IDs,As))
result_dict = sorted(result_dict.items(), key=lambda x:x[1])
result_list = list(map(lambda x: str(x[0]), result_dict))
result = " ".join(result_list)
print(result)
|
989,880 | c43f5f579acfd24e8ea9cb0b8ad002fb9923cf5a | import random
from flask.json import jsonify
from random import randrange
def generate_password(size=10):
    """Return a random password of *size* characters drawn from the fixed
    alphanumeric alphabet below."""
    alphabet = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRTSUVWXYZ0123456789"
    chars = [random.choice(alphabet) for _ in range(size)]
    return ''.join(chars)
def verify(fieldValue):
    """Return *fieldValue* unchanged when truthy; otherwise the "N/A" placeholder.

    Note that falsy values such as 0 and "" also map to "N/A"."""
    if fieldValue:
        return fieldValue
    return "N/A"
def prepare_response(result, error = None):
    """Wrap *result* in a JSON success payload; on a falsy result return a
    JSON error payload with *error* (default "resource_not_found")."""
    if not result:
        return jsonify(response = False, error = error or "resource_not_found")
    return jsonify(response = result)
def generate_result():
    """Simulate a two-player game score: draw two scores in [10, 20] and push
    the leader to a winning score (>= 21). Ties hand the win to player b,
    except a high tie (20/21) where player a wins by 2."""
    score_a = randrange(10, 21)
    score_b = randrange(10, 21)
    if score_a > score_b:
        score_a = 21
    elif score_b > score_a:
        score_b = 21
    else:
        # tied scores
        if score_a in (20, 21):
            score_a += 2
        else:
            score_b = 21
    return (score_a, score_b)
989,881 | 8e17b41cec90243eea830d304a9850cc370f7a70 | from django.shortcuts import render, redirect, HttpResponse
from django.contrib.auth import login as log, authenticate, logout
from django.contrib import messages
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.http import JsonResponse
from .form import LoginForm, RegisterForm
from .models import Auct, Feed, Wallet, Storage
from managing import utils
from managing.views import addAuction, tip, permanentSaving, getWorkflow
from datetime import datetime
from django.conf import settings
import random
def getHomeData():
    """Fetch the data shown on the home page: active auctions plus the
    Redis workflow entries decoded from bytes to text."""
    auctions = Auct.objects.filter(active=True).values()
    workflow = [entry.decode('utf-8') for entry in getWorkflow()]
    return auctions, workflow
# Close every auction whose end time has passed and record the winner.
def checkExpiredAuction():
    x = datetime.now()
    nowDate = datetime.strftime(x, "%Y-%m-%d %H:%M") #now date
    auctions = Auct.objects.filter(active=True).values()
    if auctions is not None:
        for item in auctions:
            # Fixed-width "%Y-%m-%d %H:%M" strings compare lexicographically
            # in chronological order, so string comparison is safe here.
            corDate = datetime.strftime(item['endData'], "%Y-%m-%d %H:%M") #end datetime
            if nowDate >= corDate:
                det = Auct.objects.get(id=item['id'])
                det.active = False
                det.save()
                # NOTE(review): if no bid was ever placed, buyer_id may be the
                # creating admin (or None) — confirm permanentSaving handles
                # that case.
                winner = User.objects.get(id=item['buyer_id'])
                permanentSaving(det, winner)
@login_required(login_url='/login') #the non-logged in user is redirected to the login page
def homePage(request):
    """Home page: lists active auctions, accepts feedback posts and bids.

    POST with a 'feed' field stores user feedback; any other POST is treated
    as a bid ('identif' + 'bet'). GET closes expired auctions and renders
    the page.
    """
    if request.method == 'POST':
        form = request.POST
        if 'feed' in form:
            # feedback submission
            email = form['email']
            content = form['feed']
            newFeed = Feed.objects.create(user=request.user, content=content)
            newFeed.save()
            auctions, nwrk = getHomeData()
            return render(request, 'user/homePage.html', {'auctions':auctions, 'workflow': nwrk})
        else:
            # bid submission
            identifAuct = form['identif']
            tipPrice = form['bet']
            identifUser = request.user.username
            x = datetime.now()
            dateBet = datetime.strftime(x, "%Y-%m-%d %H:%M")
            thisPriceAuct = Auct.objects.filter(id=int(identifAuct)).values()[0]
            #check if the stake is lower than the current price
            #the stake can be equal or bigger than current price
            if float(tipPrice) < thisPriceAuct['price']:
                messages.warning(request, "Bid too low!!")
                auctions, nwrk = getHomeData()
                return render(request, 'user/homePage.html', {'auctions':auctions, 'workflow': nwrk})
            else:
                #the stake can be also equal than current price
                getWalletInfo = Wallet.objects.filter(owner=request.user).values()[0]
                #check if the user has sufficient balance
                if getWalletInfo['euroBalance'] < float(tipPrice):
                    messages.warning(request, "Insufficient budget to carry out his bet")
                    auctions, nwrk = getHomeData()
                    return render(request, 'user/homePage.html', {'auctions':auctions, 'workflow': nwrk})
                else:
                    # NOTE(review): price check and update are not atomic —
                    # two concurrent bids can race; consider a transaction
                    # with select_for_update.
                    #save offer on Redis
                    tip(identifAuct, tipPrice, identifUser, dateBet)
                    #update the offered price
                    updatePriceAuct = Auct.objects.get(id=int(identifAuct))
                    updatePriceAuct.price = float(tipPrice)
                    updatePriceAuct.buyer = request.user
                    updatePriceAuct.save()
                    #update user balance
                    userWallet = Wallet.objects.get(owner=request.user)
                    userWallet.euroBalance -= float(tipPrice)
                    userWallet.save()
                    auctions, nwrk = getHomeData()
                    messages.success(request, "Bet placed successfully")
                    return render(request, 'user/homePage.html', {'auctions': auctions, 'workflow':nwrk})
    else:
        #check the expired auctions
        checkExpiredAuction()
        auctions, nwrk = getHomeData()
        return render(request, 'user/homePage.html', {'auctions':auctions, 'workflow': nwrk})
def retUserId(ident):
    """Look up and return the User with primary key *ident*."""
    return User.objects.get(id=ident)
def shoWinner(request):
    """Render the win page with the closed auctions this user leads (won).

    Falls back to an empty context instead of a 500 if the lookup fails.
    """
    try:
        infoAuct = Auct.objects.filter(buyer=request.user.id, active=False).values()
        return render(request, 'user/winPage.html', {'infoWin': infoAuct})
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt are
        # no longer swallowed; the best-effort fallback behaviour is kept.
        return render(request, 'user/winPage.html', {})
def shoWallet(request):
    """Render the wallet page for the logged-in user."""
    wallet = Wallet.objects.get(owner=request.user.id)
    return render(request, 'user/walletPage.html', {'infoWallet': wallet})
@login_required(login_url='/login') #the non-logged in user is redirected to the login page
def adminPanel(request):
    """Admin dashboard: create new auctions and list feeds, auctions and the
    Redis workflow.

    GET renders the panel; a POST with the auction form creates an auction
    and seeds its initial data in Redis.
    """
    def _context():
        # Context shared by every response from this view (queries run after
        # any creation so a new auction is included, as before).
        return {
            'auctions': Auct.objects.filter().values(),
            'feed': Feed.objects.filter().values(),
            'workflow': [item.decode('utf-8') for item in getWorkflow()],
        }

    if request.method == 'POST':
        form = request.POST
        if form:
            try:
                obj = form['object']
                price = form['price']
                endDate = form['endDate']
                endTime = form['endTime']
                endDateTime = endDate + ' ' + endTime
                newAuction = Auct.objects.create(nobject=obj, buyer=request.user, price=price, endData=endDateTime)
                aucId = newAuction.id
                newAuction.save()
                messages.success(request, "Auction successfully added!")
                #Save the initial data of this auction on Redis using managing methods
                addAuction(aucId)
            except Exception:
                # Narrowed from a bare `except:` — still covers missing form
                # keys and model errors, but no longer hides SystemExit etc.
                messages.warning(request, "New auction creation failed. Check all fields")
        # Bug fix: the original returned None (an HTTP 500) on a POST with an
        # empty form; now every path falls through to rendering the panel.
    return render(request, 'user/adminPanel.html', _context())
def login(request): #User access
    """Authenticate a user; admins go to the admin panel, others to the home page.

    The original relied on a bare ``except:`` to catch the AttributeError
    raised when ``authenticate`` returns None for bad credentials; the None
    case is now tested explicitly.
    """
    if request.method == 'POST' and request.POST:
        form = request.POST
        username = form['username']
        user = authenticate(username=username, password=form['password'])
        if user is not None:
            log(request, user)  # django session login (imported alias)
            if user.is_superuser:
                messages.success(request, "Login successful. Welcome back administrator!")
                return redirect('/adminPanel')
            checkExpiredAuction()
            messages.success(request, f"Login successful. Welcome back {username}")
            return redirect('/')
        messages.warning(request, "Login failed!")
    return render(request, 'user/login.html', {'form': LoginForm()})
def log_out(request):
    """Terminate the current session and send the user back to the login page."""
    logout(request)
    return redirect("/login")
def registration(request): #User registration
    """Register a new user, create their crypto wallet, then run the login flow.

    Cleanups over the original: the unused ``form = LoginForm()`` assignment
    before delegating to login() is gone, and the duplicated RegisterForm
    render for GET / empty-POST is collapsed into one exit path.
    """
    if request.method == 'POST' and request.POST:
        form = request.POST
        password = form['password']
        #create object User
        user = User.objects.create_user(username=form['username'], email=form['email'],
                                        password=password)
        user.save()
        #create user Wallet with a random starting EUR balance
        randEuroBalance = random.randrange(5000, 30000)
        wallet = utils.createWallet(password)
        thisWallet = Wallet.objects.create(owner=user, address=wallet['address'],
                                           privateKey=wallet['cryptKey'],
                                           ethBalance=wallet['balance'],
                                           euroBalance=randEuroBalance)
        thisWallet.save()
        messages.success(request, "Registration successful")
        return login(request)
    if request.method == 'POST':
        # POST without any form data
        messages.warning(request, "Registration failed")
    return render(request, 'user/registration.html', {'form': RegisterForm()})
|
989,882 | 5d4a7138f406422698fd80530876ee4611784fff | from django.shortcuts import render
def index(request):
    """Render the lesson app landing page."""
    template_name = 'lesson/index.html'
    return render(request, template_name)
def lesson_detail(request):
    """Render the lesson detail page."""
    template_name = 'lesson/lessondetail.html'
    return render(request, template_name)
|
989,883 | 5d2bbb88a0b39278508320f84103bba6ccd13414 | """x=10
y=15
print("Addition is:",x+y)
print("Subtraction is:",x-y)
print("Multiplication is:",x*y)
print("division is:",x/y)
print("module is:",x%y)"""
# Read three integers and report the greatest one.
x = int(input("enter first number"))
y = int(input("enter second number"))
z = int(input("enter third number"))
# max() also handles ties: the original chained strict ">" comparisons
# printed nothing when the greatest value appeared more than once
# (e.g. 5, 5, 3 matched none of the three conditions).
greatest = max(x, y, z)
print(greatest, "is greatest")
989,884 | 681ac048d1e5e4ba5217d9638fe8b116aeb7f0d3 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Tripoli 基类,用来处理Tripoli输出数据
from __future__ import print_function
import re
import time
from .atom_mass import atom_mass
class BaseTripoli(object):
    """Base class for parsing Tripoli-4 Monte-Carlo output files.

    Extracts per-(volume, reaction, nucleus) scores and spectra from the
    result section, and compositions/geometry/volumes from the echoed
    input deck.
    """
    def __init__(self, postFile=None):
        # regex for a number in scientific notation (e.g. 1.23e-04)
        self._science_num = r'\d\.?\d*[eE][\-\+]\d+'
        # Tripoli output file to parse
        self._postFile = postFile
        # log file of this helper program
        self._log = (self._postFile or 'Myproject') + '.log'
        # seconds to sleep before retrying after a parse error
        self._dt = 100
    def _rmComment(self, content):
        # strip Tripoli '//' line comments and '/* ... */' block comments
        cont = re.sub(r'//.*', '', content)
        cont = re.sub(r'/\*[\w\W]*?\*/', '', cont)
        return cont
    def writeLog(self, *args):
        """Append a timestamped line to the log file.

        NOTE(review): the handle is never closed explicitly; relies on GC.
        """
        log = open(self._log, mode='a')
        print(time.strftime('%m-%d,%H:%M:%S') + ':', *args, sep=' ', end='\n', file=log)
    ## reshape the parsed data
    def _V_R(self):
        # create a self.total attribute to store the result in the format:
        # self.total[volume][reaction] = list of results for that reaction in that volume
        # self.score[volume][reaction][isotope] = score
        # collect the distinct volumes / reaction types / isotopes seen
        self.volume = list({i['volume'] for i in self._dl})
        self.reaction = list({i['reaction'] for i in self._dl})
        self.isotopes = list({i['nucleus'] for i in self._dl})
        self.volume.sort()
        self.reaction.sort()
        self.isotopes.sort()
        self.total = {i: {j: [dd for dd in self._dl \
                    if dd['volume'] == i and j == dd['reaction']] \
                for j in self.reaction} for i in self.volume}
        self.score = {i: {j: {dd['nucleus']: dd['score'] for dd in self._dl
                              if dd['volume'] == i and j == dd['reaction']}
                          for j in self.reaction} \
                      for i in self.volume}
        self.spectrum = {i: {j: {dd['nucleus']: dd['spectrum'] for dd in self._dl
                              if dd['volume'] == i and j == dd['reaction']}
                          for j in self.reaction} \
                      for i in self.volume}
    # parse the Tripoli output (result) section
    def maketable(self, filename=None):
        """Parse the result file into self._dl / self.score / self.spectrum.

        Returns the maximum sigma found. On an IndexError (file not a
        complete Tripoli result yet) it sleeps self._dt seconds and retries
        — presumably waiting for a running simulation to finish writing;
        TODO confirm this never loops forever on a truly malformed file.
        """
        if filename:
            self._postFile = filename
        f = open(self._postFile)
        content = f.read()
        f.close()
        science_num = self._science_num
        d_l = []
        try:
            # pattern for the input info
            input_pattern = r'checking association of compositions and volumes : ok([\w\W]+)\s+Loading response functions'
            self._input = re.findall(input_pattern, content)[0]
            self._input = self._rmComment(self._input)
            # content for lastest batch
            pattern1 = r'(RESULTS ARE GIVEN FOR SOURCE INTENSITY[\W\w]+?simulation time)'
            content1 = re.findall(pattern1, content)[-1]
            # block
            mod = r'(RESPONSE FUNCTION : REACTION\s+[\w\W]+?' + \
                  r'number of batches used:\s\d+\s\d\.\d+e[\-\+]\d+\s\d' + \
                  r'\.\d+e[\-\+]\d+)\s+\n\n\n\n\*\*\*\*\*\*\*\*\*\*\*'
            block = re.findall(mod, content1)
            # pattern
            p_nucleus = r'reaction on nucleus : ([A-Z]+\d+)\s'
            p_volume = r'num of volume : (\d+)\s'
            p_reaction = r'reaction consists in codes : \n\t\t(\d+)'
            p_batch = r'number of batches used: (\d+)\t'
            p_score2 = r"\s+ENERGY INTEGRATED RESULTS\s+number of first discarded batches :\s+\d+\s+number of batches used: \d+\s+(" + \
                       science_num + r')\s+(' + science_num + r')'
            p_spectrum = r'SPECTRUM RESULTS([\w\W]+?)ENERGY INTEGRATED RESULTS'
            p_sp = r'(' + science_num + r')\s+\-\s+(' + science_num + r')\s+(' + science_num + r')\s+(' + science_num + r')\s+(' + science_num + r')\s+'
            # build one dictionary per (nucleus, volume) result block
            for blck in block:
                nucleus = ""
                # H1 bound in water has a non-standard label
                if blck.find('H1_H2O') != -1:
                    nucleus = 'H1_H2O'
                else:
                    nucleus = re.findall(p_nucleus, blck)[0]
                reaction = int(re.findall(p_reaction, blck)[0])
                batch = re.findall(p_batch, blck)
                volume = re.findall(p_volume, blck)
                # score and sigma come from the same (value, sigma) pairs;
                # s[0] is the value, sig[1] the sigma of each pair
                score = re.findall(p_score2, blck)
                sigma = re.findall(p_score2, blck)
                sp_blck = re.findall(p_spectrum, blck)
                for b, v, s, sig, sp in zip(batch, volume, score, sigma, sp_blck):
                    d = {}
                    d['nucleus'] = nucleus
                    d['batch'] = int(b)
                    d['volume'] = int(v)
                    d['score'] = float(s[0])
                    d['reaction'] = reaction
                    d['sigma'] = float(sig[1])
                    spectrum = []
                    spec = re.findall(p_sp, sp)
                    for i in spec:
                        thesp = {}
                        sp_re = i
                        # energy group bounds, then score/sigma/score-per-lethargy
                        thesp['group'] = (float(sp_re[0]), float(sp_re[1]))
                        thesp['score'] = float(sp_re[2])
                        thesp['sigma'] = float(sp_re[3])
                        thesp['score//lethargy'] = float(sp_re[4])
                        spectrum.append(thesp)
                    d['spectrum'] = spectrum
                    d_l.append(d)
        except IndexError:
            # file incomplete or not a Tripoli result: wait and retry
            print(self._postFile)
            print("INDEXERROR: is the file a coorect Tripoli RESULT file?")
            time.sleep(self._dt)
            self.maketable()
            return self.max_sigma
        Sigma = {dd['sigma'] for dd in d_l}
        self._dl = d_l
        Sigma.add(0)  # guarantee max() has at least one element
        self.max_sigma = max(Sigma)
        self._V_R()
        return self.max_sigma
    def parsing_input(self):
        # parsing the input infomation
        # creat the attribute self.comps which store the all compostions in a list
        # self.geoCom store the geoCom info
        if not hasattr(self, '_input'):
            self.maketable()
        # get the relation between compositions and volums
        self._input = self._rmComment(self._input)
        geoCom_pattern = r'GEOMCOMP([\w\W]+?)END_GEOMCOMP'
        geostr = re.findall(geoCom_pattern, self._input)[0]
        geoCom_line = r'(\w+)\s+(\d+\s+[\d\s]+)'
        geoC_0 = re.findall(geoCom_line, geostr)
        self.geoCom = []
        for i in geoC_0:
            theGeo = {'name': i[0]}
            nums = re.findall(r'(\d+)', i[1])
            theGeo['num'] = int(nums[0])
            theGeo['volume'] = int(nums[1])  # there is only one volume
            self.geoCom.append(theGeo)
        self._getCompositionSection()
        self._reformatGeoComps()
        self._getFuelVol()
        self._getRealVol()
        self._getBoronConcentration()
    def _getCompositionSection(self):
        # parse the COMPOSITION section of the echoed input deck
        comps_pattern = r'COMPOSITION\s+\d+\s+([\w\W]+?)END_COMPOSITION'
        comps_str = re.findall(comps_pattern, self._input)[0]
        comps_0 = comps_str.split('POINT_WISE')[1:]
        isotope_pattern = r'([A-Z]+\-??\w*?)\s+' + r'(\d\.?\d*[eE]{0,1}[\-\+]{0,1}\d+)'
        self._comps = []
        self._mat_name = set()
        ## self._comps format: a list of dicts, one dict per material with
        ## its name and all isotope -> concentration pairs
        for i in comps_0:
            theComp = {'name': re.findall(r'\d+\s+(\w+)\s+\d+?', i)[0]}
            self._mat_name.add(theComp['name'])
            i = i.replace(theComp['name'], '')
            isotopes = re.findall(isotope_pattern, i)
            for isotope in isotopes:
                theComp[isotope[0]] = float(isotope[1])
            self._comps.append(theComp)
        # merge each composition into its matching geoCom entry
        for i in range(len(self.geoCom)):
            for j in self._comps:
                if self.geoCom[i]['name'] == j['name']:
                    self.geoCom[i].update(j)
    def _reformatGeoComps(self):
        # reformat the geo_comps in the format self.concentration[vol][isotope]=concentration
        self.concentration = {}
        for i in self.geoCom:
            this_v = {}
            the_vol = -1
            for info in i:
                if info == 'volume':
                    the_vol = i[info]
                else:
                    this_v[info] = i[info]
            self.concentration[the_vol] = this_v
        # get the vol number with the material name
        # (self._mat_vol maps both name -> volume and volume -> name)
        self._mat_vol = {}
        for i in self.geoCom:
            for mat in self._mat_name:
                if i['name'] == mat:
                    self._mat_vol[mat] = i['volume']
                    self._mat_vol[i['volume']] = mat
    def _getFuelVol(self):
        # inspect compositions automatically to find the fuel volumes
        # (any material containing U235/U238/PU239)
        self._comV = {self._mat_vol[i['name']] for i in self._comps \
                      if ('U235' in i or 'U238' in i or 'PU239' in i)}
    def _getRealVol(self):
        # get the real volume in cm3
        # parsed from the VOLSURF ... END_VOLSURF section
        real_V_pattern = r'VOLSURF([\w\W]+?)END_VOLSURF'
        try:
            vol_str = re.findall(real_V_pattern, self._input)[0]
            vol_pattern = r'(\d+)\s+(\d+\.\d+)?(' + self._science_num + ')?\s+'
            vols = re.findall(vol_pattern, vol_str)
            # each match has either the fixed-point or the scientific group
            # filled, the other empty — concatenating yields the number
            self.realVol = {int(vol[0]): float(vol[1] + vol[2]) for vol in vols}
        except IndexError:
            # NOTE(review): trailing commas make msg a *tuple* of strings,
            # not one string — presumably unintended; left as-is here.
            msg = "无法找到体积参数,请手工添加体积参数在VOLSURF END_VOLSURF模块中\n", \
                  "Cannot find the volume paramter, please add the paramter into the [VOLSURF END_VOLSURF] module manually.\n", \
                  "The program will abort!!\n", \
                  "Entre any key to exit!!\n"
            print(msg)
            self.writeLog(msg)
            # exit(0)
    def _getBoronConcentration(self):
        # compute the boron concentration (ppm) in water compositions and
        # the U235 enrichment-like ratio in fuel compositions
        mass = atom_mass()
        self.water = []
        self.fuel = []
        self.uranium_concentration = []
        self.boron_concentration = []
        for composition in self.geoCom:
            keys = composition.keys()
            ## boron section ########
            # NOTE(review): checks "B-NAT" but yields "B_NAT" — harmless
            # since B_key is only used as a truthiness flag, never as a key.
            B_key = ("B-NAT" in keys and "B_NAT") or \
                    ("B10" in keys and "B10") or \
                    ("B11" in keys and "B11")
            B = ["B10", "B11", "B-NAT", "b10", "b11", "b-nat", "b-NAT"]
            if B_key:
                h1_key = ("H1" in keys and "H1") or \
                         ("H_H2O" in keys and "H_H2O") or \
                         ("H-NAT" in keys and "H-NAT")
                O_key = ("O16" in keys and "O16") or \
                        ("O-NAT" in keys and "O-NAT")
                try:
                    H = composition[h1_key]
                    O = composition[O_key]
                except KeyError:
                    continue
                # H:O atom ratio of 2:1 identifies the material as water
                if abs(H / 2 - O) < 1e-6:
                    water = ["H1", "O-NAT", "H-NAT", "O16"]
                    #### this is water ###
                    totalmass = 0.0
                    ### water density ####
                    density = 0.0  # unit kg/m3
                    for i in water:
                        try:
                            totalmass += composition[i] * mass[i]
                            density += mass.getKg(i) * 1e30 * composition[i]
                        except KeyError:
                            continue
                    composition["density"] = density
                    self.water.append(composition)
                    #### boron concentration (mass ppm relative to water) ###
                    boron = 0.0
                    for b in B:
                        try:
                            boron += composition[b] * mass[b]
                        except KeyError:
                            pass
                    cb = boron / totalmass * 1e6
                    self.boron_concentration.append(cb)
            ##### fuel section#####
            if "U235" in keys:
                self.fuel.append(composition)
                O = 0.0
                for i in ["O-NAT", "O16"]:
                    try:
                        O += composition[i]
                    except KeyError:
                        continue
                # U235 per UO2 (2 oxygen atoms per uranium), in percent
                cu = 2 * composition["U235"] / O * 100
                self.uranium_concentration.append(cu)
        # return cb,cu
|
989,885 | f7312ad23ff7f25ef8a8a2a173bc923c69d9e2bd | import requests
# NOTE(security): the bot token is hard-coded and now burned into VCS;
# rotate it and load it from an environment variable / secret store.
token = '1557387128:AAGYSbYfHhB0BC39kcPoUKKsZtl1Wz7-VHs'
app_url = f'https://api.telegram.org/bot{token}'

# Find the chat id of the most recent sender via getUpdates.
response = requests.get(f'{app_url}/getUpdates').json()
chat_id = response.get('result')[0].get('message').get('from').get('id')

text = '방가방가'
# Let requests build the query string so *text* is URL-encoded correctly;
# the original f-string interpolation broke on '&', '#', '+', spaces, etc.
requests.get(f'{app_url}/sendMessage', params={'chat_id': chat_id, 'text': text})
989,886 | c0ece7ee95adc0f39d1b0ec378b7230cb7c82f68 | def transform_subject_no(subject_no: int, loop_size: int) -> int:
value = 1
for _ in range(loop_size):
value *= subject_no
value %= 20201227
return value
def part1(card_pub_key: int, door_pub_key: int):
    """Brute-force the card's loop size, then print the encryption key.

    Prints door_pub_key ** card_loop_size mod 20201227; prints nothing if
    no loop size below the search bound matches (malformed input).
    """
    value = 1
    for card_loop_size in range(1, 100000000):
        value = value * 7 % 20201227  # the handshake subject number is always 7
        if value == card_pub_key:
            # modular pow replaces the repeated-multiplication helper
            print(pow(door_pub_key, card_loop_size, 20201227))
            break
def main():
    """Read the two public keys from the puzzle input and run part 1."""
    # 'with' guarantees the handle is closed (the original leaked it)
    with open('inputs/day25.txt') as f:
        card_pub_key = int(f.readline().strip())
        door_pub_key = int(f.readline().strip())
    part1(card_pub_key, door_pub_key)


if __name__ == "__main__":
    main()
|
989,887 | 2e342d4d7e3629f18cba25525c9c586219fcb551 | import Zero
import Events
import Property
import VectorMath
class CheckForController:
    """Polls gamepad 0 on every logic update and reports when it is missing."""

    # handle to gamepad 0, refreshed on each update
    gamepad = Zero.Gamepads.GetGamePad(0)

    def Initialize(self, initializer):
        Zero.Connect(self.Space, Events.LogicUpdate, self.update)

    def update(self, Event):
        # Bug fix: the original stored the fresh handle in a *local* variable
        # and then tested the stale class attribute; refresh self.gamepad.
        self.gamepad = Zero.Gamepads.GetGamePad(0)
        print("getgamepad")
        # '== None' kept deliberately: engine objects may overload equality
        # for null checks — TODO confirm 'is None' is equivalent in Zero.
        if self.gamepad == None:
            print("no gamepad")

Zero.RegisterComponent("CheckForController", CheckForController)
989,888 | 244188d2f3b88dbfac456165b863b84c9d719d63 | import logging, datetime, random
from work_materials.globals import *
from work_materials.constants import archer_photo_ids, sentinel_photo_ids, knight_photo_ids
def set_class(bot, update):
    """Parse a forwarded class-confirmation message and store the player's class.

    For Rangers the 'Aiming' skill level is also extracted, since it drives
    the pre-battle notification timing. Removed the stray debug
    ``print("here")`` the original emitted on every call.
    """
    mes = update.message
    current_class = mes.text.partition(" ")[0]
    if current_class not in classes_list:
        return
    class_skill = None
    if current_class == 'Ranger':
        # message looks like '... <lvl> .. Aiming ...': take the number
        # immediately before the word 'Aiming'
        class_skill = int(mes.text.partition("Aiming")[0][:-2].split()[-1])
        logging.info("class_skill = {0}".format(class_skill))
    request = "update users set user_class = %s, class_skill_lvl = %s where telegram_id = %s"
    cursor.execute(request, (current_class, class_skill, mes.from_user.id))
    bot.send_message(chat_id = mes.chat_id, text = "Класс успешно обновлён, <b>{0}</b>".format(current_class),
                     parse_mode = 'HTML')
def knight_critical(bot, update):
    """Reply to the triggering message with a random knight photo and praise."""
    caption_text = ("Сегодня на поле боя ты показал невиданную храбрость, и удача к тебе была благосконна - твоя ярость"
                    " не знала границ, твои соратники уважительно преклоняют голову перед тобой")
    photo = random.choice(knight_photo_ids)
    bot.sendPhoto(chat_id=update.message.chat_id,
                  reply_to_message_id=update.message.message_id,
                  caption=caption_text, photo=photo)
def sentinel_critical(bot, update):
    """Reply to the triggering message with a random sentinel photo and praise."""
    caption_text = ("Зачастую люди рискуют другими, чтобы защитить себя. Истинный Защитник рискует собой, дабы защитить других.\n"
                    "Сегодня ты сражался за всех тех людей, что остались в замке и со страхом ждали конца сражения.")
    photo = random.choice(sentinel_photo_ids)
    bot.sendPhoto(chat_id=update.message.chat_id,
                  reply_to_message_id=update.message.message_id,
                  caption=caption_text, photo=photo)
def ranger_notify(bot, job):
    """Job-queue callback: ping a ranger (photo + mention) that it is time to aim.

    job.context is [telegram_username, username, chat_id].
    """
    context = job.context
    response = "Поднимай свой лук, <b>{0}</b>\n@{1}".format(context[1], context[0])
    file_id = random.choice(archer_photo_ids)
    try:
        bot.sendPhoto(chat_id=context[2], caption=response, photo=file_id, parse_mode='HTML')
    except BadRequest:
        # photo id stale or chat rejected it: tell the first admin which id failed
        bot.send_message(chat_id = admin_ids[0], text = "Ошибка при отправке уведомления лучнику, photo_id =\n{0}".format(file_id))
def rangers_notify_start(bot, update, time_to_battle):
    """Schedule 'raise your bow' notifications for every ranger in every guild.

    Each ranger is pinged time_to_aim minutes (taken from their skill level)
    before the battle. *update* may be a Message-carrying update or a raw
    chat id (int or str).
    """
    try:
        callback_chat_id = update.message.chat_id
    except AttributeError:
        # called with a plain chat id instead of an Update object
        try:
            callback_chat_id = int(update)
        except TypeError:
            return
    guild_names = guilds_chat_ids.keys()
    count = 0
    for guild in guild_names:
        request = "select telegram_username, username, class_skill_lvl from users where guild = %s and class_skill_lvl is not NULL"
        cursor.execute(request, (guild, ))
        row = cursor.fetchone()
        chat_id =guilds_chat_ids.get(guild)
        while row:
            telegram_username = row[0]
            username = row[1]
            class_skill_lvl = row[2]
            context = [telegram_username, username, chat_id]
            print(class_skill_lvl)
            time_to_aim_mins = ranger_aiming_minutes[class_skill_lvl]
            time_to_aim = datetime.timedelta(minutes=time_to_aim_mins)
            time_to_notify = time_to_battle - time_to_aim
            if time_to_notify >= datetime.timedelta(minutes=0):
                # NOTE(review): 'job' is not defined locally — presumably
                # imported via 'from work_materials.globals import *'; verify.
                job.run_once(ranger_notify, time_to_notify, context=context)
            #job.run_once(ranger_notify, 1, context=context) TEST
            row = cursor.fetchone()
            count += 1
    bot.send_message(chat_id = callback_chat_id, text = "Запланировано оповещение <b>{0}</b> бедных лучников".format(count),
                     parse_mode = 'HTML')
989,889 | 688048332163bb65f4b65e09b12f6ac919fb9df5 | s = input("Введіть послідовність чилел\n")
a = s.split()
n=len(a)
b=0
for i in range(n):
a[i]=int(a[i])
for i in range(n):
if i==n-1:
break
elif a[i]==a[i+1]:
b=b+1
print(b)
|
989,890 | ae85d7885df9df5f8835c5ab2cfeb3a23cb8ec39 | #!/usr/bin/python
import socket
import random
from tcp_tools import set_name, set_conn, close_conn, log, pp_host, send, receive
# NOTE: this is Python 2 code ('except socket.error, e' syntax below).
HOST_PAIR = ("127.0.0.1", 2001)
set_name("Server")
try:
    log("Listening on {}.".format(pp_host(HOST_PAIR)))
    conn = None
    listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    listener.bind(HOST_PAIR)
    listener.listen(1)
    # Build a random arithmetic challenge the client must answer to get the flag.
    operands = [None, None]
    operands[0] = random.randrange(100)
    operands[1] = random.randrange(100)
    operator = random.choice(['+', '-', '*', '/'])
    if operator == '+':
        answer = operands[0] + operands[1]
    elif operator == '-':
        answer = operands[0] - operands[1]
    elif operator == '*':
        answer = operands[0] * operands[1]
    else:
        # NOTE(review): '/' is integer division on Python 2, and raises
        # ZeroDivisionError when operands[1] == 0 — TODO confirm intended.
        answer = operands[0] / operands[1]
    challenge = "{} {} {}".format(operands[0], operator, operands[1])
    log("Set challenge to '{}' (answer is {}).".format(challenge, answer))
    conn, sender = listener.accept()
    set_conn(conn)
    log("Got a connection from {}.".format(pp_host(sender)))
    # Protocol: CHALLENGE -> client answer -> OK -> FLAGPLS -> flag.
    send("CHALLENGE {}".format(challenge))
    data = receive()
    if int(data) == answer:
        send("OK")
        data = receive()
        if data == "FLAGPLS":
            send("<3><")
    else:
        send("NO")
except socket.error, e:
    log("-!- {}".format(str(e)))
except KeyboardInterrupt:
    log("Killed.")
finally:
    close_conn()
989,891 | a0a563b39f0de92071054959437fb456348e0e27 | import pymongo
import pymysql
import redis
from .settings import *
class MongoClient:
    """Thin pymongo wrapper bound to one database and one collection."""

    def __init__(self, dbname, tablename):
        self.client = pymongo.MongoClient(host=MONGO_HOST, port=MONGO_PORT)
        self.dbname = dbname
        self.tablename = tablename

    @property
    def db(self):
        """Database object selected by self.dbname."""
        return self.client[self.dbname]

    @property
    def table(self):
        """Collection selected by self.tablename inside self.db."""
        return self.db[self.tablename]
class RedisClient:
    """Redis-backed task set: put_task/get_task push and pop URLs from a set."""
    def __init__(self):
        self.rd = redis.Redis()
    def put_task(self,set_name,url):
        """Add *url* (stringified) to the Redis set *set_name*."""
        if not isinstance(set_name,str):
            set_name = str(set_name)
        if not isinstance(url,str):
            url = str(url)
        self.rd.sadd(set_name,url)
    def get_task(self,set_name):
        """Pop one task from the set and evaluate it back into a Python object.

        NOTE(security): eval() on data read from Redis executes arbitrary
        code if the store is writable by untrusted parties — consider
        json.dumps/json.loads (or ast.literal_eval) instead.
        Raises AttributeError (decode on None) when the set is empty.
        """
        task = self.rd.spop(set_name)
        # print(task)
        task = task.decode()
        task = eval(task)
        # print(task)
        return task
class MysqlClient:
    """Placeholder MySQL client.

    NOTE(review): pymysql.Connect() is called with no connection parameters
    — presumably host/user/password come later or from defaults; confirm.
    """
    def __init__(self):
        self.mysql = pymysql.Connect()
if __name__ == '__main__':
    # Smoke test. The original called MongoClient() without its two required
    # constructor arguments and then a nonexistent save() method, raising
    # TypeError immediately; exercise the real interface instead.
    mg = MongoClient('newdb', 'newtable')
    mg.table.insert_one({'name': 'zhangsan', 'flag': 'n'})
989,892 | c1afa87ceaf9e7077999139782c5756da9100c6e | from django.conf.urls.defaults import *
# NOTE: django.conf.urls.defaults / patterns() are Django <= 1.5 APIs;
# this module cannot run on modern Django without a rewrite to path()/url().
# Page views
urlpatterns = patterns('lbscontacts.views',
    url(r'^$', 'index'),
    url(r'^index/', 'index'),
    url(r'^test/', 'test'),
    url(r'^weixin/', 'weixin'),
    url(r'^album/', 'album'),
    url(r'^sms/', 'sms'),
    url(r'^bye/', 'bye'),
    url(r'^createclass/', 'create_class'),
    url(r'^changeclass/(\d+)/$', 'change_class'),
    url(r'^enterclass/(\d+)/$', 'enter_class'),
    url(r'^createstudent/(\d+)/$', 'create_student'),
    url(r'^changestudent/(\d+)/$', 'change_student'),
)
# AJAX endpoints
urlpatterns += patterns('lbscontacts.ajax',
    url(r'^ajax/queryclass/', 'query_class'),
    url(r'^ajax/createclass/', 'create_class'),
    url(r'^ajax/querycollege/', 'query_college'),
    url(r'^ajax/querydepartment/', 'query_department'),
    url(r'^ajax/querylogin/', 'query_login'),
    url(r'^ajax/upload/(\d+)/$', 'upload'),
    url(r'^ajax/queryalbum/', 'query_album'),
)
# External interface endpoints
urlpatterns += patterns('lbscontacts.interface',
    url(r'^interface/validate/', 'validate'),
)
989,893 | c3b861e5d2f1e88d464e530391060672fa0928f3 | import unittest
import tempfile
import yaml
import os
import numpy
import netCDF4
from satistjenesten import data
config_string = """
bands:
reflec_1:
long_name: reflectivity
latitude_name: latitude
longitude_name: longitude
"""
class TestScene(unittest.TestCase):
    """Tests for SatScene configuration parsing and band/coordinate loading.

    NOTE: written for Python 2 (`config_dict.keys()[0]`, writing str to a
    binary NamedTemporaryFile).
    """
    def setUp(self):
        self.scene = data.SatScene()
        self.input_filename = 'test_data/metop-b.nc'
    def test_VanillaSceneObject_HasNoConfig(self):
        # a freshly constructed scene carries no configuration
        self.assertIsNone(self.scene.config_dict)
    def test_Scene_ParsesYamlConfig(self):
        expected_value = config_string
        self.scene.parse_yaml_config(config_string)
        self.assertTrue(self.scene.config_dict == yaml.load(config_string))
    def test_Scene_LoadsConfig_FromFile(self):
        # round-trip the YAML config through a temp file
        tmp_config_file = tempfile.NamedTemporaryFile()
        tmp_config_file_path = tmp_config_file.name
        tmp_config_file.write(config_string)
        tmp_config_file.seek(0)
        scene = self.scene
        scene.config_filepath = tmp_config_file_path
        self.assertIsNone(scene.config_dict)
        scene.load_config_from_file()
        self.assertEqual(scene.config_dict, yaml.load(config_string))
        tmp_config_file.close()
    def test_SatScene_PopulatesFromConfig(self):
        # every band named in the config should become a scene attribute
        scene = self.scene
        scene.config_filepath = os.path.join('test_data', 'test_config.yml')
        scene.input_filename = self.input_filename
        scene.load_config_from_file()
        scene.get_bands()
        self.assertTrue(hasattr(scene, scene.config_dict.keys()[0]))
    def test_SatScene_ObtainsCoordinates(self):
        scene = data.SwathSatScene()
        scene.config_filepath = os.path.join('test_data', 'test_config.yml')
        scene.input_filename = self.input_filename
        scene.load_config_from_file()
        scene.get_coordinates()
        self.assertIsNotNone(scene.latitudes)
        self.assertIsNotNone(scene.longitudes)
class TestSceneResampling(unittest.TestCase):
    """Tests resampling a loaded swath scene onto a named grid area."""
    def setUp(self):
        self.scene = data.SatScene()
        self.scene.input_filename = os.path.join('test_data', 'metop-b.nc')
        self.scene.config_filepath = os.path.join('test_data', 'test_config.yml')
        self.scene.load_scene_from_disk()
    def test_SatScene_ResamplesToDefinedArea(self):
        scene = self.scene
        scene.area_name = 'nsidc_stere_north_300k'
        gridded_scene = scene.resample_to_area()
        self.assertTrue(gridded_scene.area_def is not None)
        self.assertTrue(gridded_scene.gridded)
class TestExportNetcdf(unittest.TestCase):
    """Tests writing a gridded scene out as a NetCDF file."""
    def setUp(self):
        self.output_filepath = "/tmp/t.nc"
        self.scene = data.GriddedSatScene()
        self.scene.bands['test_band'] = data.SatBand()
        self.array_shape = (100, 100)
        self.scene.bands['test_band'].data = numpy.random.rand(100, 100)
    def test_Save_SatScene_AsNetcdf(self):
        scene = self.scene
        scene.output_filepath = self.output_filepath
        scene.write_as_netcdf()
        # re-open the written file to prove it is a valid NetCDF dataset
        output_dataset = netCDF4.Dataset(self.output_filepath, 'r')
        self.assertTrue(isinstance(output_dataset, netCDF4.Dataset))
    def tearDown(self):
        # cleanup intentionally disabled (file kept for inspection)
        # os.remove(self.output_filepath)
        pass
class TestRescale(unittest.TestCase):
    """Tests the window_blocks downscaling helper."""
    def setUp(self):
        self.array = numpy.ones((10, 10))
    def test_window_blocks_Returns_Smaller_Array(self):
        large_array = self.array
        flat_large_array = large_array.flatten()
        window_size = 4
        # NOTE: '/' here assumes Python 2 integer division; on Python 3 this
        # yields a float and the shape comparison would fail.
        expected_number_of_blocks = flat_large_array.shape[0] / window_size
        small_array = data.window_blocks(flat_large_array, 4)
        self.assertSequenceEqual((expected_number_of_blocks, window_size), small_array.shape)
|
989,894 | 82371aca3710abe2a3af4c26de5b431300547a7e | # coding: utf-8
import re
import uuid
import os
from django.views.generic import TemplateView
from django.template.context_processors import csrf
from django.views.decorators.csrf import csrf_protect, csrf_exempt
from django.contrib.auth import authenticate, login, logout
from django.http import JsonResponse, HttpResponse
from .models import Dummy, Report
from .tasks import make_csv_task
class PlaneksView(TemplateView):
    """Main page view: lists the current user's Dummy records (empty for anonymous)."""

    def get(self, request, *args, **kwargs):
        # (removed leftover debug prints of request.user / the csrf token)
        context = self.get_context_data(request, **kwargs)
        context.update(csrf(request))
        return self.render_to_response(context)

    def get_context_data(self, request, **kwargs):
        """Add the user's records under 'data'; anonymous users get an empty list."""
        context = super(PlaneksView, self).get_context_data()
        if request.user.is_authenticated:
            context['data'] = Dummy.objects.filter(owner=request.user)
        else:
            context['data'] = []
        return context
# @csrf_protect
def planeks_login(request):
    """POST {username, password[, uri]} -> JSON {success[, redirect]}.

    On success the '?next=...' part of *uri* (if present) decides the
    redirect target, otherwise '/'. Removed the debug print and the broad
    ``except Exception`` that papered over the missing-match case.
    """
    user = request.POST.get('username')
    password = request.POST.get('password')
    ath = authenticate(request=request, username=user, password=password)
    if not ath:
        return JsonResponse({'success': False}, status=400)
    login(request, ath)
    redir = '/'
    uri = request.POST.get('uri')
    if uri:
        match = re.search(r'\?next=.+', uri)
        if match:
            redir = match.group(0).split('=')[-1]
    return JsonResponse({'success': True, 'redirect': redir}, status=200)
@csrf_exempt
def planeks_logout(request):
    """Log the user out and tell the client to navigate back to '/'."""
    logout(request)
    return JsonResponse({'success': True, 'redirect': '/'}, status=200)
@csrf_protect
def make_csv(request):
    """Kick off asynchronous CSV report generation for the logged-in user.

    Returns the generated report filename so the client can poll for it.
    """
    if not request.user.is_authenticated:
        return JsonResponse({'success': False}, status=400)
    reportname = uuid.uuid4().hex + '.csv'
    make_csv_task.delay(user=request.user.pk, reportname=reportname)
    return JsonResponse({'success': True, 'filename': reportname}, status=200)
@csrf_protect
def check_report(request):
    """Poll whether the report named in POST['filename'] has finished.

    Returns the /media/ download link once the report status is FINISH;
    400 otherwise. (Removed the leftover debug print of the filename.)
    """
    if request.user.is_authenticated:
        report = Report.objects.filter(owner=request.user, report=request.POST.get('filename'))
        if report.exists() and report[0].status == Report.FINISH:
            link = '/media/' + os.path.split(report[0].report.path)[-1]
            return JsonResponse({'success': True, 'link': link}, status=200)
    return JsonResponse({'success': False}, status=400)
@csrf_protect
def add_record(request):
    """Create a Dummy record owned by the current user from POST fields."""
    if not request.user.is_authenticated:
        return JsonResponse({'success': False}, status=400)
    post = request.POST
    Dummy.objects.create(
        owner=request.user,
        first_name=post.get('first_name', None),
        last_name=post.get('last_name', None),
        job=post.get('job', None),
        rec_type=post.get('type', Dummy.ONE),
        website=post.get('website', None),
        creation_date=post.get('creation_date', None),
        info=post.get('info', None),
    )
    return JsonResponse({'success': True}, status=200)
989,895 | ce7d917c16d1e744750b0c83ba281f08024c2e82 |
import numpy as np
import cv2
import pickle
import time
import calendar
import sys
import json
import os
import logging
import os.path
import pprint
log = None
pp = pprint.PrettyPrinter(indent=2)
def jdefault(o):
    """json.dumps 'default' hook: serialize arbitrary objects via their __dict__."""
    return vars(o)
class WatchPoint:
    """A pixel location watched for a red (traffic/status) light."""
    # watched pixel coordinates
    y = 0
    x = 0
    # current red state and the clip time (ms) it last turned red
    isRed = 0
    redStart = 0
    def __str__(self):
        return "WatchPoint(x:%d,y:%d,red:%s)" % (self.x,self.y,self.isRed)
    def paint(self,frame,clipMSEC):
        """Draw the state marker (blue = off, red = on) and, while red, the
        elapsed red time in seconds next to it."""
        col = (255,0,0)
        if self.isRed:
            col = (0,0,255)
        cv2.rectangle(frame,(self.x-20,100),(self.x+20,self.y-10),col,-1)
        if self.isRed:
            redFor = clipMSEC - self.redStart
            cv2.rectangle(frame,(self.x+20,self.y-120),(self.x+300,self.y-10),(0,0,0),-1)
            cv2.putText(frame, "%0.1f" % (redFor/1000),
                (self.x+30,self.y-30), cv2.FONT_HERSHEY_DUPLEX, 3, (0,0,255,255), 4)
    def processframe(self,frame):
        """Classify the watched BGR pixel as red/not-red; returns a debug string."""
        if True:
            pix = frame[self.y,self.x]
            pixFLOAT = frame[self.y,self.x].astype(np.float64)
            # |B/G - 1|: how far the blue/green channels are from balance
            bgRatio = abs((pixFLOAT[0] / pixFLOAT[1]) - 1)
            self.isRed = 0
            if pix[2] > 192 and bgRatio < 0.25:
                # red is strong, blue/green in close enough balance to not be confused with 'amber'
                self.isRed = 1
            if pix[0] > 192 and pix[1] > 192 and pix[2] > 192:
                # almost white, so must be 'red light on' with max intensity
                self.isRed = 1
            if pix[0] < 128 and pix[1] < 128 and pix[2] > 192:
                # red is just strong
                self.isRed = 1
            if pix[2] > 192 and (pixFLOAT[1] > pixFLOAT[0]) and bgRatio > 0.45:
                # even if R component is high, big ratio means more likely to be amber
                self.isRed = 0
            return "(%d,%d) %s ratio: %.2f" % (self.x,self.y,pix,bgRatio)
        # everything below is unreachable (the branch above always returns);
        # kept as a record of earlier detection strategies. The names it
        # references (frameNum, y, x, i, mask) are not defined here.
        if False:
            pixelAt = frame[y,x]
            log.info("%d :: %d :: (red:%d) bgr: %s" % (frameNum,i,pixelAt[2],pixelAt))
            pixelAtRed = pixelAt[2]
            if pixelAtRed > 256*0.75:
                isRed = 1
            else:
                isRed = 0
        if False:
            # old "in red range" check
            a = frame[y-5:y+5, x-5:x+5]
            offset = i * 50;
            redlow = (0,0,128)
            redhigh = (255,255,255)
            bigger_orig = cv2.resize(a, (0,0), fx=5, fy=5)
            output = cv2.bitwise_and(a, a, mask = mask)
            bigger_mask = cv2.resize(output, (0,0), fx=5, fy=5)
            frame[offset:offset+50,0:50] = bigger_mask
            frame[offset:offset+50,50:100] = bigger_orig
            mask = cv2.inRange(a,redlow,redhigh)
            if np.count_nonzero(mask) > 0:
                isRed = 1
            else:
                isRed = 0
class VideoJob:
# location of red lights that are watched
watchPoints = []
# list of fully resolve input files
inFiles = []
# output files will bhe "output_[framenum]"
outputPrefix = "output_"
# none NONE for "do motion mask"
motionMask = None
motionKernel = None
motionBacksub = None
# Current video reader and index into inFiles that it is reading from
capIndex = None
cap = None
frameNum = -1 # global across all infiles
frameSkip = 3 # frames to skip between reads before returning a frame for processing
outFPS = 10 # FPS of output file (best result is "INPUT_FPS/frameSkip" (ie: 30/3 = 10)
localFrameNum = -1 # relative to current infile
# seconds of runtime of previous clips, so 'time' in top-left grows across everything
clipTotalTimeOffset = 0
last_CAP_PROP_POS_MSEC = 0
clipEndOffset = 10
inputFPS = 30
# as lights go to/from red, a snapshot of the current WatchPoints and frame data is added to this list.
stateChanges = []
# current video being written
vw_out = None
clipFilename = None
clipFirstFrame = None
# details of current output clip
clips = []
frame = None
frame2 = None
    def closeVideoWriter(self):
        """Release the current output writer and record the finished clip's
        frame range and filename in self.clips."""
        log.info('Closing old video writer...')
        self.vw_out.release()
        self.clips.append({
            "firstFrame" : self.clipFirstFrame,
            "lastFrame" : self.frameNum,
            "file" : self.clipFilename
        })
    def openVideoWriter(self):
        """Open a new mp4v output clip sized like the current input capture,
        closing (and recording) any previous writer first."""
        if self.vw_out != None:
            self.closeVideoWriter()
        self.clipFirstFrame = self.frameNum
        vid_width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        vid_height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        out_size = (vid_width,vid_height)
        fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
        self.vw_out = cv2.VideoWriter()
        # clip name embeds the global frame number it starts at
        self.clipFilename = '%s_%.09d.mov' % (self.outputPrefix,self.frameNum)
        log.info('Opening new video writer: %s' % self.clipFilename)
        success = self.vw_out.open(self.clipFilename,fourcc,self.outFPS,out_size,True)
        # TODO: handle failure
        return
    def writeFrame (self,frame):
        """Write *frame* to the current clip, lazily opening a writer first."""
        if self.vw_out == None:
            self.openVideoWriter()
        self.vw_out.write(frame)
    def readFrame (self):
        """Return (ok, frame), transparently advancing through self.inFiles.

        Maintains frameNum (global) / localFrameNum (per file) counters and
        accumulates each finished file's duration into clipTotalTimeOffset
        so on-screen timestamps keep growing across input files.
        """
        if self.cap == None:
            self.capIndex = 0
            log.info('Initializing video reader: %s' % self.inFiles[self.capIndex].name)
            self.cap = cv2.VideoCapture(self.inFiles[self.capIndex].name)
        ret, frame = self.cap.read()
        if ret == True:
            self.frameNum += 1
            self.localFrameNum += 1
            self.last_CAP_PROP_POS_MSEC = self.cap.get(cv2.CAP_PROP_POS_MSEC)
            return (True,frame)
        log.info('No more frames left; capIndex: %d; moving to next file' % self.capIndex)
        self.clipTotalTimeOffset += self.last_CAP_PROP_POS_MSEC/1000
        log.info('clipTotalTimeOffset: %d' % self.clipTotalTimeOffset)
        self.cap.release()
        # capIndex is changed, so call self again and next frame should pop out
        self.capIndex += 1
        if (self.capIndex >= len(self.inFiles)):
            log.info('No more inFiles to read from')
            return (False,None)
        log.info('Initializing video reader: %s' % self.inFiles[self.capIndex].name)
        self.cap = cv2.VideoCapture(self.inFiles[self.capIndex].name)
        self.localFrameNum = -1
        return self.readFrame()
    def saveStateChange(self):
        """Snapshot the frame number and every watch point's position/state
        into self.stateChanges (called when a light toggles)."""
        data = {}
        data['frame'] = self.frameNum;
        wp = []
        for i, w in enumerate(self.watchPoints):
            wp.append({ "index" : i, "x" : w.x, "y" : w.y, "isRed" : w.isRed })
        data['watchPoints'] = wp
        self.stateChanges.append(data)
    def backgroundFrameFinder(self):
        """Interactive loop: show each frame after running watch-point processing.

        NOTE(review): calls module-level processframe()/showFrame(), which are
        not defined in the visible part of this file — verify they exist.
        """
        cv2.namedWindow('frame')
        cv2.moveWindow('frame',50,50)
        while True:
            ret, frame = self.readFrame()
            if ret == False:
                log.info('done reading input files')
                break;
            processframe(self.frameNum,frame,self.cap,self.watchPoints,self.clipTotalTimeOffset)
            showFrame(frame)
    def motion_based_background_image_finder(self):
        """Build a background image by copying pixels that MOG2 marks as static.

        Pixels are filled into *bgf* only the first time they are seen as
        non-moving; the accumulated image is finally written to
        self.outputPrefix. NOTE: the '== None' tests on arrays rely on
        old-NumPy scalar semantics (Python 2 era) — confirm before porting.
        """
        cv2.namedWindow('frame')
        cv2.namedWindow('mask')
        cv2.namedWindow('bgf')
        cv2.moveWindow('frame',10,10)
        cv2.moveWindow('mask',400,10)
        cv2.moveWindow('bgf',10,400)
        bgf = None
        while True:
            ret, frame = self.readFrame()
            if ret == False:
                log.info('done reading input files')
                break;
            frame = cv2.resize(frame, (0,0), fx=0.5, fy=0.5)
            if self.frameSkip > 0 and self.frameNum % self.frameSkip != 0:
                # we aren't using every input frame, so skip skip skip
                log.info('skipping frame')
                continue
            if self.motionKernel == None:
                # lazy one-time init of the morphology kernel and MOG2 model
                self.motionKernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(2,2))
                self.motionBacksub = cv2.createBackgroundSubtractorMOG2()
                mask3 = np.zeros_like(frame)
                #for x in range(0, 1):
                self.motionBacksub.apply(mask3, None, 0.005)
            if bgf == None:
                bgf = np.zeros_like(frame)
            mask = self.motionBacksub.apply(frame, None, 0.005)
            mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, self.motionKernel)
            mask = cv2.bitwise_not(mask)  # nonzero now means "static pixel"
            for x in range(len(mask)):
                for y in range(len(mask[x])):
                    cell = mask[x][y]
                    if (cell > 0):
                        bgfcell = bgf[x][y]
                        framecell = frame[x][y]
                        #print "(%d:%d) %d " % (x,y,cell)
                        # only fill pixels not captured yet (still black)
                        if (bgfcell[0] == 0 and bgfcell[1] == 0 and bgfcell[2] == 0):
                            bgfcell[0] = framecell[0]
                            bgfcell[1] = framecell[1]
                            bgfcell[2] = framecell[2]
            #cv2.imshow('frame',frame)
            #cv2.imshow('mask',mask)
            cv2.imshow('bgf',bgf)
            cv2.waitKey(1)
            #print frame.shape
            self.writeFrame(frame)
        # end of WhileTrue:
        cv2.imwrite(self.outputPrefix,bgf)
        log.info('closing last video writer')
    def motion_picker_onto_background_mouse_click(self,event, x, y, flags, param):
        # Mouse callback for motion_picker_onto_background(): on left click,
        # copy a 60x60 patch centred on the click from the live frame
        # (self.frame) onto the background copy (self.frame2), then refresh
        # both windows so the composite is visible immediately.
        if event == cv2.EVENT_LBUTTONDOWN:
            delta=30
            print "click at x:%d y:%d" % (x,y)
            #cv2.rectangle(self.frame,(x-delta,y-delta),(x+delta,y+delta),(0,255,0),3)
            subset = self.frame[(y-delta):(y+delta),(x-delta):(x+delta)]
            self.frame2[(y-delta):(y+delta),(x-delta):(x+delta)] = subset
            #cv2.circle(self.frame,(x,y),radius,(255,0,0),-1)
            #cv2.circle(self.frame2,(x,y),radius,(255,0,0),-1)
            cv2.imshow('frame',self.frame)
            cv2.imshow('frame2',self.frame2)
    def motion_picker_onto_background(self,backgroundFile):
        """Interactively composite clicked patches of frames onto a still.

        For each (non-skipped, half-sized) frame: show it next to a fresh
        copy of backgroundFile; the mouse callback copies clicked patches
        from the frame onto that copy.  Any key advances to the next
        frame, saving the composite as '<outputPrefix>_<frameNum>.png'.
        """
        cv2.namedWindow('frame');
        cv2.moveWindow('frame',0,0)
        cv2.setMouseCallback('frame', self.motion_picker_onto_background_mouse_click)
        cv2.namedWindow('frame2');
        cv2.moveWindow('frame2',500,20)
        log.info('Starting job')
        # NOTE(review): the subtractor/kernel are created but never applied
        # in this method — presumably leftover from an earlier masking
        # approach; confirm before removing.
        self.motionKernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(2,2))
        self.motionBacksub = cv2.createBackgroundSubtractorMOG2()
        while True:
            ret, frame = self.readFrame()
            if ret == False:
                log.info('done reading input files')
                break;
            if self.frameSkip > 0 and self.frameNum % self.frameSkip != 0:
                # we aren't using every input frame, so skip skip skip
                log.info('skipping frame')
                continue
            frame = cv2.resize(frame, (0,0), fx=0.5, fy=0.5)
            self.frame = frame
            self.frame2 = cv2.imread(backgroundFile)
            cv2.imshow('frame',self.frame)
            cv2.imshow('frame2',self.frame2)
            # Block until a key press so patches can be clicked in.
            cv2.waitKey(0)
            outfile = '%s_%.09d.png' % (self.outputPrefix,self.frameNum)
            cv2.imwrite(outfile,self.frame2)
            #background = cv2.imread(backgroundFile)
            #np.copyto(background,self.frame,casting='equiv',where=self.frame)
            #cv2.imshow('frame2',background)
            #cv2.waitKey(0)
            # if self.frameNum >= cycleOutputAtFrameNum:
            #     cycleOutputAtFrameNum = self.frameNum + framesPerClip
            #     self.openVideoWriter()
        # end of WhileTrue:
        log.info('closing last video writer')
        # NOTE(review): no video writer is opened in this method; this
        # assumes closeVideoWriter() tolerates that — confirm.
        self.closeVideoWriter()
def clipper(self,framesPerClip):
log.info('Starting job')
cycleOutputAtFrameNum = framesPerClip
log.info('cycleOutputAtFrameNum: %d' % cycleOutputAtFrameNum)
cv2.namedWindow('frame')
cv2.moveWindow('frame',50,50)
while True:
ret, frame = self.readFrame()
if ret == False:
log.info('done reading input files')
break;
if self.frameSkip > 0 and self.frameNum % self.frameSkip != 0:
# we aren't using every input frame, so skip skip skip
#print frame
#frame.copyTo(background,frame)
#log.info('skipping frame')
continue
if self.motionMask:
if self.motionKernel == None:
self.motionKernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(2,2))
self.motionBacksub = cv2.createBackgroundSubtractorMOG2()
mask = self.motionBacksub.apply(frame, None, 0.01)
mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, self.motionKernel)
mask3 = np.zeros_like(frame)
mask3[:,:,0] = mask
mask3[:,:,1] = mask
mask3[:,:,2] = mask
print frame.shape
print mask3.shape
frame = cv2.bitwise_and(frame,mask3)
processframe(self.frameNum,frame,self.cap,self.watchPoints,self.clipTotalTimeOffset)
showFrame(frame)
#key = cv2.waitKey(0)
#print frame.shape
self.writeFrame(frame)
if self.frameNum >= cycleOutputAtFrameNum:
cycleOutputAtFrameNum = self.frameNum + framesPerClip
log.info('cycleOutputAtFrameNum: %d' % cycleOutputAtFrameNum)
self.openVideoWriter()
# end of WhileTrue:
log.info('closing last video writer')
self.closeVideoWriter()
def runJob(self):
log.info('Starting job')
cycleOutputAtFrameNum = 999999999999
cv2.namedWindow('frame')
cv2.moveWindow('frame',50,50)
while True:
ret, frame = self.readFrame()
if ret == False:
log.info('done reading input files')
break;
exit(0)
if self.frameSkip > 0 and self.frameNum % self.frameSkip != 0:
# we aren't using every input frame, so skip skip skip
continue
stageChange = processframe(self.frameNum,frame,self.cap,self.watchPoints,self.clipTotalTimeOffset)
if self.crosshairs == 1:
for i, w in enumerate(self.watchPoints):
cv2.rectangle(frame,(w.x-20,w.y),(w.x+20,w.y),(0,0,255),-1)
cv2.rectangle(frame,(w.x,w.y-20),(w.x,w.y+20),(0,0,255),-1)
if stageChange:
self.saveStateChange()
allRed = 1
for i, w in enumerate(self.watchPoints):
if w.isRed == 0:
allRed = 0
log.info("stateChange,frame,%d,allRed,%d,time,%0.1f" % (self.frameNum,allRed,self.cap.get(cv2.CAP_PROP_POS_MSEC)/1000))
for i, w in enumerate(self.watchPoints):
log.info(w)
if allRed:
cycleOutputAtFrameNum = self.frameNum + (self.clipEndOffset*self.inputFPS)
self.writeFrame(frame)
showFrame(frame)
if self.frameNum >= cycleOutputAtFrameNum:
cycleOutputAtFrameNum = 999999999999
self.openVideoWriter()
# end of WhileTrue:
log.info('closing last video writer')
self.closeVideoWriter()
with open('%s_jobmetadata.pickle' % self.outputPrefix, 'wb') as f:
savedata = {
"clips" : self.clips,
"stateChanges" : self.stateChanges,
"watchPoints" : self.watchPoints
}
pickle.dump(savedata, f, pickle.HIGHEST_PROTOCOL)
def showFrame(frame):
    """Display *frame* at half size in the 'frame' window (non-blocking)."""
    half = cv2.resize(frame, (0, 0), fx=0.5, fy=0.5)
    cv2.imshow('frame', half)
    cv2.waitKey(1)
def unittest (conffile):
    """Replay a video against expected watch-point states from a JSON conf.

    The conf file supplies a video filename (relative to the conf file's
    directory), watch-point coordinates, and a list of tests of the form
    {frame, red[]} giving the expected isRed value of each watch point at
    that frame.  Results are printed, and the annotated test frames are
    then cycled in a window until every test passes.
    """
    cv2.namedWindow('frame')
    cv2.moveWindow('frame',100,100)
    log.info("reading json file: %s" % conffile)
    f = open(conffile,'r')
    conf = json.loads(f.read())
    # Resolve the video path relative to the conf file's directory.
    mp4file = "%s/%s" % (os.path.dirname(conffile),conf['video'])
    watchPoints = []
    for w in conf['watchpoints']:
        wp = WatchPoint()
        wp.x = w['x'];
        wp.y = w['y'];
        watchPoints.append(wp)
    cap = cv2.VideoCapture(mp4file)
    frameNum = 0
    ret, frame = cap.read()
    if ret == False:
        log.info("unexpected end of file")
        exit(-1)
    for t in conf['tests']:
        print ""
        print "test: %s" % t
        print ""
        print "scanning to frame %d" % t['frame']
        while frameNum < t['frame']:
            ret, frame = cap.read()
            frameNum += 1
            if ret == False:
                print "unexpected end of file"
                exit(-1)
            # Keep each watch point's detector state updated while scanning.
            for i in range(0,len(t['red'])):
                wp = watchPoints[i]
                debug = wp.processframe(frame)
                print "frameNum:%d passed:na index:%d detectedRed:%d debug >>> %s" % (frameNum,i,wp.isRed,debug)
                wp.paint(frame,cap.get(cv2.CAP_PROP_POS_MSEC))
        t['failed'] = 0
        # At the target frame, compare detected state to the expectation.
        for i in range(0,len(t['red'])):
            wp = watchPoints[i]
            red = t['red'][i]
            debug = wp.processframe(frame)
            passed = red == wp.isRed
            print "frameNum:%d passed:%d index:%d shouldBeRed:%d detectedRed:%d debug >>> %s" % (frameNum,passed,i,red,wp.isRed,debug)
            wp.paint(frame,cap.get(cv2.CAP_PROP_POS_MSEC))
            if passed == 0:
                t['failed'] = 1
        # NOTE(review): paint() is defined as paint(frameNum, frame, cap,
        # clipTimeOffset); passing a string as the second argument here
        # looks like a stale call from an older signature — confirm.
        paint(frameNum,"failed:%d" % t['failed'],frame,cap)
        smallframe = cv2.resize(frame, (0,0), fx=0.5, fy=0.5)
        # The 'frame' key is reused to stash the annotated image for replay.
        t['frame'] = smallframe
        #cv2.imshow('frame',smallframe)
        #key = cv2.waitKey(1)
        #if failed == 1:
        #    key = cv2.waitKey()
    print "DONE PARSING..."
    # Cycle the annotated test frames until no test is marked failed.
    while True:
        failCount = 0
        for t in conf['tests']:
            if t['failed'] == 1:
                failCount += 1
            cv2.imshow('frame',t['frame'])
            key = cv2.waitKey(500)
        print "DONE PARSING; failCount: %d" % failCount
        if failCount == 0:
            exit(0)
def logger():
    """Return the module-level logger, creating and configuring it once.

    Subsequent calls return the already-configured instance, so the
    stream handler is never added twice.
    """
    global log
    if log is not None:  # already configured (was the non-idiomatic `!= None`)
        return log
    lgr = logging.getLogger(__name__)
    lgr.setLevel(logging.DEBUG)
    frmt = logging.Formatter("%(asctime)s %(levelname)s %(name)s.%(funcName)s %(message)s")
    sh = logging.StreamHandler()
    sh.setLevel(logging.DEBUG)
    sh.setFormatter(frmt)
    lgr.addHandler(sh)
    log = lgr
    return log
def paint(frameNum, frame, cap, clipTimeOffset):
    """Draw a black banner with '#<frameNum> h:mm:ss.s' across the frame top.

    The timestamp is the capture's current position plus clipTimeOffset
    seconds.  The frame is modified in place.
    """
    banner_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    elapsed = clipTimeOffset + (cap.get(cv2.CAP_PROP_POS_MSEC) / 1000)
    minutes, secs = divmod(elapsed, 60)
    hours, minutes = divmod(minutes, 60)
    stamp = "%d:%02d:%02.1f" % (hours, minutes, secs)
    cv2.rectangle(frame, (0, 0), (banner_width, 100), (0, 0, 0), -1)
    cv2.putText(frame, "#%d %s" % (frameNum, stamp), (50, 80), cv2.FONT_HERSHEY_DUPLEX, 2, (255, 255, 255, 255), 4)
def processframe (frameNum,frame,cap,watchPoints,clipTimeOffset):
    """Annotate frame and update every watch point; report state changes.

    Paints the timestamp banner, runs each WatchPoint's detector on the
    frame, records the red-start time (video position in ms) on red
    transitions, and paints each point.  Returns 1 if at least one watch
    point flipped state on this frame, else 0.
    """
    stageChange = 0
    paint(frameNum,frame,cap,clipTimeOffset)
    for i, w in enumerate(watchPoints):
        prevRed = w.isRed
        w.processframe(frame)
        same = (w.isRed == prevRed)
        if same == 0:
            # This watch point flipped state on this frame.
            stageChange = 1
            if w.isRed:
                # Remember when it turned red (capture position, ms).
                w.redStart = cap.get(cv2.CAP_PROP_POS_MSEC)
            else:
                w.redStart = None
            log.info("WP(%d) state changed; frame:%d prevRed:%d isRed:%d :: %s" % (i,frameNum,prevRed,w.isRed,w))
        w.paint(frame,cap.get(cv2.CAP_PROP_POS_MSEC))
    return stageChange
log = logger()
|
m = int(input("How many sides does the first die have? "))
n = int(input("How many sides does the second die have? "))

# BUG FIX: the original guard `1 > m > 1000 and 1 > n > 1000` is always
# False (no number is both below 1 and above 1000), so invalid die sizes
# were never rejected.  Reject any die outside the 1..1000 range.
if not (1 <= m <= 1000 and 1 <= n <= 1000):
    print("Error")
else:
    # A roll (i, j) sums to 10 exactly when j = 10 - i is a face on the
    # second die, i.e. 1 <= 10 - i <= n.  (The original's manual
    # `i = i + 1` inside the for loop had no effect and was dropped.)
    c = sum(1 for i in range(1, m + 1) if 1 <= 10 - i <= n)
    if c == 1:
        print("There is " + str(c) + " way to get the sum 10")
    else:
        print("There are " + str(c) + " ways to get the sum 10")
|
"""
List of Consecutive Numbers
Implement a function that returns a list containing all the consecutive numbers in ascendant order from the given value low up to the given value high (bounds included).
https://edabit.com/challenge/Hdthzjmr5fRqEX93E
"""
from typing import List


def get_sequence(low: int, high: int) -> List[int]:
    """Return [low, low+1, ..., high]; empty when low > high."""
    return [value for value in range(low, high + 1)]
def score(a):
    """Weighted total of the three marks: 35*a[0] + 45*a[1] + 20*a[2]."""
    weights = (35, 45, 20)
    return sum(w * mark for w, mark in zip(weights, a))
T = int(input())
for tc in range(1, T+1):
N, K = map(int, input().split())
arr = [[] for _ in range(N)]
score_list = []
for i in range(N):
arr[i] = list(map(int, input().split()))
score_list.append(score(arr[i]))
# 순위 매기기
# 이게 순위 매기는 좋은 방법일 것인가? 이중 포문이라 효율적이지는 않은 것 같다
rank = [0] * N
for i in range(N):
temp = 0
for j in range(N):
if i != j:
if score_list[i] > score_list[j]:
temp += 1
rank[i] = N - temp
a_score = ['A+', 'A0', 'A-', 'B+', 'B0', 'B-', 'C+', 'C0', 'C-', 'D']
# K 번째 학생의 랭크 = rank(K-1)
print('#{} '.format(tc), end='')
if rank[K-1] < N * 0.1:
print('A+')
elif rank[K-1] <= N * 0.2:
print('A0')
elif rank[K-1] <= N * 0.3:
print('A-')
elif rank[K-1] <= N * 0.4:
print('B+')
elif rank[K-1] <= N * 0.5:
print('B0')
elif rank[K-1] <= N * 0.6:
print('B-')
elif rank[K-1] <= N * 0.7:
print('C+')
elif rank[K-1] <= N * 0.8:
print('C0')
elif rank[K-1] <= N * 0.9:
print('C-')
else:
print('D')
# print(arr)
# print(score_list)
# print(rank)
# print(rank[K-1]) |
989,899 | 07d636bc4909ab275de83f6342d28d256198e171 | """
Creates permissions for all installed apps that need permissions.
"""
from django.db.models import get_models, signals
from hipercic.hipercore.authenticore import models as auth_app
def _get_permission_codename(action, opts):
return u'%s_%s' % (action, opts.object_name.lower())
def _get_all_permissions(opts):
    """Return (codename, name) pairs for the default add/change/delete
    permissions plus any custom permissions declared on the model."""
    defaults = [
        (_get_permission_codename(action, opts),
         u'Can %s %s' % (action, opts.verbose_name_raw))
        for action in ('add', 'change', 'delete')
    ]
    return defaults + list(opts.permissions)
def create_permissions(app, created_models, verbosity, **kwargs):
    """post_syncdb handler: ensure a Permission row exists for every
    (App, permission) pair.

    NOTE(review): the `app` parameter is immediately shadowed by the loop
    variable below, so the signal's sender is effectively ignored and the
    full App x permission matrix is (re)checked on every syncdb — confirm
    that is intended.
    """
    from hipercic.hipercore.authenticore.models import Permission, App
    #from hipercic.apps.appConfig import ACTIVE_APPS
    from hipercic.hipercore.permissions import all_perms
    apps = App.objects.all()
    perms= all_perms()
    for app in apps:
        for perm in perms:
            # get_or_create keys on (name, app), so reruns are idempotent.
            p, created = Permission.objects.get_or_create(name=perm['title'], app=app)
            if created:
                print p.name + "|" + p.app.name
def create_superuser(app, created_models, verbosity, **kwargs):
    """post_syncdb handler: offer to create an admin account interactively.

    Runs only when the User table was just created and syncdb is running
    interactively.  An empty answer defaults to 'yes'.
    """
    from hipercic.hipercore.authenticore.models import User
    from django.core.management import call_command
    if User in created_models and kwargs.get('interactive', True):
        msg = "\nYou just installed Django's authenticore system, which means you don't have " \
            "any admin defined.\nWould you like to create one now? (YES/no): "
        confirm = raw_input(msg)
        while 1:
            if confirm == '':
                confirm = 'yes'
            if confirm not in ('yes', 'no'):
                confirm = raw_input('Please enter either "yes" or "no": ')
                continue
            if confirm == 'yes':
                call_command("createsuperuser", interactive=True)
            # BUG FIX: the original only broke out of the loop on 'yes',
            # so answering 'no' re-checked the same value forever.  Leave
            # the loop once any valid answer has been handled.
            break
def load_active_apps(**kwargs):
    """post_syncdb handler: mirror ACTIVE_APPS into the App table,
    creating any missing rows as active."""
    from hipercic.apps.appConfig import ACTIVE_APPS
    from hipercic.hipercore.authenticore.models import App
    for app in ACTIVE_APPS:
        a, created = App.objects.get_or_create(name=app, is_active=True) #put each app into the database...
        if created:
            print "Added app '%s'" % a
# Wire the handlers to post_syncdb.  dispatch_uid keeps each connection
# unique, so repeated imports do not register duplicate handlers.  Only
# create_superuser is restricted to this app as sender; the other two
# fire after every app's syncdb.
signals.post_syncdb.connect(load_active_apps,
    dispatch_uid = "hipercic.hipercore.authenticore.management.load_active_apps")
signals.post_syncdb.connect(create_permissions,
    dispatch_uid = "hipercic.hipercore.authenticore.management.create_permissions")
signals.post_syncdb.connect(create_superuser,
    sender=auth_app, dispatch_uid = "hipercic.hipercore.authenticore.management.create_superuser")
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.