index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
14,900 | af10f9032c020c97d0a073f5c3aafc79aa963d42 | #!/usr/bin/env python3
import sys
import re
def indent(line):
    """Return the number of leading space characters in *line*."""
    count = 0
    for ch in line:
        if ch != " ":
            break
        count += 1
    return count
def main():
    """Renumber ordered-list items read from stdin and echo the result.

    Non-list lines pass through untouched.  Numbering restarts at 1 for
    each nesting level: deeper indentation pushes a fresh counter and
    shallower indentation pops back to the enclosing counter.
    """
    # BUG FIX: the dot was unescaped ("[0-9]+.") so ANY character after the
    # digits matched, and lines like "12x foo" were rewritten as list items.
    list_re = re.compile(r"^ *([0-9]+\.)")
    stack = [1]          # item counter per nesting level
    indent_stack = [0]   # indentation column per nesting level
    for line in sys.stdin:
        m = list_re.match(line)
        if not m:
            print(line, end="")
            continue
        current_indent = len(line) - len(line.lstrip(" "))
        if current_indent > indent_stack[-1]:
            # Deeper level: start a fresh counter at 1.
            stack.append(1)
            indent_stack.append(current_indent)
        if current_indent < indent_stack[-1]:
            # Shallower level: pop back to the enclosing counter(s).
            while indent_stack[-1] > current_indent:
                indent_stack.pop()
                stack.pop()
        group = m.group(1)
        end = m.end()
        start = end - len(group)
        # Replace the original "<number>." with the running counter.
        print(line[:start] + f"{stack[-1]}." + line[end:], end="")
        stack[-1] += 1
if __name__ == "__main__":
main()
|
14,901 | 7444bdeb99a64c39ca6675ab0d58fdc0ada6736f | import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import argparse
from Tensor.real_tensor import RV_tensor
from Tensor.binary_tensor import binary_tensor
from Tensor.count_tensor import count_tensor
import numpy as np
from SSVI.SSVI_TF_robust import SSVI_TF_robust
from SSVI.SSVI_TF_d import SSVI_TF_d
from SSVI.SSVI_TF_simple import SSVI_TF_simple
# np.random.seed(seed=319)
default_params = {"mean_update" : "S", "cov_update" : "N", "rank" : 20}
def get_factorizer_param(model, datatype, diag, using_quadrature):
    """Return the optimizer hyper-parameters shared by all model variants.

    The arguments are accepted for interface compatibility but are
    currently ignored: every model trains with identical settings.
    """
    return {"eta": 1, "cov_eta": 0.001}
def get_init_values(datatype, D):
    """Build the initial mean/covariance for the variational factors.

    Args:
        datatype: "real", "binary" or "count".
        D: latent dimensionality.

    Returns:
        dict with "cov0" (D x D identity) and "mean0" (all ones for
        "real", all zeros for every other datatype).
    """
    # TODO: does starting position matter
    mean0 = np.ones((D,)) if datatype == "real" else np.zeros((D,))
    return {"cov0": np.eye(D), "mean0": mean0}
def synthesize_tensor(dims, datatype, using_ratio, noise):
    """Generate a synthetic 3-way tensor with known latent structure.

    Args:
        dims: list of three mode sizes.
        datatype: "real", "binary" or "count" -- selects the tensor class.
        using_ratio: forwarded to synthesize_data as noise_ratio.
        noise: noise level forwarded to synthesize_data.

    Returns:
        The populated tensor object (80% of entries used as training data).

    Raises:
        ValueError: for an unrecognized datatype.  Previously an unknown
        datatype fell through and crashed later with UnboundLocalError.
    """
    real_dim = 100
    means = [np.ones((real_dim,)) * 5, np.ones((real_dim,)) * 10, np.ones((real_dim,)) * 2]
    covariances = [np.eye(real_dim) * 2, np.eye(real_dim) * 3, np.eye(real_dim) * 2]
    if datatype == "binary":
        tensor = binary_tensor()
    elif datatype == "real":
        tensor = RV_tensor()
    elif datatype == "count":
        tensor = count_tensor()
    else:
        raise ValueError("unsupported datatype: {}".format(datatype))
    tensor.synthesize_data(dims, means, covariances, real_dim,
                           train=0.8, sparsity=1, noise=noise, noise_ratio=using_ratio)
    return tensor
def synthesize_matrix(dims, datatype, noise_ratio, noise_amount):
    """Generate a synthetic matrix (2-way tensor) with known latent structure.

    Args:
        dims: list of two mode sizes.
        datatype: "binary", "real", or anything else for a count tensor.
        noise_ratio: forwarded to synthesize_data as noise_ratio.
        noise_amount: forwarded to synthesize_data as noise.
    """
    real_dim = 100
    means = [np.ones((real_dim,)) * factor for factor in (5, 2)]
    covariances = [np.eye(real_dim) * factor for factor in (2, 3)]
    if datatype == "binary":
        tensor = binary_tensor()
    elif datatype == "real":
        tensor = RV_tensor()
    else:
        tensor = count_tensor()
    tensor.synthesize_data(
        dims, means, covariances, real_dim,
        train=0.8, sparsity=1, noise=noise_amount, noise_ratio=noise_ratio)
    return tensor
parser = argparse.ArgumentParser(description="Testing models at specific training size")
parser.add_argument("-m", "--model", type=str, help="model of factorizer", choices=["deterministic", "simple", "robust"])
parser.add_argument("-d", "--datatype", type=str, help="datatype of tensor", choices=["real", "binary", "count"])
excl_group = parser.add_mutually_exclusive_group()
excl_group.add_argument("-r", "--ratio", type=float, help="noise as ratio")
excl_group.add_argument("-n", "--noise", type=float, help="noise level")
parser.add_argument("--diag", action="store_true")
parser.add_argument("-tr", "--train_size", type=float, help="portion of training data")
parser.add_argument("--fixed_cov", action="store_true", help="Fixed covariance")
parser.add_argument("-it", "--num_iters", type=int, help="Max number of iterations", default=8000)
parser.add_argument("-re", "--report", type=int, help="Report interval", default=500)
parser.add_argument("--quadrature", action="store_true", help="using quadrature")
parser.add_argument("--matrix", action="store_true", help="Doing matrix factorization instead of tensor factorization")
parser.add_argument("-ceta", "--cov_eta", type=float, help="cov eta", default=1.0)
parser.add_argument("--rand", action="store_true", help="Using random start")
parser.add_argument("-meta", "--mean_eta", type=float, help="mean eta", default=1.0)
parser.add_argument("-dim", "--dimension", nargs='+', required=True, default=[50, 50, 50])
parser.add_argument("-k1", "--k1", type=int, help="k1 samples", default=64)
parser.add_argument("-k2", "--k2", type=int, help="k2 samples", default=128)
args = parser.parse_args()

model = args.model
datatype = args.datatype
# Latent dimensionality of the factor matrices.
D = 20
diag = args.diag # full or diagonal covariance
NOISE_RATIO = args.ratio # using noise as ratio of f
NOISE_AMOUNT = args.noise # Noise amount
# Should be exclusive group
default_params["diag"] = diag
fixed_covariance = args.fixed_cov
using_quadrature = args.quadrature
randstart = args.rand
dims = [int(x) for x in args.dimension]

# Resolve the mutually exclusive noise flags; default is noise-free.
if NOISE_RATIO is not None:
    using_ratio = True
    noise = args.ratio
elif NOISE_AMOUNT is not None:
    using_ratio = False
    noise = args.noise
else:
    using_ratio = True
    noise = 0

# Three dims -> 3-way tensor, two dims -> matrix factorization.
if len(dims) == 3:
    synthetic_tensor = synthesize_tensor(dims, datatype, using_ratio, noise)
elif len(dims) == 2:
    synthetic_tensor = synthesize_matrix(dims, datatype, using_ratio, noise)
else:
    raise Exception("Have not implemented the necessary dimensions")

# Assemble the factorizer configuration; later dict entries override earlier ones.
factorizer_param = get_factorizer_param(model, datatype, diag, using_quadrature)
init_vals = get_init_values(datatype, D)
params = {**default_params, **factorizer_param, **init_vals, "tensor" : synthetic_tensor }
params["cov_eta"] = args.cov_eta
params["eta"] = args.mean_eta
params["randstart"] = randstart
# NOTE(review): this overrides the cov0 produced by get_init_values -- confirm intended.
params["cov0"] = np.eye(D)*0.1
params["k1"] = args.k1
params["k2"] = args.k2

if model == "deterministic":
    factorizer = SSVI_TF_d(**params)
elif model == "simple":
    factorizer = SSVI_TF_simple(**params)
elif model == "robust":
    factorizer = SSVI_TF_robust(**params)

portion = args.train_size
factorizer.evaluate_true_params()
if portion is not None:
    synthetic_tensor.reduce_train_size(portion)
max_iterations = args.num_iters
#if datatype == "count" and model != "robust" and args.matrix:
#    max_iterations = 30000
factorizer.factorize(report=args.report, max_iteration=max_iterations, fixed_covariance=fixed_covariance, to_report=[0, 5, 10, 20, 50, 100, 200])
|
14,902 | b21386b7974954e86fad49980d258f7162b430d5 | import time
import numpy as np
import math
from numpy.random import rand
import matplotlib.pyplot as plt
import matplotlib as mpl
class Up_down_flip_model():
    """Metropolis Monte-Carlo simulation of a spin model restricted to up/down.

    Spins live on an N x N grid stored as (theta, beta) spherical angles;
    init_grid only ever produces theta in {0, pi} with beta = 0, so each
    spin points straight up or straight down.  Couplings J are random,
    fixed at construction.
    """
    def __init__(self,N,step,label='simple flip model'):
        # N: grid side length; step: number of Monte-Carlo sweeps to run.
        self.N = N
        self.step=step
        self.config = self.init_grid(N)
        self.J = self.init_J(N)
        self.label = label
    def init_grid(self,N):
        """Return a (2, N, N) array: layer 0 is theta (0 or pi), layer 1 is beta (0)."""
        theta = np.random.randint(2,size=(N,N))*math.pi
        theta = theta[np.newaxis, :]
        beta = np.zeros((N,N))
        beta = beta[np.newaxis, :]
        return np.concatenate((theta,beta),axis=0)
    def cal_cartesion(self,grid):
        """Convert the (theta, beta) grid to Cartesian unit vectors, shape (3, N, N)."""
        theta,beta = grid[0,:,:], grid[1,:,:]
        x = np.sin(theta)*np.cos(beta)
        y = np.sin(theta)*np.sin(beta)
        z = np.cos(theta)
        N = self.N
        return np.concatenate([x,y,z],0).reshape([3,N,N])
    def init_J(self,N):
        ''' generates a random connection parameter for initial condition'''
        # np.random.rand(2, N, N) draws uniform(0, 1) samples; the same
        # coupling is applied to the x, y and z components of a bond.
        # Seeded so every run uses the same couplings, then reseeded from
        # the OS so subsequent random draws differ between runs.
        np.random.seed(2021)
        # -1 to 1
        J = 2*(np.random.rand(2,N, N)-0.5)
        np.random.seed()
        return J
    def calculate_single_energy(self,grid, a, b, J,lamda):
        # a, b are the x and y grid coordinates of the site.
        N = self.N
        coordination = self.cal_cartesion(grid)
        s = coordination[:,a,b] # the selected site's (x, y, z) components
        # Sum of coupling-weighted neighbor vectors (periodic boundaries).
        E = coordination[:,(a+1) % N,b]*J[0,(a+1) % N,b] + \
            coordination[:,a,(b+1) % N]*J[1,a,b] + \
            coordination[:,(a-1) % N,b]*J[0,a,b] + \
            coordination[:,a,(b-1) % N]*J[1,a,(b-1)%N]
        Energy = s*E
        # Bond energy halved (each bond shared by two sites) plus an
        # anisotropy penalty lamda * (z^2 - 1)^2 favoring |z| = 1.
        single_point_energy = np.sum(Energy)/2+lamda*np.square(np.square(coordination[2,a,b])-1)
        return single_point_energy
    def calculate_total_energy(self,config,lamda):
        """Sum the single-site energies over the whole grid."""
        N = self.N
        total_energy = 0
        for a in range(N):
            for b in range(N):
                total_energy += self.calculate_single_energy(config, a, b, self.J,lamda)
        return total_energy
    def calculate_new_direction_energy(self,grid,a,b,J,new_theta,new_beta,lamda):
        """Energy of site (a, b) if its angles were replaced by (new_theta, new_beta)."""
        new_grid = np.copy(grid)
        new_grid[:,a,b] = [new_theta,new_beta]
        return self.calculate_single_energy(new_grid,a,b,J,lamda)
    def flipping_random_new_direction(self,lamda,temperature):
        '''Monte Carlo move using Metropolis algorithm '''
        for i in range(self.N):
            for j in range(self.N):
                # Pick a random site in [0, N-1] x [0, N-1].
                a = np.random.randint(0, self.N)
                b = np.random.randint(0, self.N)
                old_theta = self.config[0,a,b]
                new_theta = (old_theta/math.pi+1)%2*math.pi # flip: 0 <-> pi
                new_beta = 0 # 0
                origin_energy = self.calculate_single_energy(self.config, a, b, self.J,lamda)
                later_energy = self.calculate_new_direction_energy(self.config,a,b,self.J,new_theta,new_beta,lamda)
                # NOTE(review): the energy difference is scaled by 4 in total
                # (2 * ... * 2) -- confirm this factor is intended.
                cost = 2*(later_energy - origin_energy) *2
                # Accept the flip if it lowers the energy...
                if cost < 0:
                    self.config[:,a,b] = [new_theta,new_beta]
                # ...otherwise accept with Boltzmann probability exp(-cost/T).
                elif rand() < np.exp(-cost/temperature):
                    self.config[:,a,b] = [new_theta,new_beta]
    def simulation(self,lamda = 0,temperature=2):
        """Run self.step Metropolis sweeps, tracking the lowest-energy config
        seen, and plot the average energy per site over time."""
        Energy = [] # internal energy recorded after each sweep
        # Start of the simulation: seed the best-so-far with the initial state.
        self.lowest_energy = np.copy(self.calculate_total_energy(self.config,lamda=0))
        self.lowest_config = np.copy(self.config)
        time_start = time.time()
        for i in range(self.step):
            self.flipping_random_new_direction(lamda = lamda,temperature=temperature)
            # Energies are tracked with lamda=0 even when flips use lamda != 0.
            e = self.calculate_total_energy(self.config,lamda=0)
            if e < self.lowest_energy:
                # NOTE(review): recomputes the total energy instead of reusing
                # `e`, and double-wraps np.copy -- harmless but redundant.
                self.lowest_energy = np.copy(np.copy(self.calculate_total_energy(self.config,lamda=0)))
                self.lowest_config = np.copy(self.config)
            Energy.append(e)
            if i % 300 == 0:
                print("已完成第%d步模拟" % i)
        time_end = time.time()
        print('totally cost', time_end-time_start)
        average_energy = [i/np.square(self.N) for i in Energy]
        self.energy = Energy
        self.average_energy = average_energy
        plt.plot(average_energy,label=self.label)
        plt.legend()
    def convert_config_to_up_down(self):
        """Snap the lowest-energy configuration's theta values to exactly 0 or pi."""
        self.lowest_config[0,:,:][self.lowest_config[0,:,:]<(math.pi/2)] = 0
        self.lowest_config[0,:,:][self.lowest_config[0,:,:]>(math.pi/2)] = math.pi
    def plot_spin_z(self):
        """Quiver plot of the z-components of the lowest-energy configuration,
        colored on a blue-white-red scale over [-1, 1]."""
        N = self.N
        cartesion = self.cal_cartesion(self.lowest_config)
        z_direction_config = cartesion[2,:,:]
        x_position = np.linspace(0,1,N)
        y_position = np.linspace(0,1,N)
        m_x = np.zeros([N,N])
        m_y = z_direction_config
        x_position,y_position = np.meshgrid(x_position, y_position)
        plt.figure(figsize=(N,N))
        ax = plt.subplot(1, 1, 1)
        color = m_y
        map_range=[-1, 1]
        norm = mpl.colors.Normalize(vmin=map_range[0], vmax=map_range[1])
        colormap = mpl.cm.bwr
        color_map = colormap(norm(color))
        color_map = color_map.reshape([-1, 4])
        quiver = ax.quiver(x_position, y_position, m_x, m_y,color=color_map,
                           angles='xy', pivot='mid', scale=10)
14,903 | cd9314f1e5a94d4758cb5ae5f27e4321482cd348 |
# -*- coding: utf_8 -*-
# Copyright (C) 2012 Olaf Radicke
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import sqlite3
import web
from web import form
import HtmlTemplate
import index
import belieftest
import datenbasis
import participate
import sqlgenerator
# URL routing table: path pattern -> "module.classname" handler.
urls = (
    '/', 'index.index',
    '/test', 'belieftest.belieftest',
    '/datenbasis','datenbasis.datenbasis',
    '/participate','participate.participate',
    '/sqlgenerator','sqlgenerator.sqlgenerator')
# web.py application built from the routing table above.
app = web.application(urls, globals())#, web.reloader)
# Template renderer rooted at the local templates/ directory.
render = web.template.render('templates/')
# HTML template helper -- presumably shared by the handler modules; confirm.
htemp = HtmlTemplate.HtmlTemplate()
if __name__ == "__main__":
    app.run()
|
14,904 | 7f592044e6f7b0a1927a4f080a31581f1e269ee3 | import copy
# Constants
X = 'X'
O = 'O'


class Tictactoe:
    """
    Tictactoe contains the game model.
    All the functions related to the game's structure are in this class.
    """

    def __init__(self):
        """
        Initializes a new Tictactoe game. Creates player and board.
        """
        # Current player (X always starts)
        self.player = X
        # Board: 3 x 3 grid, None marks an empty cell
        self.board = [[None] * 3 for _ in range(3)]
        # Winner ('X', 'O', or None)
        self.winner = None
        # Cached game-over flag
        self._gameover = False

    def get_board(self):
        """
        Get function for board.  Returns a deep copy so callers cannot
        mutate the game state through it.

        Returns:
            Nested list (3 x 3): board
        """
        return copy.deepcopy(self.board)

    def get_winner(self):
        """Get function for winner

        Returns:
            str: 'X' or 'O' (None for a draw or an unfinished game)
        """
        return self.winner

    def get_player(self):
        """
        Get function for player

        Returns:
            str: 'X' or 'O'
        """
        return self.player

    @staticmethod
    def available_actions(board):
        """
        Finds all the available actions for the given board

        Args:
            board (nested list (3 x 3)): a Tictactoe board

        Returns:
            list of tuples (i, j): the available actions
        """
        # was a nested for-loop with `== None`; `is None` is the correct test
        return [(i, j) for i in range(3) for j in range(3) if board[i][j] is None]

    def is_valid_action(self, action):
        """
        Checks if the action is valid

        Args:
            action (tuple (i, j)): coordinates of a cell

        Returns:
            bool: True if the action is valid, False otherwise
        """
        return self.board[action[0]][action[1]] is None

    def action(self, action):
        """
        Performs the given action if valid

        Args:
            action (tuple (i, j)): coordinates of a cell

        Raises:
            ValueError: If action is invalid (subclass of Exception, so
            existing `except Exception` callers still work)
        """
        if not self.is_valid_action(action):
            raise ValueError('Invalid action')
        # Modify the board
        self.board[action[0]][action[1]] = self.player
        # Switch player
        self.player = X if self.player == O else O

    def terminal(self):
        """
        Checks if the current state is terminal and records the winner.

        Returns:
            bool: True if terminal, False otherwise
        """
        # Collect every winning line: rows, columns and both diagonals.
        lines = []
        for i in range(3):
            lines.append([self.board[i][j] for j in range(3)])   # row i
            lines.append([self.board[j][i] for j in range(3)])   # column i
        lines.append([self.board[k][k] for k in range(3)])        # main diagonal
        lines.append([self.board[2 - k][k] for k in range(3)])    # anti-diagonal
        for line in lines:
            if line[0] is not None and line[0] == line[1] == line[2]:
                self.winner = line[0]
                return True
        # Draw check: no empty cells left and no winning line
        if all(cell is not None for row in self.board for cell in row):
            self.winner = None
            return True
        return False

    def gameover(self):
        """
        Checks if the game is over

        Returns:
            bool: True if game over, False otherwise
        """
        if not self._gameover and self.terminal():
            self._gameover = True
        return self._gameover
|
14,905 | 9995560e51e9805a48805ab523ae0befb2b5eed7 | # Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 and
# only version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import sys
import re
import os
import struct
import datetime
import array
import string
import bisect
import traceback
from subprocess import *
from optparse import OptionParser
from optparse import OptionGroup
from struct import unpack
from ctypes import *
from print_out import *
FP = 11
SP = 13
LR = 14
PC = 15
THREAD_SIZE = 8192
class Stackframe():
    """Snapshot of one ARM stack frame's registers."""

    def __init__(self, fp, sp, lr, pc):
        """Store frame pointer, stack pointer, link register and program counter."""
        self.fp = fp
        self.sp = sp
        self.lr = lr
        self.pc = pc
class UnwindCtrlBlock():
    """Mutable state threaded through the unwind-instruction interpreter."""

    def __init__(self):
        self.vrs = [0] * 16   # virtual register file r0-r15
        self.insn = 0         # address of the current unwind instruction word
        self.entries = -1     # instruction words remaining to consume
        self.byte = -1        # byte index within the current word
        self.index = 0
class Unwinder () :
    """ARM EHABI stack unwinder driven by a RAM dump's unwind index table.

    Reads the kernel's __start_unwind_idx/__stop_unwind_idx region and
    interprets the per-function unwind instructions to walk stack frames.
    """
    def __init__(self, ramdump) :
        """Load the unwind index table from *ramdump* and pick the index
        search strategy by kernel version (3.0.x vs 3.4+)."""
        start = ramdump.addr_lookup("__start_unwind_idx")
        end = ramdump.addr_lookup("__stop_unwind_idx")
        if (start is None) or (end is None) :
            print_out_str ("!!! Could not lookup unwinding information")
            # NOTE(review): returning here leaves a half-built object with
            # no unwind_table attribute; later method calls would fail.
            return None
        # addresses
        self.start_idx = start
        self.stop_idx = end
        self.unwind_table = []
        self.ramdump = ramdump
        i = 0
        # Each 8-byte entry holds (prel31 function offset, unwind word);
        # the entry's own address is stored as the third element.
        for addr in range(start,end,8) :
            (a,b) = ramdump.read_string(addr,"<II")
            self.unwind_table.append((a,b,start+8*i))
            i+=1
        ver = ramdump.version
        if re.search('3.0.\d',ver) is not None :
            self.search_idx = self.search_idx_3_0
        else :
            self.search_idx = self.search_idx_3_4
        # index into the table
        self.origin = self.unwind_find_origin()
    def unwind_find_origin(self) :
        """Binary search for the table's origin: the first entry whose
        prel31 offset is no longer >= 0x40000000."""
        start = 0
        stop = len(self.unwind_table)
        while (start < stop) :
            mid = start + ((stop - start) >> 1)
            if (self.unwind_table[mid][0] >= 0x40000000) :
                start = mid + 1
            else :
                stop = mid
        return stop
    def unwind_frame_generic(self, frame) :
        """Frame-pointer (APCS) unwind step: load the saved {fp, sp, pc}
        triple from below *frame*.fp.  Returns 0 on success, -1 at the end."""
        high = 0
        fp = frame.fp
        low = frame.sp
        mask = (THREAD_SIZE) - 1
        high = (low + mask) & (~mask) #ALIGN(low, THREAD_SIZE)
        # /* check current frame pointer is within bounds */
        if (fp < (low + 12) or fp + 4 >= high) :
            return -1
        # fp points just past the {fp, sp, pc} save area.
        fp_is_at = self.ramdump.read_word(frame.fp-12)
        sp_is_at = self.ramdump.read_word(frame.fp-8)
        pc_is_at = self.ramdump.read_word(frame.fp-4)
        frame.fp = fp_is_at
        frame.sp = sp_is_at
        frame.pc = pc_is_at
        return 0
    def walk_stackframe_generic(self, frame) :
        """Print the symbol for each frame while unwinding via frame pointers."""
        while True :
            symname = self.ramdump.addr_to_symbol(frame.pc)
            print_out_str (symname)
            ret = self.unwind_frame_generic(frame)
            if ret < 0 :
                break
    def unwind_backtrace_generic(self, sp, fp, pc) :
        # NOTE(review): this method looks broken -- Stackframe() requires
        # four constructor arguments, and walk_stackframe_generic is called
        # without self; both would raise at runtime.  Confirm it is unused.
        frame = Stackframe()
        frame.fp = fp
        frame.pc = pc
        frame.sp = sp
        walk_stackframe_generic(frame)
    def search_idx_3_4(self, addr) :
        """Find the unwind index entry covering *addr* (3.4+ kernels:
        offsets are prel31-relative to each entry's address)."""
        start = 0
        stop = len(self.unwind_table)
        orig = addr
        if (addr < self.start_idx) :
            stop = self.origin
        else :
            start = self.origin
        addr = (addr - self.unwind_table[start][2]) & 0x7fffffff
        while (start < (stop - 1)) :
            mid = start + ((stop - start) >> 1)
            dif = (self.unwind_table[mid][2] - self.unwind_table[start][2])
            if ((addr - dif) < self.unwind_table[mid][0]) :
                stop = mid
            else :
                addr = addr - dif
                start = mid
        if self.unwind_table[start][0] <= addr :
            return self.unwind_table[start]
        else :
            return None
    def search_idx_3_0(self, addr) :
        """Find the unwind index entry covering *addr* (3.0 kernels:
        offsets are absolute addresses, plain binary search)."""
        first = 0
        last = len(self.unwind_table)
        while (first < last - 1) :
            mid = first + ((last - first + 1) >> 1)
            if (addr < self.unwind_table[mid][0]) :
                last = mid
            else :
                first = mid
        return self.unwind_table[first]
    def unwind_get_byte(self, ctrl) :
        """Consume and return the next unwind instruction byte (big-endian
        within each 32-bit word); returns 0 on a corrupt table."""
        if (ctrl.entries <= 0) :
            print_out_str("unwind: Corrupt unwind table")
            return 0
        val = self.ramdump.read_word(ctrl.insn)
        ret = (val >> (ctrl.byte * 8)) & 0xff
        if (ctrl.byte == 0) :
            # Word exhausted: advance to the next instruction word.
            ctrl.insn+=4
            ctrl.entries-=1
            ctrl.byte = 3
        else :
            ctrl.byte-=1
        return ret
    def unwind_exec_insn(self, ctrl, trace = False) :
        """Execute one EHABI unwind instruction against ctrl.vrs.

        Returns 0 on success, -1 on a refuse-to-unwind / spare encoding /
        failed memory read.
        """
        insn = self.unwind_get_byte(ctrl)
        if ((insn & 0xc0) == 0x00) :
            # 00xxxxxx: vsp += (xxxxxx << 2) + 4
            ctrl.vrs[SP] += ((insn & 0x3f) << 2) + 4
            if trace :
                print_out_str (" add {0} to stack".format(((insn & 0x3f) << 2) + 4))
        elif ((insn & 0xc0) == 0x40) :
            # 01xxxxxx: vsp -= (xxxxxx << 2) + 4
            ctrl.vrs[SP] -= ((insn & 0x3f) << 2) + 4
            if trace :
                print_out_str (" subtract {0} from stack".format(((insn & 0x3f) << 2) + 4))
        elif ((insn & 0xf0) == 0x80) :
            # 1000xxxx xxxxxxxx: pop registers r4-r15 under a 12-bit mask
            vsp = ctrl.vrs[SP]
            reg = 4
            insn = (insn << 8) | self.unwind_get_byte(ctrl)
            mask = insn & 0x0fff
            if (mask == 0) :
                print_out_str ("unwind: 'Refuse to unwind' instruction")
                return -1
            # pop R4-R15 according to mask */
            load_sp = mask & (1 << (13 - 4))
            while (mask) :
                if (mask & 1) :
                    ctrl.vrs[reg] = self.ramdump.read_word(vsp)
                    if trace :
                        print_out_str (" pop r{0} from stack".format(reg))
                    if ctrl.vrs[reg] is None :
                        return -1
                    vsp+=4
                mask >>= 1
                reg+=1
            # If SP itself was popped, keep the popped value instead of vsp.
            if not load_sp :
                ctrl.vrs[SP] = vsp
        elif ((insn & 0xf0) == 0x90 and (insn & 0x0d) != 0x0d) :
            # 1001nnnn: set vsp from register nnnn
            if trace :
                print_out_str (" set SP with the value from {0}".format(insn & 0x0f))
            ctrl.vrs[SP] = ctrl.vrs[insn & 0x0f]
        elif ((insn & 0xf0) == 0xa0) :
            # 1010xbbb: pop r4-r[4+bbb], optionally (x set) also lr
            vsp = ctrl.vrs[SP]
            a = list(range(4,4 + (insn & 7)))
            a.append(4 + (insn & 7))
            # pop R4-R[4+bbb] */
            for reg in (a) :
                ctrl.vrs[reg] = self.ramdump.read_word(vsp)
                if trace :
                    print_out_str (" pop r{0} from stack".format(reg))
                if ctrl.vrs[reg] is None :
                    return -1
                vsp+=4
            if (insn & 0x80) :
                if trace :
                    print_out_str (" set LR from the stack")
                ctrl.vrs[14] = self.ramdump.read_word(vsp)
                if ctrl.vrs[14] is None :
                    return -1
                vsp+=4
            ctrl.vrs[SP] = vsp
        elif (insn == 0xb0) :
            # Finish: pc defaults to lr if not already set
            if trace :
                print_out_str (" set pc = lr")
            if (ctrl.vrs[PC] == 0) :
                ctrl.vrs[PC] = ctrl.vrs[LR]
            ctrl.entries = 0
        elif (insn == 0xb1) :
            # 10110001 0000iiii: pop r0-r3 under mask
            mask = self.unwind_get_byte(ctrl)
            vsp = ctrl.vrs[SP]
            reg = 0
            if (mask == 0 or mask & 0xf0) :
                print_out_str ("unwind: Spare encoding")
                return -1
            # pop R0-R3 according to mask
            while mask :
                if (mask & 1) :
                    ctrl.vrs[reg] = self.ramdump.read_word(vsp)
                    if trace :
                        print_out_str (" pop r{0} from stack".format(reg))
                    if ctrl.vrs[reg] is None :
                        return -1
                    vsp+=4
                mask >>= 1
                reg+=1
            ctrl.vrs[SP] = vsp
        elif (insn == 0xb2) :
            # 10110010 uleb128: vsp += 0x204 + (uleb128 << 2)
            # NOTE(review): reads a single byte, so multi-byte uleb128
            # operands are not handled -- confirm against the EHABI spec.
            uleb128 = self.unwind_get_byte(ctrl)
            if trace :
                print_out_str (" Adjust sp by {0}".format(0x204 + (uleb128 << 2)))
            ctrl.vrs[SP] += 0x204 + (uleb128 << 2)
        else :
            print_out_str ("unwind: Unhandled instruction")
            return -1
        return 0
    def prel31_to_addr(self, addr) :
        """Resolve a prel31 (31-bit self-relative, sign-extended) offset
        stored at *addr* into an absolute 32-bit address."""
        value = self.ramdump.read_word(addr)
        # offset = (value << 1) >> 1
        # C wants this sign extended. Python doesn't do that.
        # Sign extend manually.
        if (value & 0x40000000) :
            offset = value | 0x80000000
        else :
            offset = value
        # This addition relies on integer overflow
        # Emulate this behavior
        temp = addr + offset
        return (temp & 0xffffffff) + ((temp >> 32) & 0xffffffff)
    def unwind_frame(self, frame, trace = False) :
        """Unwind *frame* one level using the EHABI tables.

        Returns 0 on success and -1 when unwinding must stop (no index
        entry, can't-unwind marker, unsupported encoding, SP out of the
        thread stack bounds, or no progress).
        """
        low = frame.sp
        high = ((low + (THREAD_SIZE - 1)) & ~(THREAD_SIZE - 1)) + THREAD_SIZE
        idx = self.search_idx(frame.pc)
        if (idx is None) :
            if trace :
                print_out_str ("can't find %x" % frame.pc)
            return -1
        ctrl = UnwindCtrlBlock()
        ctrl.vrs[FP] = frame.fp
        ctrl.vrs[SP] = frame.sp
        ctrl.vrs[LR] = frame.lr
        ctrl.vrs[PC] = 0
        if (idx[1] == 1) :
            # EXIDX_CANTUNWIND
            return -1
        elif ((idx[1] & 0x80000000) == 0) :
            # prel31 pointer to an out-of-line table entry
            ctrl.insn = self.prel31_to_addr(idx[2]+4)
        elif (idx[1] & 0xff000000) == 0x80000000 :
            # instructions embedded inline in the index entry
            ctrl.insn = idx[2]+4
        else :
            print_out_str ("not supported")
            return -1
        val = self.ramdump.read_word(ctrl.insn)
        if ((val & 0xff000000) == 0x80000000) :
            # Short format: 3 instruction bytes in one word
            ctrl.byte = 2
            ctrl.entries = 1
        elif ((val & 0xff000000) == 0x81000000) :
            # Long format: second byte is the extra word count
            ctrl.byte = 1
            ctrl.entries = 1 + ((val & 0x00ff0000) >> 16)
        else :
            return -1
        while (ctrl.entries > 0) :
            urc = self.unwind_exec_insn(ctrl, trace)
            if (urc < 0) :
                return urc
            if (ctrl.vrs[SP] < low or ctrl.vrs[SP] >= high) :
                return -1
        if (ctrl.vrs[PC] == 0) :
            ctrl.vrs[PC] = ctrl.vrs[LR]
        # check for infinite loop */
        if (frame.pc == ctrl.vrs[PC]) :
            return -1
        frame.fp = ctrl.vrs[FP]
        frame.sp = ctrl.vrs[SP]
        frame.lr = ctrl.vrs[LR]
        frame.pc = ctrl.vrs[PC]
        return 0
    def unwind_backtrace(self, sp, fp, pc, lr, extra_str = "", out_file = None, trace = False) :
        """Print (or write to *out_file*) a full symbolized backtrace
        starting from the given register values."""
        offset = 0
        frame = Stackframe(fp, sp, lr, pc)
        # Redundant re-assignment of the constructor arguments (kept as-is).
        frame.fp = fp
        frame.sp = sp
        frame.lr = lr
        frame.pc = pc
        while True :
            where = frame.pc
            offset = 0
            r = self.ramdump.unwind_lookup(frame.pc)
            if r is None :
                symname = "UNKNOWN"
                offset = 0x0
            else :
                symname, offset = r
            pstring = (extra_str+"[<{0:x}>] {1}+0x{2:x}".format(frame.pc, symname, offset))
            if out_file :
                out_file.write (pstring+"\n")
            else :
                print_out_str (pstring)
            urc = self.unwind_frame(frame, trace)
            if urc < 0 :
                break
|
14,906 | 463ef3e4e52ed5f46304fea7e759a72c8128fa9d | # A Floater is Prey; it updates by moving mostly in
# a straight line, but with random changes to its
# angle and speed, and displays as ufo.gif (whose
# dimensions (width and height) are computed by
# calling .width()/.height() on the PhotoImage
from PIL.ImageTk import PhotoImage
from prey import Prey
from random import random
class Floater(Prey):
    """Prey that drifts mostly in a straight line, with occasional random
    changes to its speed and heading."""

    # Radius of the circle drawn for display.
    radius = 5

    def __init__(self, x, y):
        """Create a Floater at (x, y) with a random initial heading and speed 5."""
        Prey.randomize_angle(self)  # pick a random angle first
        Prey.__init__(self, x, y, 10, 10, Prey.get_angle(self), 5)

    def update(self, model):
        """Move one step; on ~20% of ticks, re-roll speed and nudge heading."""
        if random() > .8:
            # Speed drawn from 5*U(0,1), clamped to [3, 7] (the upper clamp
            # is unreachable since 5*random() < 5; kept for safety).
            speed = 5 * random()
            if speed < 3:
                speed = 3
            elif speed > 7:
                speed = 7
            # Heading change drawn from (-1, 1), clamped to [-0.5, 0.5].
            angle = random() - random()
            if angle > 0.5:
                angle = 0.5
            elif angle < -0.5:
                # BUG FIX: previously assigned +0.5 here, which flipped a
                # hard left turn into a hard right turn.
                angle = -0.5
            Prey.set_speed(self, speed)
            Prey.set_angle(self, Prey.get_angle(self) + angle)
        Prey.move(self)

    def display(self, canvas):
        """Draw this Floater as a filled red circle on *canvas*."""
        x, y = self.get_location()
        canvas.create_oval(x - Floater.radius, y - Floater.radius,
                           x + Floater.radius, y + Floater.radius,
                           fill="red")
|
14,907 | c241a10734d6540a4afa4116f9d7a84546ab40cf | from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim.lr_scheduler import StepLR
from SR_Dataset import SR_Dataset, ToTensor
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
matplotlib.use("Agg")
import earthpy.plot as ep
import skimage.metrics as skm
from torchvision import datasets, transforms
from torch.utils.tensorboard import SummaryWriter
import os
logs_base_dir = "runs"
os.makedirs(logs_base_dir, exist_ok=True)
tb = SummaryWriter()
class SR(nn.Module):
    """Multi-frame super-resolution network (SISRNet encoder + FusionNet).

    Five 4-channel low-resolution frames are bicubically upsampled x2,
    encoded independently by a weight-shared CNN (SISRNet), fused across
    the temporal axis by 3-D convolutions (FusionNet), and added as a
    residual to the mean of the upsampled frames.
    """

    def __init__(self):
        super(SR, self).__init__()
        self.up = nn.Upsample(scale_factor=2, mode='bicubic', align_corners=True)
        # SISRNET (the same conv2/instancenorm1 modules are reused for every
        # repeat and every frame, i.e. weights are shared)
        self.conv1 = nn.Conv2d(4, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        self.instancenorm1 = nn.InstanceNorm2d(64, affine=True, track_running_stats=True)  # affine=True --> learnable parameters
        self.conv2 = nn.Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        self.instancenorm2 = nn.InstanceNorm3d(64, affine=True, track_running_stats=True)
        # FusionNet
        self.conv3 = nn.Conv3d(64, 64, kernel_size=(2, 3, 3), stride=(1, 1, 1), padding=(0, 1, 1))
        self.conv4 = nn.Conv2d(64, 4, kernel_size=(1, 1), stride=(1, 1), padding=(0, 0))
        self.instancenorm3 = nn.InstanceNorm2d(4, affine=True, track_running_stats=True)

    def forward(self, image_t1, image_t2, image_t3, image_t4, image_t5):
        """Super-resolve five (B, 4, H, W) frames.

        Returns:
            (output, f, image_mean): the super-resolved image, the FusionNet
            residual, and the mean of the bicubic-upsampled inputs, each of
            shape (B, 4, 2H, 2W).
        """
        frames = (image_t1, image_t2, image_t3, image_t4, image_t5)
        # Upsample each frame and stack along a new temporal axis:
        # (B, T=5, C=4, 2H, 2W)
        input = torch.cat([torch.unsqueeze(self.up(img), 1) for img in frames], dim=1)
        images_SISRNET = []
        # SISRNET, applied per frame.
        # BUG FIX: was `range(input[1].__len__())`, which indexed batch
        # element 1 and raised IndexError for batch size 1.
        for i in range(input.size(1)):
            x = input[:, i, :, :, :]
            x = F.leaky_relu(self.instancenorm1(self.conv1(x)))
            # The original unrolled this shared conv block 7 times.
            for _ in range(7):
                x = F.leaky_relu(self.instancenorm1(self.conv2(x)))
            images_SISRNET.append(torch.unsqueeze(x, 2))
        # Concatenate per-frame features along the temporal (depth) axis:
        # (B, 64, T, 2H, 2W)
        x = torch.cat(images_SISRNET, dim=2)
        # Mean of the upsampled input frames (the residual base).
        image_mean = input.mean(dim=1)
        # FusionNet: four depth-2 3-D convs shrink the temporal axis 5 -> 1.
        f = x
        for _ in range(4):
            f = F.leaky_relu(self.instancenorm2(self.conv3(f)))
        # BUG FIX: was f.squeeze(), which also dropped the batch dimension
        # when B == 1; only the now-singleton temporal axis should go.
        f = f.squeeze(2)
        f = F.leaky_relu(self.instancenorm3(self.conv4(f)))
        output = f + image_mean
        return output, f, image_mean
def train(args, model, device, train_loader, optimizer, epoch):
    """Run one training epoch, logging MSE loss to stdout and TensorBoard.

    Args:
        args: parsed CLI namespace (uses log_interval and batch_size).
        model: the SR network.
        device: torch device to run on.
        train_loader: yields (samples, t) pairs; samples holds five LR
            frames keyed 'image_1'..'image_5', t holds the HR 'target'.
        optimizer: optimizer stepping the model parameters.
        epoch: current epoch number (for logging only).
    """
    model.train()
    for batch_idx, (samples, t) in enumerate(train_loader):
        # Five temporally adjacent low-resolution frames of the same scene.
        image_t1 = samples['image_1'].to(device)
        image_t2 = samples['image_2'].to(device)
        image_t3 = samples['image_3'].to(device)
        image_t4 = samples['image_4'].to(device)
        image_t5 = samples['image_5'].to(device)
        target = t['target'].to(device)
        optimizer.zero_grad()
        output, fusion, image_mean = model(image_t1, image_t2, image_t3, image_t4, image_t5)
        # Pixel-wise MSE against the high-resolution target.
        loss_f = nn.MSELoss()
        loss = loss_f(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % args.log_interval == 0 and batch_idx * len(samples) <= len(train_loader.dataset):
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * args.batch_size, len(train_loader.dataset),
                100. * batch_idx * args.batch_size / len(train_loader.dataset), loss.item()))
            # Logged once per report interval, keyed by epoch.
            tb.add_scalar("Loss_train", loss.item(), epoch)
def test(args, model, device, test_loader, epoch):
    """Evaluate on the test set.

    Accumulates PSNR/SSIM for both the network output and the plain
    mean-of-upsampled-inputs baseline, saves one comparison figure per
    full batch, and prints dataset-level averages.
    """
    model.eval()
    psnr_input = 0    # running PSNR of the baseline (mean upsampled inputs)
    psnr_output = 0   # running PSNR of the network output
    ssim_in = 0
    ssim_out = 0
    cnt = 1           # figure counter; one figure per completed batch
    with torch.no_grad():
        for samples, t in test_loader:
            image_t1 = samples['image_1'].to(device)
            image_t2 = samples['image_2'].to(device)
            image_t3 = samples['image_3'].to(device)
            image_t4 = samples['image_4'].to(device)
            image_t5 = samples['image_5'].to(device)
            target = t['target'].to(device)
            output, fusion, image_mean = model(image_t1, image_t2, image_t3, image_t4, image_t5)
            image_t1 = image_t1.cpu().numpy()
            image_t2 = image_t2.cpu().numpy()
            image_t3 = image_t3.cpu().numpy()
            image_t4 = image_t4.cpu().numpy()
            image_t5 = image_t5.cpu().numpy()
            real = target.cpu().numpy()
            pred = output.cpu().numpy()
            inp = image_mean.cpu().numpy()
            fusion_img = fusion.cpu().numpy()
            # Per-sample metrics, accumulated over the whole dataset.
            for i in range(real.shape[0]):
                psnr_output += skm.peak_signal_noise_ratio(real[i, :, :, :], pred[i, :, :, :], data_range=real[i, :, :, :].max() - real[i, :, :, :].min())
                psnr_input += skm.peak_signal_noise_ratio(real[i, :, :, :], inp[i, :, :, :], data_range=real[i, :, :, :].max() - real[i, :, :, :].min())
                # SSIM averaged over the spectral bands of the sample.
                ssim_bands_out = 0
                ssim_bands_in = 0
                for j in range(real.shape[1]):
                    ssim_bands_out += skm.structural_similarity(real[i, j, :, :], pred[i, j, :, :], data_range = real[i, j, :, :].max() - real[i, j, :, :].min())
                    ssim_bands_in += skm.structural_similarity(real[i, j, :, :], inp[i, j, :, :], data_range = real[i, j, :, :].max() - real[i, j, :, :].min())
                ssim_out = ssim_out + ssim_bands_out/real.shape[1]
                ssim_in = ssim_in + ssim_bands_in/real.shape[1]
                # Assumes 4-band 128x128 HR targets and 64x64 LR inputs -- TODO confirm.
                real_image = real[i, :, :, :].reshape(4, 128, 128)
                predicted_image = pred[i, :, :, :].reshape(4, 128, 128)
                fusion_image = fusion_img[i, :, :, :].reshape(4, 128, 128)
                input_image = inp[i, :, :, :].reshape(4, 128, 128)
                t1 = image_t1[i, :, :, :].reshape(4, 64, 64)
                t2 = image_t2[i, :, :, :].reshape(4, 64, 64)
                t3 = image_t3[i, :, :, :].reshape(4, 64, 64)
                t4 = image_t4[i, :, :, :].reshape(4, 64, 64)
                t5 = image_t5[i, :, :, :].reshape(4, 64, 64)
                # Save a figure only for the last sample of a FULL batch
                # (partial final batches produce no figure).  The figure
                # titles show running averages up to this point.
                if i == args.batch_size - 1:
                    plot_images(t1, t2, t3, t4, t5, real_image, predicted_image, fusion_image, input_image, cnt, psnr_output/(cnt*args.batch_size), ssim_out/(cnt*args.batch_size), psnr_input/(cnt*args.batch_size), ssim_in/(cnt*args.batch_size))
                    cnt += 1
    # Final dataset-level averages.
    ssim_in /= len(test_loader.dataset)
    ssim_out /= len(test_loader.dataset)
    psnr_input /= len(test_loader.dataset)
    psnr_output /= len(test_loader.dataset)
    print('\nTest set: PSNR OUTPUT: ({:.2f} dB), PSNR INPUT: ({:.2f} dB), SSIM OUTPUT: ({:.2f}), SSIM INPUT: ({:.2f})\n'.format(
        psnr_output, psnr_input, ssim_out, ssim_in))
def validation(args, model, device, validation_loader, epoch):
    """Evaluate on the validation set and log loss/PSNR/SSIM to TensorBoard.

    Metrics are computed per sample, accumulated, and averaged over the
    whole validation dataset.
    """
    model.eval()
    validation_loss = 0
    psnr = 0
    ssim = 0
    with torch.no_grad():
        for samples, t in validation_loader:
            image_t1 = samples['image_1'].to(device)
            image_t2 = samples['image_2'].to(device)
            image_t3 = samples['image_3'].to(device)
            image_t4 = samples['image_4'].to(device)
            image_t5 = samples['image_5'].to(device)
            target = t['target'].to(device)
            output, fusion, image_mean = model(image_t1, image_t2, image_t3, image_t4, image_t5)
            loss_f = nn.MSELoss()
            real = target.cpu().numpy()
            pred = output.cpu().numpy()
            inp = image_mean.cpu().numpy()
            for i in range(real.shape[0]):
                # Per-sample MSE of the network output.
                validation_loss += loss_f(output[i, :, :, :], target[i, :, :, :])
                psnr += skm.peak_signal_noise_ratio(real[i, :, :, :], pred[i, :, :, :], data_range=real[i, :, :, :].max() - real[i, :, :, :].min())
                # SSIM averaged over the spectral bands of the sample.
                ssim_bands = 0
                for j in range(real.shape[1]):
                    ssim_bands += skm.structural_similarity(real[i, j, :, :], pred[i, j, :, :], data_range = real[i, j, :, :].max() - real[i, j, :, :].min())
                ssim = ssim + ssim_bands/real.shape[1]
    tb.add_scalar("Loss_validation", validation_loss/(len(validation_loader.dataset)), epoch)
    tb.add_scalar("SSIM_validation", ssim/(len(validation_loader.dataset)), epoch)
    tb.add_scalar("PSNR_validation", psnr/(len(validation_loader.dataset)), epoch)
    validation_loss /= len(validation_loader.dataset)
    psnr /= len(validation_loader.dataset)
    ssim /= len(validation_loader.dataset)
    print('\nValidation set: Average loss: {:.4f}, PSNR: ({:.2f} dB), SSIM: ({:.2f})\n'.format(validation_loss, psnr, ssim))
def plot_images(t1, t2, t3, t4, t5, target, predicted, fusion, input_imag, cnt, psnr_output, ssim_out, psnr_input, ssim_in):
    """Save a 3x5 panel comparing inputs, ground truth, prediction and fusion maps.

    Row 0: the five multitemporal input acquisitions (RGB composites).
    Row 1: HR target, prediction and averaged input, with metrics in the titles.
    Row 2: the FNet fusion output as RGB plus each single band.
    The figure is written to Results<cnt>.png.
    """
    fig = plt.figure(constrained_layout=True)
    grid = fig.add_gridspec(3, 5)
    axes = [[fig.add_subplot(grid[r, c]) for c in range(5)] for r in range(3)]

    # Row 0: the five inputs as true-colour composites.
    for col, cube in enumerate((t1, t2, t3, t4, t5)):
        ep.plot_rgb(cube, rgb=(2, 1, 0), ax=axes[0][col], title="Input " + str(col + 1))

    # Row 1: reference vs prediction vs mean input, RGB and NIR/R/G composites.
    ep.plot_rgb(target, rgb=(2, 1, 0), ax=axes[1][0], title="HR(PSNR/SSIM)")
    ep.plot_rgb(target, rgb=(3, 2, 1), ax=axes[1][1], title="NIR, R, G")
    ep.plot_rgb(predicted, rgb=(2, 1, 0), ax=axes[1][2], title="SR({:.2f}, {:.2f})".format(psnr_output, ssim_out))
    ep.plot_rgb(predicted, rgb=(3, 2, 1), ax=axes[1][3], title="NIR, R, G")
    ep.plot_rgb(input_imag, rgb=(2, 1, 0), ax=axes[1][4], title="B+M({:.2f}, {:.2f})".format(psnr_input, ssim_in))

    # Row 2: fusion network output -- RGB composite, then one panel per band.
    ep.plot_rgb(fusion, rgb=(2, 1, 0), ax=axes[2][0], title="FNet RGB")
    for col, (band, label) in enumerate([(0, "FNet Red"), (1, "FNet Green"), (2, "FNet Blue"), (3, "FNet NIR")], start=1):
        ep.plot_rgb(fusion, rgb=(band, band, band), ax=axes[2][col], title=label)

    fig.savefig("Results" + str(cnt) + ".png")
def main():
    """Parse CLI arguments, build the data loaders, train and evaluate the SR net."""
    # Training settings
    parser = argparse.ArgumentParser(description='Deep neural network for Super-resolution of multitemporal '
                                                 'Remote Sensing Images')
    parser.add_argument('--batch-size', type=int, default=16, metavar='N',
                        help='input batch size for training (default: 16)')
    parser.add_argument('--test-batch-size', type=int, default=16, metavar='N',
                        help='input batch size for testing (default: 16)')
    parser.add_argument('--epochs', type=int, default=100, metavar='N',
                        help='number of epochs to train (default: 100)')
    parser.add_argument('--lr', type=float, default=1e-4, metavar='LR',
                        help='learning rate (default: 1e-4)')
    parser.add_argument('--gamma', type=float, default=0.5, metavar='M',
                        help='Learning rate step gamma (default: 0.5)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--log-interval', type=int, default=1, metavar='N',
                        help='how many batches to wait before logging training status')
    parser.add_argument('--save-model', action='store_true', default=False,
                        help='For Saving the current Model')
    args = parser.parse_args()
    # BUG FIX: the flag was parsed but ignored (`use_cuda = True`), which both
    # defeated --no-cuda and crashed on CPU-only machines.
    use_cuda = not args.no_cuda and torch.cuda.is_available()
    torch.manual_seed(args.seed)
    device = torch.device("cuda" if use_cuda else "cpu")
    print(str(device))
    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
    # NOTE(review): 'PATH' placeholders must be replaced with real dataset
    # locations before running.
    train_dataset = SR_Dataset(csv_file='PATH',
                               root_dir='PATH', transform=ToTensor(), stand=True, norm=False)
    test_dataset = SR_Dataset(csv_file='PATH',
                              root_dir='PATH', transform=ToTensor(), stand=True, norm=False)
    validation_dataset = SR_Dataset(csv_file='PATH',
                                    root_dir='PATH', transform=ToTensor(), stand=True, norm=False)
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=args.batch_size, shuffle=True, drop_last=True, **kwargs)
    test_loader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=args.test_batch_size, shuffle=True, drop_last=True, **kwargs)
    validation_loader = torch.utils.data.DataLoader(
        validation_dataset,
        batch_size=args.test_batch_size, shuffle=True, drop_last=True, **kwargs)
    model = SR().to(device)
    model = model.type(dst_type=torch.float32)
    optimizer = optim.Adam(model.parameters(), lr=args.lr)
    scheduler = StepLR(optimizer, step_size=20, gamma=args.gamma)
    for epoch in range(1, args.epochs + 1):
        train(args, model, device, train_loader, optimizer, epoch)
        validation(args, model, device, validation_loader, epoch)
        if epoch == args.epochs:
            # Run the held-out test set once, after the final training epoch.
            # (Was hard-coded to `epoch == 100`, silently skipping the test run
            # for any other --epochs value.)
            test(args, model, device, test_loader, epoch)
        scheduler.step()
    if args.save_model:
        torch.save(model.state_dict(), "SR_Net.pt")
    tb.close()
if __name__ == '__main__':
main()
##Author: Rashmi Varma
##Created: September 28, 2017
##Career and Job Center Agent
##Agent accepts a list of free-form keywords from the command line and outputs Career
##and job opportunities that closely match the input keywords. Agent uses
##k-nearest neighbor algorithm to find matches for any given value of k.
libnames = ['math', 'os', 'operator','matplotlib', 'matplotlib.pyplot']
for libname in libnames:
try:
lib = __import__(libname)
except:
print ("One of the required libraries has not been installed. Please install %s" ,lib)
try:
from bs4 import BeautifulSoup
except:
print("\nCould not import Beautiful Soup library")
try:
import urllib2
except:
print("\nCould not import UrlLib2")
try:
import os.path
except:
print("\nCould not import os path")
try:
import csv
except:
print("\nCould not import csv")
##Checks if the input keyword is present in the dictionary, if not it scrapes
##from websites.
##If input present, it fetches the links associated with the keyword.
def clusterFunc(key):
    # Look the keyword list up in the cached table and, when found, re-run the
    # nearest-neighbour agent over the cached rows (k='0' means "no limit path").
    clusterData = {}
    try:
        filename = 'Dictionary.csv'
        with open(filename,'rb') as infile:  # 'rb' + reader.next(): Python 2 csv idiom
            reader = csv.reader(infile,dialect='excel')
            rows = reader.next()  # first row of the file (the header)
            # NOTE(review): only the FIRST row's second column is compared, so
            # this only matches when the keyword happens to sit in the header --
            # TODO confirm the intended lookup semantics.
            if(rows[1]==key):
                print("Keyword present in lookup file")
                for rows in reader:
                    # clusterData is overwritten each iteration and never read
                    # afterwards -- presumably leftover scaffolding.
                    clusterData['title'] = rows[1]
                    clusterData['content'] = rows[4]
                    clusterData['link'] = rows[2]
                agent(key, '0')
        infile.close()  # redundant: the `with` block already closed the file
    # NOTE(review): the bare except also hides programming errors, not just a
    # missing/empty table.
    except:
        print("The lookup table has no data. Please perform a search on Step 2 to populate the table")
##Compares the index of the sliced down Jaccard values according to k,
##to their position in the link's list. This makes finding the
##appropriate link to display easier
def list_duplicates_of(seq, item):
    """Return every index at which *item* occurs in *seq*, in ascending order.

    Uses ``seq.index`` so the matching semantics are whatever the sequence
    provides (e.g. overlapping substring hits for strings).
    """
    positions = []
    search_from = 0
    while True:
        try:
            found = seq.index(item, search_from)
        except ValueError:
            # No further occurrence: we are done.
            return positions
        positions.append(found)
        search_from = found + 1
##def sendToScreen(d,knn_k,link):
## for i in range(0,knn_k):
## index = d[i]
## print(link[index])
## links = link[index]
## print('<a href="{0}">{0}</a>'.format(link))
##Actuator computes which documents are closest to our keyword and retrieves them
def actuator(k1,link, k):
    # Select the documents whose distance is among the k smallest values of k1
    # and print their links (at most knn_k of them).
##    l = len(k1)
    d=[]
    d1=[]
    knn_k = int(k)
    orderedJacc = sorted(k1)
    takingK = []
##    for x in range(0,k):
    # NOTE(review): this slices with the ORIGINAL `k` (which is a string on the
    # clusterFunc path, where k='0'), not with knn_k -- slicing with a string
    # raises TypeError; confirm intended behaviour.
    takingK = (orderedJacc[:k])
    # NOTE(review): the outer loop appends the same index lists len(link)
    # times, so `d` holds duplicated entries; only the first knn_k links are
    # printed anyway.
    for x in range(0,len(link)):
        for k in takingK:
            d.append(list_duplicates_of(k1,k))
    count=0
    for everyd in range(0,len(d)):
        if count==knn_k:
            break;
        else:
            dnd=d[everyd]
            for nn in dnd:
                d1.append(nn)
                if len(d1)==knn_k:
                    break
                else:
                    links = link[nn]
                    print ("\n",links)
            count=count+1
#Here, we calculate Jaccard's distance and send it back to the Analysis
##function of the agent
## http://journocode.com/2016/03/10/similarity-and-distance-part-1/
def JacCal(str1, str2, lenFreq, lenLink, lenKey):
    """Return the Jaccard distance as a percentage, rounded to 2 decimal places.

    Jaccard similarity = |intersection| / |union|.  Here *str1* plays the role
    of the intersection size and the union is (lenKey + lenLink) - str1.
    *str2* and *lenFreq* are unused but kept for interface compatibility.
    """
    num = float(str1)
    # BUG FIX: the original computed the union size (`deno = den - num`) and
    # then left it unused, dividing by (lenKey + lenLink) instead -- so the
    # result was not the Jaccard distance described in the module comments.
    # The fix preserves the ranking order used downstream (both forms are
    # monotonic in `num` for a fixed lenKey + lenLink).
    union = float(lenKey + lenLink) - num
    if union == 0:
        # Degenerate case (intersection covers everything): distance 0.
        return 0.0
    similarity = num / union
    return round((1 - similarity) * 100, 2)
##This function sends data from agent about frequency to our
##function which computes Jaccard's distance
def frequencyAnalysis(freq, link, freqLength, lenLink, key, k):
    # Turn each document's keyword-hit count into a Jaccard-style distance,
    # then let the actuator pick and print the k nearest documents.
    distances = [JacCal(freq[idx], link[idx], freqLength, lenLink, len(key))
                 for idx in range(freqLength)]
    actuator(distances, link, k)
##Agent computes all the details. Agent reads details from our table and computes
##frequency of the keyword's occurence in our retreieved links
def agent(key,k):
    # Load the whole lookup table, count how often each keyword occurs in each
    # document's content, then hand the counts to the distance/selection stage.
    filename = 'Dictionary.csv'
    title = []
    content = []
    link = []
    freq = []
    index = 0  # NOTE(review): `index` and `ind` are never used below
    ind = []
    with open(filename,'rb') as infile:  # 'rb' + reader.next(): Python 2 csv idiom
        reader = csv.reader(infile,dialect='excel')
        rows = reader.next()  # skip the header row
        for rows in reader:
            title.append(rows[1])
            content.append(rows[4])
            link.append(rows[2])
        lenTitle = len(title)
        lenContent = len(content)
        lenLink = len(link)
    infile.close()  # redundant: the `with` block already closed the file
    kk = len(key)
    for x in range(0,lenTitle):
        countC = 0
        # total occurrences of all keywords in this document's content
        for y in range(0,kk):
            countC = countC + content[x].count(key[y])
        freq.append(countC)
    freqLength = len(freq)
    frequencyAnalysis(freq, link, freqLength, lenLink,key,k)
##The function used to write to the file
def writeFile(key,title,link,src,content):
    # Append one scraped job row [keyword, title, link, source, content] to the
    # lookup table, skipping it when it matches the file's first row.
    filename = 'Dictionary.csv'
    lists = [key, title, link, src, content]
    with open(filename,'rb') as infile:
        reader = csv.reader(infile,dialect='excel')
        rows = reader.next()
        # NOTE(review): only the FIRST row (normally the header) is compared,
        # so this duplicate check can never match a previously written data
        # row -- duplicates accumulate on repeated searches.
        if(rows==lists):
            print("\n\nAlready present")
            infile.close()
        else:
            with open(filename,'a') as outfile:
                try:
                    writer = csv.writer(outfile,dialect='excel')
                    writer.writerow(lists)
                    outfile.close()  # redundant inside `with`
                except UnicodeError:
                    # Rows with non-ASCII text are silently dropped
                    # (Python 2 csv limitation).
                    pass
##This function is used to retrieve the data from the URL's scrapped.
##Every job search opens to an individual page which contains more details about it.
##This function retrieves those details
def findContent(source, page,source_page):
    # Fetch a job ad's detail page and return the descriptive <span> texts.
    # `page` is the per-ad detail URL; `source_page` is the results page (used
    # only for the 'indeed' layout).
    # NOTE(review): both URLs are fetched up front even though only one soup is
    # used per source -- one HTTP request per call is wasted.
    co = []
    urlPage1 = urllib2.urlopen(page)
    soup1 = BeautifulSoup(urlPage1, 'html.parser')
    urlPageIndeed = urllib2.urlopen(source_page)
    soup2 = BeautifulSoup(urlPageIndeed, 'html.parser')
    if source=='acm':
        for everyline in soup1.find_all('span'):
            if hasattr(everyline, "text"):
                co.append(everyline.text)
        return co
    if source=='ieee':
        for everyline in soup1.find_all('span'):
            if hasattr(everyline, "text"):
                co.append(everyline.text)
        return co
    if source == 'indeed':
        # Indeed puts the summary on the results page itself.
        for everyline in soup2.find_all('span',{'class':'summary','itemprop':'description'}):
            if hasattr(everyline, "text"):
                co.append(everyline.text)
        return co
##The scrapper is a web scrapping function which uses BeautifulSoup library
##The scrapper scraps data and saves it to the lookup table for future use
def scrapper(source, page, key, k):
    """Scrape job postings from one search-results page into the lookup table.

    Args:
        source: one of 'acm', 'ieee' or 'indeed' -- selects the HTML layout.
        page: fully-built search-results URL to fetch.
        key: list of raw keywords, stored alongside each row.
        k: requested neighbour count (passed through by callers, unused here).
    """
    urlPage = urllib2.urlopen(page)
    soup = BeautifulSoup(urlPage, 'html.parser')
    # BUG FIX: the original condition was `source=='acm' or 'ieee'`, which is
    # always truthy ('ieee' is a non-empty string), so the ACM/IEEE branch also
    # ran for 'indeed' pages and wrote junk rows into the table.
    if source in ('acm', 'ieee'):
        for row in soup.find_all('h3'):
            if hasattr(row, "text"):
                title = row.text
                for a in row.find_all('a', href=True):
                    links = page + a['href']
                    content = findContent(source, links, page)
                    writeFile(key, title, links, source, content)
    if source == 'indeed':
        for row in soup.find_all('a', {'target': '_blank', 'data-tn-element': 'jobTitle'}):
            if hasattr(row, "text"):
                title = row.text
                links = page + row.get('href')
                content = findContent(source, links, page)
                writeFile(key, title, links, source, content)
##The sensor is responsible for getting input data to the agent.
## Here, the sensor readies the URL and calls the web scrapping function
##We have currently restricted the reading to 15 values per page. This can be increased but it also increases the execution time of the program
##The program currently takes 3-4 minutes for scrapping a new keyword sequence
def sensor(acm_page, ieee_page, indeed_page, keywords, k, key1):
    # Build one search URL per source by joining the (already URL-escaped)
    # keywords with '+', then hand each finished page to the scrapper.
    # ACM/IEEE results are capped at 15 rows per page to bound execution time.
    print("\nGathering data...")
    query = '+'.join(keywords)
    acm_page = acm_page + query + '?rows=15'
    ieee_page = ieee_page + query + '?rows=15'
    indeed_page = indeed_page + query
    scrapper('acm', acm_page, key1, k)
    scrapper('ieee', ieee_page, key1, k)
    scrapper('indeed', indeed_page, key1, k)
#The environment creates the url for scrapping data and sends these Url's to the sensor.
#The environment also checks if entered keyword is present in the look up table. Ideally if it is present, it won't send data to the sensor but simply read from look up table
def environment(keywords, k,key1):
    # Build the per-source search URLs, trigger scraping via the sensor, then
    # run the nearest-neighbour agent on the refreshed lookup table.
    filename = 'Dictionary.csv'
    with open(filename,'rb') as infile:
        reader = csv.reader(infile,dialect='excel')
        rows = reader.next()
        # NOTE(review): this compares the header row to the keyword list and
        # the result is ignored -- the cache check has no effect and scraping
        # always runs.
        if(rows==key1):
            print("\n\nAlready present")
    infile.close()  # redundant: the `with` block already closed the file
    acm_page = 'http://jobs.acm.org/jobs/results/keyword/'
    ieee_page = 'http://jobs.ieee.org/jobs/results/keyword/'
    indeed_page = 'https://www.indeed.com/jobs?q='
    sensor(acm_page, ieee_page, indeed_page, keywords, k,key1)
    agent(key1, k)
## The code runs continuously till 0 is pressed to quit it.
##On opening, the look up table gets created. If it already exists then we do nothing,
##otherwise we create and write headers to it
## Program can take multiple keywords as input from the user.
##User also takes the value of k here.These values are passed to the environment
def main():
    """Interactive driver loop (Python 2: uses raw_input).

    Creates the lookup table when missing, then loops on user commands:
    0 quits, 1 scrapes job ads for the given keywords and prints the k nearest
    matches, 2 clusters on a keyword already in the lookup table.
    """
    quitFlag = False
    filename = 'Dictionary.csv'
    file_exists = os.path.isfile(filename)
    headers = ['Keyword', 'Title', 'Link', 'Source', 'Content']
    # BUG FIX: the original opened the file with 'wb' unconditionally, which
    # truncated the existing lookup table on every run, and then referenced an
    # undefined name `writer`. Only create the file (with its header) when it
    # does not exist yet.
    if not file_exists:
        with open(filename, 'wb') as csvfile:
            csv.DictWriter(csvfile, headers).writeheader()
    while quitFlag == False:
        keywords = []
        key1 = []
        keyCounter = 0
        try:
            x = int(raw_input("\nPlease select one of the options given below: \n0. Quit \n1. Find job ads \n2. Cluster\nYour choice:"))
        except:
            print("\nChoice entered is not an integer.")
        try:
            if x == 0:
                quitFlag = True  # was `quitFlag==True`, a no-op comparison
                break
            elif x == 1:
                while keyCounter == 0:
                    key = raw_input("\nPlease enter Job Title, keywords, etc (Separate multiple keywords by comma):")
                    if len(key) == 0:
                        print("\nPlease enter atleast one keyword to proceed")
                        keyCounter = 0
                    else:
                        keyCounter = 1
                # key1 keeps the raw keywords; keywords holds URL-safe versions.
                for everyKey in key.split(','):
                    everyKey = everyKey.strip()
                    key1.append(everyKey)
                    keywords.append(everyKey.replace(" ", "+"))
                try:
                    k = int(raw_input("\nPlease enter how many job searches you want to see at a time(k):"))
                except:
                    print("Value of number of job searches needs to be an integer only. Please run the program again and try search again")
                    break
                environment(keywords, k, key1)
                quitFlag = False
            elif x == 2:
                print("Clustering")
                while keyCounter == 0:
                    key = raw_input("\nPlease enter Job Title, keywords, etc (Separate multiple keywords by comma):")
                    if len(key) == 0:
                        print("\nPlease enter atleast one keyword to proceed")
                        keyCounter = 0
                    else:
                        keyCounter = 1
                for everyKey in key.split(','):
                    everyKey = everyKey.strip()
                    key1.append(everyKey)
                    keywords.append(everyKey.replace(" ", "+"))
                clusterFunc(key1)
            else:
                # BUG FIX: the original chained `if x==1: ... if x==2: ... else:`
                # so this error message also printed after every successful
                # option-1 search; elif restores the intended mutual exclusion.
                print("\nPlease input choices again")
                quitFlag = False
        except:
            print("Please enter appropriate values only")
main()
# -*- coding: utf-8 -*-
import arcpy
import pandas as pd

# Shapefile with the map of Japan.
inFeatures = "D:\python\soccer\japan_ver80.shp"
# Geodatabase containing the feature class to be updated.
arcpy.env.workspace = "C:\ArcPySample\ArcPyJapan.gdb"

# Collect every attribute field; the geometry column cannot be exported to a
# NumPy array, so it is skipped.
field_list = [field.name for field in arcpy.ListFields(inFeatures)
              if field.type != "Geometry"]

df = pd.DataFrame(arcpy.da.FeatureClassToNumPyArray(inFeatures, field_list, null_value=-9999))

# Aggregate population (P_NUM) and household (H_NUM) counts per prefecture (KEN).
df_group = df.groupby('KEN')['P_NUM', 'H_NUM'].sum()

for ken_name, totals in df_group.iterrows():
    # A fresh cursor is needed per prefecture: an UpdateCursor is exhausted
    # once it has been iterated.
    cursor = arcpy.UpdateCursor("Japan")
    for feature in cursor:
        if ken_name == feature.KEN:
            feature.setValue("P_NUM", totals.P_NUM)
            feature.setValue("H_NUM", totals.H_NUM)
            cursor.updateRow(feature)
from data_importers.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
    """Xpress importer for Warwick District Council (WAW), 2023-05-04 elections."""

    council_id = "WAW"
    addresses_name = (
        "2023-05-04/2023-03-28T16:20:27.419644/Democracy_Club__04May2023.tsv"
    )
    stations_name = (
        "2023-05-04/2023-03-28T16:20:27.419644/Democracy_Club__04May2023.tsv"
    )
    elections = ["2023-05-04"]
    csv_delimiter = "\t"

    def address_record_to_dict(self, record):
        """Drop records with known-bad address data, otherwise defer to the base class."""
        bad_uprns = {
            "100071258162",  # BUDBROOKE LODGE FARM, HAMPTON ROAD, WARWICK
            "100071258929",  # FLAT VINE INN 86-88 WEST STREET, WARWICK
            "10013184069",  # THE BUNGALOW, GUYS CLIFFE STABLES, GUYS CLIFFE, WARWICK
            "100071258712",  # FLAT 32 SPINNEY HILL, WARWICK
            "100071255338",  # 14B REGENCY HOUSE NEWBOLD TERRACE, LEAMINGTON SPA
            "10023406679",  # 44 WARWICK PLACE, LEAMINGTON SPA
            "10094931337",  # 28 SANDPIT BOULEVARD, WARWICK
            "10013183598",  # THE FLAT THE WATERSIDE INN QUEENSWAY, LEAMINGTON SPA
            "100071511638",  # THE COTTAGE SHREWLEY COMMON, SHREWLEY
        }
        bad_postcodes = {
            "CV32 7AW",
            "CV31 1BN",
            "CV32 5TA",
            "CV32 6AN",
            "CV34 5BY",
            "CV8 2FE",
            "CV34 8BP",  # UPPERFIELD ROAD, WARWICK
        }
        if record.property_urn.strip().lstrip("0") in bad_uprns:
            return None
        if record.addressline6 in bad_postcodes:
            return None
        return super().address_record_to_dict(record)

    def station_record_to_dict(self, record):
        """Patch known-bad station postcodes, otherwise defer to the base class."""
        # Warnings below checked, no correction needed:
        # Polling station Chadwick End Village Hall (11908) is in Solihull Metropolitan Borough Council (SOL)
        # Polling station The Ramphal Building (11888) is in Coventry City Council (COV)
        postcode_overrides = {
            # St. Paul's Church Hall STATION B, Leicester Street, Royal Leamington Spa
            "11626": "CV32 4TE",
            # Temporary Building, (Grassed area), Fusiliers Way, Warwick
            "11919": "CV34 8AG",
        }
        override = postcode_overrides.get(record.polling_place_id)
        if override:
            record = record._replace(polling_place_postcode=override)
        return super().station_record_to_dict(record)
import os, subprocess
import astropy.io.fits as fits
from astropy import wcs
import numpy as np
import scipy.ndimage as ndimage
from scipy.ndimage.filters import median_filter
from pyklip.instruments.Instrument import Data
import pyklip.klip as klip
class Ifs(Data):
"""
A spectral cube of Osiris IFS Data.
Args:
data_cube: FITS file list with 3D-cubes (Nwvs, Ny, Nx) with an osiris IFS data
telluric_cube: single telluric reference FITS file with a 3D-cube (Nwvs, Ny, Nx) with an osiris IFS data.
psf_cube_size: size of the psf cube to save (length along 1 dimension)
coaddslices: if not None, combine (addition) slices together to reduce the size of the spectral cube.
coaddslices should be an integer corresponding to the number of slices to be combined.
Attributes:
input: Array of shape (N,y,x) for N images of shape (y,x)
centers: Array of shape (N,2) for N centers in the format [x_cent, y_cent]
filenums: Array of size N for the numerical index to map data to file that was passed in
filenames: Array of size N for the actual filepath of the file that corresponds to the data
PAs: Array of N for the parallactic angle rotation of the target (used for ADI) [in degrees]
wvs: Array of N wavelengths of the images (used for SDI) [in microns]. For polarization data, defaults to "None"
IWA: a floating point scalar (not array). Specifies to inner working angle in pixels
output: Array of shape (b, len(files), len(uniq_wvs), y, x) where b is the number of different KL basis cutoffs
psfs: Spectral cube of size (Nwv, psfy, psfx) where psf_cube_size defines the size of psfy, psfx.
psf_center: [x, y] location of the center of the PSF for a frame in self.psfs
flipx: True by default. Determines whether a relfection about the x axis is necessary to rotate image North-up East left
nfiles: number of datacubes
nwvs: number of wavelengths
"""
    # class initialization
    # Constructor
    def __init__(self, data_cube_list, telluric_cube,
                 guess_center=None,recalculate_center_cadi=False, centers = None,
                 psf_cube_size=21,
                 coaddslices=None, nan_mask_boxsize=0,median_filter_boxsize = 0,badpix2nan=False):
        """Load the science cubes and the telluric PSF cube.

        See the class docstring for the meaning of the arguments and of the
        attributes populated here.
        """
        super(Ifs, self).__init__()
        self.nfiles = len(data_cube_list)
        # read in the data
        self.filenums = []
        self.filenames = []
        self.prihdrs = []
        self.wvs = []
        self.centers = []
        for k,data_cube in enumerate(data_cube_list):
            with fits.open(data_cube) as hdulist:
                print("Reading "+data_cube)
                tmp_input = np.rollaxis(np.rollaxis(hdulist[0].data,2),2,1)
                self.nwvs = tmp_input.shape[0]
                # On the first cube self.input does not exist yet, so the
                # concatenate raises and the except branch initialises it.
                try:
                    self.input = np.concatenate((self.input,tmp_input),axis=0) # 3D cube, Nwvs, Ny, Nx
                except:
                    self.input = tmp_input
                self.prihdrs.append(hdulist[0].header)
                # Move dimensions of input array to match pyklip conventions
                self.filenums.extend(np.ones(self.nwvs)*k)
                self.filenames.extend([os.path.basename(data_cube),]*self.nwvs)
                # centers are at dim/2
                init_wv = self.prihdrs[k]["CRVAL1"]/1000. # wv for first slice in mum
                dwv = self.prihdrs[k]["CDELT1"]/1000. # wv interval between 2 slices in mum
                self.wvs.extend(np.arange(init_wv,init_wv+dwv*self.nwvs,dwv))
                # Plate scale of the spectrograph
                self.platescale = float(self.prihdrs[k]["SSCALE"])
                if guess_center is None:
                    self.centers.extend(np.array([[img.shape[1]/2., img.shape[0]/2.] for img in tmp_input]))
                else:
                    self.centers.extend(np.array([guess_center,]*self.nwvs))
        # Explicit per-cube centers override any guess made above.
        if centers is not None:
            self.centers = []
            for x,y in centers:
                self.centers.extend([[x,y],]*self.nwvs)
        self.wvs = np.array(self.wvs)
        self.centers = np.array(self.centers)
        # TODO set the PAs right?
        self.PAs = np.zeros(self.wvs.shape)
        if badpix2nan:
            # Flag pixels deviating >5 sigma from a 3x3x3 median-smoothed cube
            # (and exact zeros) as NaN.
            box_w = 3
            smooth_input = median_filter(self.input,size=(box_w,box_w,box_w))
            res_input = np.abs((self.input - smooth_input))
            res_input = res_input/np.nanstd(res_input,axis=(1,2))[:,None,None]
            where_bad = np.where(res_input>5)
            self.input[where_bad] = np.nan
            self.input[np.where(self.input==0)] = np.nan
        # (commented-out matplotlib debug plotting removed)
        # read in the psf cube
        with fits.open(telluric_cube) as hdulist:
            psfs = hdulist[0].data # Nwvs, Ny, Nx
        # Move dimensions of input array to match pyklip conventions
        psfs = np.rollaxis(np.rollaxis(psfs,2),2,1)
        # The definition of psfs_wvs requires that no wavelengths has been skipped in the input files
        # But it works with keepslices
        # NOTE(review): init_wv/dwv come from the LAST science cube's header --
        # assumes all cubes share the same wavelength solution; confirm.
        self.psfs_wvs = np.arange(init_wv,init_wv+dwv*self.nwvs,dwv)
        # trim the cube
        pixelsbefore = psf_cube_size//2
        pixelsafter = psf_cube_size - pixelsbefore
        psfs = np.pad(psfs,((0,0),(pixelsbefore,pixelsafter),(pixelsbefore,pixelsafter)),mode="constant",constant_values=0)
        psfs_centers = np.array([np.unravel_index(np.nanargmax(img),img.shape) for img in psfs])
        # Change center index order to match y,x convention
        psfs_centers = [(cent[1],cent[0]) for cent in psfs_centers]
        psfs_centers = np.array(psfs_centers)
        center0 = np.median(psfs_centers,axis=0)
        # TODO Calculate precise centroid
        from pyklip.fakes import gaussfit2d
        psfs_centers = []
        self.star_peaks = []
        self.psfs = np.zeros((psfs.shape[0],psf_cube_size,psf_cube_size))
        for k,im in enumerate(psfs):
            # Refine the star centroid with a 2D Gaussian fit around the
            # median peak position.
            corrflux, fwhm, spotx, spoty = gaussfit2d(im, center0[0], center0[1], searchrad=5, guessfwhm=3, guesspeak=np.nanmax(im), refinefit=True)
            #spotx, spoty = center0
            psfs_centers.append((spotx, spoty))
            self.star_peaks.append(corrflux)
            # Get the closest pixel
            xarr_spot = int(np.round(spotx))
            yarr_spot = int(np.round(spoty))
            # Extract a stamp around the sat spot
            stamp = im[(yarr_spot-pixelsbefore):(yarr_spot+pixelsafter),\
                        (xarr_spot-pixelsbefore):(xarr_spot+pixelsafter)]
            # Define coordinates grids for the stamp
            stamp_x, stamp_y = np.meshgrid(np.arange(psf_cube_size, dtype=np.float32),
                                           np.arange(psf_cube_size, dtype=np.float32))
            # Calculate the shift of the sat spot centroid relative to the closest pixel.
            dx = spotx-xarr_spot
            dy = spoty-yarr_spot
            # The goal of the following section is to remove the local background (or sky) around the sat spot.
            # The plane is defined by 3 constants (a,b,c) such that z = a*x+b*y+c
            # In order to do so we fit a 2D plane to the stamp after having masked the sat spot (centered disk)
            stamp_r = np.sqrt((stamp_x-dx-psf_cube_size//2)**2+(stamp_y-dy-psf_cube_size//2)**2)
            # NOTE(review): this local import binds `copy` in the function
            # scope; it is reused further down (coaddslices / recenter
            # branches), which only works because this loop runs first.
            from copy import copy
            stamp_masked = copy(stamp)
            stamp_x_masked = stamp_x-dx
            stamp_y_masked = stamp_y-dy
            stamp_center = np.where(stamp_r<7)
            stamp_masked[stamp_center] = np.nan
            stamp_x_masked[stamp_center] = np.nan
            stamp_y_masked[stamp_center] = np.nan
            background_med = np.nanmedian(stamp_masked)
            stamp_masked = stamp_masked - background_med
            #Solve 2d linear fit to remove background
            xx = np.nansum(stamp_x_masked**2)
            yy = np.nansum(stamp_y_masked**2)
            xy = np.nansum(stamp_y_masked*stamp_x_masked)
            xz = np.nansum(stamp_masked*stamp_x_masked)
            yz = np.nansum(stamp_y_masked*stamp_masked)
            #Cramer's rule
            a = (xz*yy-yz*xy)/(xx*yy-xy*xy)
            b = (xx*yz-xy*xz)/(xx*yy-xy*xy)
            stamp = stamp - (a*(stamp_x-dx)+b*(stamp_y-dy) + background_med)
            # Shift the stamp so the fitted centroid lands on the pixel grid.
            stamp = ndimage.map_coordinates(stamp, [stamp_y+dy, stamp_x+dx])
            self.psfs[k,:,:] = stamp
        # (commented-out matplotlib debug plotting removed)
        # Spectrum of the telluric star
        self.star_peaks = np.array(self.star_peaks)
        # TODO include brightness of the telluric star
        self.dn_per_contrast = np.array([self.star_peaks[np.where(self.psfs_wvs==wv)[0]] for wv in self.wvs])
        # we don't need to flip x for North Up East left
        self.flipx = False
        # I have no idea
        self.IWA = 0.0
        # Infinity...
        self.OWA = 10000
        self._output = None
        if coaddslices is not None:
            # Coadd groups of `coaddslices` spectral slices to shrink the cube;
            # all per-slice metadata is rebuilt to match the reduced axis.
            N_chunks = self.psfs.shape[0]//coaddslices
            self.psfs = np.array([np.nanmean(self.psfs[k*coaddslices:(k+1)*coaddslices,:,:],axis=0) for k in range(N_chunks)])
            self.psfs_wvs = np.array([np.nanmean(self.psfs_wvs[k*coaddslices:(k+1)*coaddslices]) for k in range(N_chunks)])
            new_wvs = []
            new_filenums = []
            new_centers = []
            new_PAs = []
            new_filenames = []
            new_dn_per_contrast = []
            # new_wcs = []
            for k in range(self.nfiles):
                tmp_input = copy(self.input[k*self.nwvs:(k+1)*self.nwvs,:,:])
                tmp_input = np.array([np.nanmean(tmp_input[l*coaddslices:(l+1)*coaddslices,:,:],axis=0) for l in range(N_chunks)])
                try:
                    new_input = np.concatenate((new_input,tmp_input),axis=0) # 3D cube, Nwvs, Ny, Nx
                except:
                    new_input = tmp_input
                new_nwvs = tmp_input.shape[0]
                new_wvs.extend(np.array([np.nanmean(self.wvs[k*self.nwvs:(k+1)*self.nwvs][l*coaddslices:(l+1)*coaddslices]) for l in range(N_chunks)]))
                new_filenums.extend(np.array([self.filenums[k*self.nwvs:(k+1)*self.nwvs][l*coaddslices] for l in range(N_chunks)]))
                new_centers.extend([np.nanmean(self.centers[k*self.nwvs:(k+1)*self.nwvs,:][l*coaddslices:(l+1)*coaddslices,:],axis=0) for l in range(N_chunks)])
                new_PAs.extend([self.PAs[k*self.nwvs:(k+1)*self.nwvs][l*coaddslices] for l in range(N_chunks)])
                new_filenames.extend([self.filenames[k*self.nwvs:(k+1)*self.nwvs][l*coaddslices] for l in range(N_chunks)])
                new_dn_per_contrast.extend([np.nanmean(self.dn_per_contrast[k*self.nwvs:(k+1)*self.nwvs][l*coaddslices:(l+1)*coaddslices]) for l in range(N_chunks)])
                # new_wcs.extend([self.wcs[k*self.nwvs:(k+1)*self.nwvs][l*coaddslices] for l in range(N_chunks)])
            self.input = new_input
            self.wvs = np.array(new_wvs)
            self.nwvs = new_nwvs
            self.filenums = np.array(new_filenums)
            self.centers = np.array(new_centers)
            self.PAs = np.array(new_PAs)
            self.filenames = new_filenames
            self.dn_per_contrast = np.array(new_dn_per_contrast)
            # self.wcs = new_wcs
        self.dn_per_contrast = np.squeeze(self.dn_per_contrast)
        # # TODO: need to check how it works (cf GPI)
        # self.wcs = np.array([None for _ in range(self.nfiles * self.nwvs)])
        # Creating WCS info for OSIRIS
        self.wcs = []
        for vert_angle in self.PAs:
            w = wcs.WCS()
            vert_angle = np.radians(vert_angle)
            # Rotation matrix (with x negated) scaled by the plate scale in deg/pix.
            pc = np.array([[(-1)*np.cos(vert_angle), (-1)*-np.sin(vert_angle)],[np.sin(vert_angle), np.cos(vert_angle)]])
            cdmatrix = pc * self.platescale /3600.
            w.wcs.cd = cdmatrix
            self.wcs.append(w)
        self.wcs = np.array(self.wcs)
        # (commented-out matplotlib debug plotting removed)
        if median_filter_boxsize != 0:
            self.input = median_filter(self.input,size=(1,median_filter_boxsize,median_filter_boxsize))
            self.psfs = median_filter(self.psfs,size=(1,median_filter_boxsize,median_filter_boxsize))
        if nan_mask_boxsize != 0:
            # zeros are nans, and anything adjacient to a pixel less than zero is 0.
            input_nans = np.where(np.isnan(self.input))
            self.input[input_nans] = 0
            input_minfilter = ndimage.minimum_filter(self.input, (0, nan_mask_boxsize, nan_mask_boxsize))
            self.input[np.where(input_minfilter <= 0)] = np.nan
            # Also NaN-out a border of half the box size on every spatial edge.
            self.input[:,0:nan_mask_boxsize//2,:] = np.nan
            self.input[:,-nan_mask_boxsize//2+1::,:] = np.nan
            self.input[:,:,0:nan_mask_boxsize//2] = np.nan
            self.input[:,:,-nan_mask_boxsize//2+1::] = np.nan
        # (commented-out matplotlib debug plotting removed)
        # Required for automatically querying Simbad for the spectral type of the star.
        self.object_name = "HR8799"#self.prihdr["OBJECT"]
        if recalculate_center_cadi:
            # Coarse-to-fine grid search for the star center that minimises the
            # classical-ADI/SDI residual, one center per input cube.
            for k in range(self.nfiles):
                tmp_input = copy(self.input[k*self.nwvs:(k+1)*self.nwvs,:,:])
                if guess_center is None:
                    xcen0,ycen0 = tmp_input.shape[2]/2., tmp_input.shape[1]/2.
                else:
                    xcen0,ycen0 = guess_center
                range_list = [100,20,4,1]
                samples = 10
                for it,width in enumerate(range_list):
                    x_list,y_list = np.linspace(xcen0-width/2.,xcen0+width/2.,samples),np.linspace(ycen0-width/2.,ycen0+width/2.,samples)
                    # print(x_list,y_list)
                    xcen_grid,ycen_grid = np.meshgrid(x_list,y_list)
                    cost_func = np.zeros(xcen_grid.shape)
                    cost_func.shape = [np.size(cost_func)]
                    import multiprocessing as mp
                    import itertools
                    self.N_threads = mp.cpu_count()
                    pool = mp.Pool(processes=self.N_threads)
                    #multitask this
                    # NOTE(review): itertools.izip only exists in Python 2 --
                    # this branch fails on Python 3 as written.
                    outputs_list = pool.map(casdi_residual_star, itertools.izip(xcen_grid.ravel(),
                                                                               ycen_grid.ravel(),
                                                                               itertools.repeat(tmp_input),
                                                                               itertools.repeat(self.wvs[k*self.nwvs:(k+1)*self.nwvs])))
                    for l,out in enumerate(outputs_list):
                        cost_func[l] = out
                    pool.close()
                    xcen0 = xcen_grid.ravel()[np.argmin(cost_func)]
                    ycen0 = ycen_grid.ravel()[np.argmin(cost_func)]
                    # (commented-out matplotlib debug plotting removed)
                self.centers[k*self.nwvs:(k+1)*self.nwvs,:] = np.array([(xcen0,ycen0),]*tmp_input.shape[0])
        # (commented-out scipy.optimize recentering experiment removed)
    ################################
    ### Instance Required Fields ###
    ################################
    # Plain pass-through properties backing the abstract attributes that the
    # pyklip Data base class requires every instrument class to expose.
    @property
    def input(self):
        return self._input
    @input.setter
    def input(self, newval):
        self._input = newval
    @property
    def centers(self):
        return self._centers
    @centers.setter
    def centers(self, newval):
        self._centers = newval
    @property
    def filenums(self):
        return self._filenums
    @filenums.setter
    def filenums(self, newval):
        self._filenums = newval
    @property
    def filenames(self):
        return self._filenames
    @filenames.setter
    def filenames(self, newval):
        self._filenames = newval
    @property
    def PAs(self):
        return self._PAs
    @PAs.setter
    def PAs(self, newval):
        self._PAs = newval
    @property
    def wvs(self):
        return self._wvs
    @wvs.setter
    def wvs(self, newval):
        self._wvs = newval
    @property
    def wcs(self):
        return self._wcs
    @wcs.setter
    def wcs(self, newval):
        self._wcs = newval
    @property
    def IWA(self):
        return self._IWA
    @IWA.setter
    def IWA(self, newval):
        self._IWA = newval
    @property
    def output(self):
        return self._output
    @output.setter
    def output(self, newval):
        self._output = newval
###############
### Methods ###
###############
def readdata(self, filepaths):
"""
Reads in the data from the files in the filelist and writes them to fields
"""
pass
    def savedata(self, filepath, data, center=None, klipparams=None, filetype="", zaxis=None, more_keywords=None,
                 pyklip_output=True):
        """
        Save SPHERE Data.

        Args:
            filepath: path to file to output
            data: 2D or 3D data to save
            center: center of the image to be saved in the header as the keywords PSFCENTX and PSFCENTY in pixels.
                The first pixel has coordinates (0,0)
            klipparams: a string of klip parameters
            filetype: filetype of the object (e.g. "KL Mode Cube", "PSF Subtracted Spectral Cube")
            zaxis: a list of values for the zaxis of the datacub (for KL mode cubes currently)
            more_keywords (dictionary) : a dictionary {key: value, key:value} of header keywords and values which will
                written into the primary header
            pyklip_output: (default True) If True, indicates that the attributes self.output_wcs and self.output_centers
                have been defined.
        """
        # NOTE(review): the *center* argument is unconditionally overwritten
        # below, and *more_keywords* is accepted but never written into the
        # header -- confirm whether that is intended.
        hdulist = fits.HDUList()
        # Reuse the first raw file's primary header so instrument keywords survive.
        hdulist.append(fits.PrimaryHDU(data=data, header=self.prihdrs[0]))
        # save all the files we used in the reduction
        # we'll assume you used all the input files
        # remove duplicates from list
        filenames = np.unique(self.filenames)
        nfiles = np.size(filenames)
        hdulist[0].header["DRPNFILE"] = (nfiles, "Num raw files used in pyKLIP")
        for i, filename in enumerate(filenames):
            hdulist[0].header["FILE_{0}".format(i)] = filename + '.fits'
        # write out psf subtraction parameters
        # get pyKLIP revision number
        pykliproot = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
        # the universal_newline argument is just so python3 returns a string instead of bytes
        # this will probably come to bite me later
        try:
            pyklipver = subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD'], cwd=pykliproot, universal_newlines=True).strip()
        except:
            # Not a git checkout (e.g. a pip install) -- fall back to a placeholder.
            pyklipver = "unknown"
        hdulist[0].header['PSFSUB'] = ("pyKLIP", "PSF Subtraction Algo")
        hdulist[0].header.add_history("Reduced with pyKLIP using commit {0}".format(pyklipver))
        hdulist[0].header['CREATOR'] = "pyKLIP-{0}".format(pyklipver)
        # store commit number for pyklip
        hdulist[0].header['pyklipv'] = (pyklipver, "pyKLIP version that was used")
        if klipparams is not None:
            hdulist[0].header['PSFPARAM'] = (klipparams, "KLIP parameters")
            hdulist[0].header.add_history("pyKLIP reduction with parameters {0}".format(klipparams))
        # write z axis units if necessary
        if zaxis is not None:
            # Writing a KL mode Cube
            if "KL Mode" in filetype:
                hdulist[0].header['CTYPE3'] = 'KLMODES'
                # write them individually
                for i, klmode in enumerate(zaxis):
                    hdulist[0].header['KLMODE{0}'.format(i)] = (klmode, "KL Mode of slice {0}".format(i))
                hdulist[0].header['CUNIT3'] = "N/A"
                hdulist[0].header['CRVAL3'] = 1
                hdulist[0].header['CRPIX3'] = 1.
                hdulist[0].header['CD3_3'] = 1.
        # use the dataset center if none was passed in
        if not pyklip_output:
            center = self.centers[0]
        else:
            center = self.output_centers[0]
        if center is not None:
            hdulist[0].header.update({'PSFCENTX': center[0], 'PSFCENTY': center[1]})
            hdulist[0].header.update({'CRPIX1': center[0], 'CRPIX2': center[1]})
            hdulist[0].header.add_history("Image recentered to {0}".format(str(center)))
        hdulist.writeto(filepath, overwrite=True)
        hdulist.close()
def calibrate_output(self, img, spectral=False, units="contrast"):
"""
Calibrates the flux of an output image. Can either be a broadband image or a spectral cube depending
on if the spectral flag is set.
Assumes the broadband flux calibration is just multiplication by a single scalar number whereas spectral
datacubes may have a separate calibration value for each wavelength
Args:
img: unclaibrated image.
If spectral is not set, this can either be a 2-D or 3-D broadband image
where the last two dimensions are [y,x]
If specetral is True, this is a 3-D spectral cube with shape [wv,y,x]
spectral: if True, this is a spectral datacube. Otherwise, it is a broadband image.
units: currently only support "contrast" w.r.t central star
Return:
img: calibrated image of the same shape (this is the same object as the input!!!)
"""
if units == "contrast":
if spectral:
# spectral cube, each slice needs it's own calibration
numwvs = img.shape[0]
img /= self.dn_per_contrast[:numwvs, None, None]
else:
# broadband image
img /= np.nanmean(self.dn_per_contrast)
self.flux_units = "contrast"
return img
def casdi_residual_star(params):
    """Unpack *params* and return the variance of the CASDI residual.

    Thin adapter so scalar optimizers holding a single parameter tuple can
    drive casdi_residual(): f((1, 2)) behaves like f(1, 2).
    """
    residual = casdi_residual(*params)
    return np.nanvar(residual)
def casdi_residual(xcen, ycen, input, wvs, nan2zero=False):
    """Flattened residual cube after a simple SDI-style speckle subtraction.

    Every slice is rescaled about the trial star center (xcen, ycen) by
    ref_wv/wv, then the per-pixel median over wavelength is removed.  Used
    as the cost term when fitting the star center (see casdi_residual_star).

    Args:
        xcen, ycen: trial star center, in pixels.
        input: spectral cube -- assumed shape (n_wvs, y, x), TODO confirm.
        wvs: wavelength per slice (same length as input's first axis).
        nan2zero: if True, replace NaNs with 0 so NaN-intolerant optimizers
            can consume the result.

    Returns:
        1-D numpy array: the residual cube, raveled.
    """
    input_scaled = np.zeros(input.shape)
    ref_wv = np.mean(wvs)
    for k, wv in enumerate(wvs):
        input_scaled[k, :, :] = klip.align_and_scale(input[k, :, :], (xcen, ycen), (xcen, ycen), ref_wv/wv)
    # input_sub = np.zeros(input.shape)
    # lib_size = np.max([np.size(wvs)/10,10])
    # for k,wv in enumerate(wvs):
    #     # print(k,np.size(wvs))
    #     input_sub[k,:,:] = input_scaled[k,:,:] - np.nanmedian(input_scaled[np.max([0,k-lib_size]):np.min([np.size(wvs),k+lib_size]),:,:],axis=0)
    import warnings
    with warnings.catch_warnings():
        # nanmedian warns on all-NaN pixel stacks; those propagate as NaN anyway.
        warnings.simplefilter("ignore")
        input_sub = input_scaled - np.nanmedian(input_scaled, axis=0)[None, :, :]
    if nan2zero:
        input_sub[np.where(np.isnan(input_sub))] = 0
    # for k,wv in enumerate(wvs):
    #     input_sub[k,:,:] = klip.align_and_scale(input_sub[k,:,:],(xcen,ycen),(xcen,ycen),wv/ref_wv)
    # print(xcen,ycen,np.nansum(input_sub**2),np.nanvar(input_sub.ravel()))
    # import matplotlib.pyplot as plt
    # plt.figure(1)
    # plt.imshow(np.nanmedian(input,axis=0))
    # print(np.nanvar(np.nanmedian(input,axis=0).ravel()))
    # plt.colorbar()
    # plt.figure(2)
    # plt.imshow(np.nanmedian(input_scaled,axis=0))
    # print(np.nanvar(np.nanmedian(input_scaled,axis=0).ravel()))
    # plt.colorbar()
    # plt.figure(3)
    # plt.imshow(np.nanmedian(input_sub,axis=0))
    # print(np.nanvar(np.nanmedian(input_sub,axis=0).ravel()))
    # plt.colorbar()
    # plt.show()
    return input_sub.ravel()
14,912 | ea1596099c323315a947612cf6fe484f31caf05d | # -*- coding: utf-8 -*-
"""
Created on Mon Feb 29 12:40:03 2016
@author: taivo
"""
import igraph
# Smoke test: build an edgeless 3-vertex graph and render it to test.png.
g = igraph.Graph(3)
igraph.plot(g, "test.png")
# Question Regarding API Design
# NOTE(review): HttpRequest.is_ajax() was deprecated in Django 3.1; newer
# code checks request.headers.get("x-requested-with") == "XMLHttpRequest"
# instead -- confirm the Django version in use.
request.is_ajax()
|
14,914 | 3bdb8639cb53fdcd70894c6338f96b4a147ec6ce | from keopscore.formulas.Operation import Operation
from keopscore.utils.code_gen_utils import (
c_variable,
c_for_loop,
)
####################################
###### Tensor product #####
####################################
class TensorProd(Operation):
    """KeOps tensor (outer) product of two vectors.

    For operands f (dimension p) and g (dimension q), TensorProd(f, g) is
    the p*q vector containing every product f[k] * g[l].
    """

    string_id = "TensorProd"

    def __init__(self, arg0, arg1, params=()):
        # N.B. params keyword is used for compatibility with base class, but should always equal ()
        if params != ():
            # Function-scope import (matching DiffT's style below) fixes a
            # latent NameError: KeOps_Error was never imported in this module.
            from keopscore.utils.misc_utils import KeOps_Error
            KeOps_Error("There should be no parameter.")
        super().__init__(arg0, arg1)
        # Output dimension is the product of the operand dimensions.
        self.dim = arg0.dim * arg1.dim

    def Op(self, out, table, arg0, arg1):
        """Emit the C code computing the outer product into *out*.

        Two layouts are generated, selected by the C_CONTIGUOUS macro:
        row-major writes sequentially through counter q; column-major
        scatters with stride arg0.dim.
        """
        q = c_variable("int")
        loop, k = c_for_loop(0, arg0.dim, 1, pragma_unroll=True)
        inner_loop, l = c_for_loop(0, arg1.dim, 1, pragma_unroll=True)
        return f"""
                #if C_CONTIGUOUS // row major
                    {q.declare_assign(0)}
                    {loop(inner_loop(out[q].assign(arg0[k] * arg1[l]) + q.add_assign(1)))}
                #else // column major
                    {q.declare_assign(0)}
                    {loop(inner_loop(out[k + l * arg0.dim].assign(arg0[k] * arg1[l]) + q.add_assign(1)))}
                #endif
            """

    def DiffT(self, v, gradin):
        """Transpose of the differential: grad flows to f via MatVecMult and
        to g via VecMatMult (matrix/vector views of the p*q gradient)."""
        from keopscore.formulas import MatVecMult, VecMatMult

        f, g = self.children
        return f.DiffT(v, MatVecMult(gradin, g)) + g.DiffT(v, VecMatMult(f, gradin))
|
14,915 | 355580a75d9802193a0eae3474d92aaf6d4a0726 | """webweb2 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
from matches import views
from matches.views import ChartData
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', views.home, name='home'),
    # Search and head-to-head comparison pages.
    path('search', views.search, name='search'),
    path('find/<str:queryA>-<str:queryB>/', views.find, name='find'),
    # Submission flow: the form page, then the processing endpoints.
    path('add', views.add, name='add'),
    path('add/process', views.process, name='process'),
    path('adder/<str:queryA>-<str:queryB>/', views.process2, name='adder'),
    path('random', views.random_match, name='random_match'),
    path('vote', views.vote, name='vote'),
    # JSON payload for the chart frontend (class-based view, no URL name).
    path('chart/data', ChartData.as_view()),
]
# Serve static files through Django itself only during development.
if settings.DEBUG:
    urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
|
14,916 | 8ef215135f04ef03275d811f52ec600299be6478 | # Generated by Django 3.1.3 on 2020-12-01 18:22
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: allow NULL values in Url.links."""

    dependencies = [
        ('Tasker', '0015_auto_20201130_1559'),
    ]

    operations = [
        migrations.AlterField(
            model_name='url',
            name='links',
            field=models.URLField(null=True),
        ),
    ]
|
14,917 | ba92e368c247bf07e9242f1247624559766311cc | # -*- coding: utf-8 -*-
"""
Created on Sun Mar 29 15:16:25 2020
@author: Vasilis
"""
import turtle
#%%
# for i in range(100):
#     print("We like Python's turtles!")
#%%
# wn = turtle.Screen()
# Draw regular polygons (3 to 8 sides) sharing edge length 50; the left()
# angle is always 360 / number_of_sides.
turtle.clearscreen()
vasi = turtle.Turtle()
for i in range(3):
    vasi.forward(50)
    vasi.left(120)
for i in range(4):
    vasi.forward(50)
    vasi.left(90)
for i in range(5):
    vasi.forward(50)
    vasi.left(72)
for i in range(6):
    vasi.forward(50)
    vasi.left(60)
for i in range(8):
    vasi.forward(50)
    vasi.left(45)
turtle.bye()
#%%% draw a star
turtle.clearscreen()
tasos = turtle.Turtle()
for i in range(5):
    # 216-degree turns trace a five-pointed star.
    tasos.forward(150)
    tasos.left(216)
# turtle.clearscreen()
# turtle.bye()
#%% draw a clock
turtle.clearscreen()
takis = turtle.Turtle()
takis.shape("turtle")
takis.color("blue")
turtle.bgcolor("lightgreen")
takis.stamp()
for i in range(12):
    # One stamped tick per hour position, spaced 30 degrees apart.
    takis.up()
    takis.forward(250)
    takis.down()
    takis.forward(30)
    takis.up()
    takis.forward(10)
    takis.stamp()
    takis.backward(290)
    takis.right(30)
#%% draw something cfeative
turtle.clearscreen()
takis = turtle.Turtle()
takis.shape("turtle")
takis.color("blue")
turtle.bgcolor("lightgreen")
takis.speed(0)
takis.forward(50)
takis.left(90)
takis.forward(50)
takis.left(45)
takis.forward(50)
takis.left(90)
takis.forward(50)
takis.left(45)
takis.forward(50)
takis.left(90)
takis.forward(60)
#%% draw creative ly 2
vasi = turtle.Turtle()
wn = turtle.Screen()
vasi.speed(0)
for i in range(100):
    # The turn angle grows each step, producing a spirograph-like pattern.
    vasi.forward(50)
    vasi.left(72 + i)
|
def verify(num1, num2):
    """Return the larger of the two numbers, or 1 when they are equal."""
    if num1 == num2:
        return 1
    return num1 if num1 > num2 else num2
def display(arg1, arg2):
    """Print which value verify() selected: "A" for arg1, "C" for a tie
    (verify returns 1), otherwise "B" for arg2."""
    winner = verify(arg1, arg2)
    if winner == arg1:
        print("A")
    elif winner == 1:
        print("C")
    else:
        print("B")


display(1000, 3500)
14,919 | 93c39f0f5c27a2ea910b2519a0360c5b797b53ae | import numpy as np
import matplotlib.pyplot as plt
def load(densityfile):
    """Read a two-column (temperature, density) text file.

    Returns a dict with 'temperature' and 'density' arrays, one entry per
    line of *densityfile*.
    """
    temperature, density = np.loadtxt(densityfile, unpack=True)
    return {'temperature': temperature, 'density': density}
def fit(x, y, substance):
    """Scatter-plot the measurements and overlay degree-1 and degree-2
    polynomial least-squares fits, then show the figure (blocks until the
    window is closed)."""
    plt.plot(x, y, 'o',
             markerfacecolor='#2166ac',
             markeredgecolor='#053061',
             markersize=8,
             markeredgewidth=1.5)
    # One polyfit per degree, each drawn dashed in its own colour.
    for deg, linecol in [(1, '#67001f'), (2, '#006837')]:
        coeff = np.polyfit(x, y, deg)
        y_fit = np.poly1d(coeff)
        plt.plot(x, y_fit(x), "--",
                 color=linecol)
    # Pad both axes by 10% of the data span on each side.
    x_range = x.max() - x.min()
    y_range = y.max() - y.min()
    plt.xlim([x.min() - 0.1 * x_range, x.max() + 0.1 * x_range])
    plt.ylim([y.min() - 0.1 * y_range, y.max() + 0.1 * y_range])
    plt.xlabel('Temperature')
    plt.ylabel('Density')
    plt.title('Temperature dependence of %s density' % substance)
    plt.legend(['data',
                'fitted degree 1 polynomial',
                'fitted degree 2 polynomial'])
    plt.show()
# Plot fits to air density (expects density_air.dat in the working directory).
data = load('density_air.dat')
fit(data['temperature'], data['density'], 'air')
# plot fits to water density
data = load('density_water.dat')
fit(data['temperature'], data['density'], 'water')
|
14,920 | d4bc112955e9ad9c7ba82f3549eab636fbe3941a | from flaskRun import getDriver
def f_1mg(medicine):
    """Look up *medicine* on 1mg.com and return "name: price" (or a
    not-found message) -- currently a stub returning None.

    The scraping code below is commented out; a Python function cannot
    consist solely of comments, so the original definition was a
    SyntaxError.  The explicit return restores importability while keeping
    the reference implementation for later revival.
    """
    # driver = getDriver()
    # URL = "https://www.1mg.com/search/all?name="+medicine
    # r = requests.get(URL)
    # soup = BeautifulSoup(r.content,'html.parser')
    # # print(soup.prettify())
    # name = ""
    # cost = ""
    # name_table = soup.find('div',attrs = {'class':'style__product-description___1vPQe'})
    # if(name_table):
    #     for row in name_table:
    #         name += row.text
    #         name += ','
    #     name = name[:-1]
    #     cost_table = soup.find('div',attrs = {'class':'style__price-tag___B2csA'})
    #     for row in cost_table:
    #         cost += row.string
    #     return name + ": " + cost
    # else :
    #     return medicine + ' not found'
    return None
def f_pharmeasy(medicine):
    """Look up *medicine* on pharmeasy.in and return "name,desc:price" (or a
    not-found message) -- currently a stub returning None.

    The scraping code below is commented out; a Python function cannot
    consist solely of comments, so the original definition was a
    SyntaxError.  The explicit return restores importability.
    """
    # driver = getDriver()
    # URL = "https://pharmeasy.in/search/all?name=" + medicine
    # r = requests.get(URL)
    # soup = BeautifulSoup(r.content,'html.parser')
    # # print(soup.prettify())
    # name = ""
    # cost = ""
    # desc = ""
    # name_table = soup.find('h1',attrs = {'class':'ooufh'})
    # if(name_table):
    #     for row in name_table:
    #         name = row.string
    #         name += ','
    #     desc_table = soup.find('div',attrs = {'class':'_36aef'})
    #     if(desc_table):
    #         for row in desc_table:
    #             desc = row.string
    #             desc += ':'
    #     cost_table = soup.find('div',attrs = {'class':'_1_yM9'})
    #     for row in cost_table:
    #         cost += str(row.string)
    #     if cost[-1]=='*' :
    #         cost = cost[:-1]
    #     return name + desc + cost
    #     return medicine + ' not found'
    # return medicine + ' not found'
    return None
def f_apollo(medicine):
    """Look up *medicine* on apollopharmacy.in via Selenium -- currently a
    stub returning None.

    The scraping code below is commented out; a Python function cannot
    consist solely of comments, so the original definition was a
    SyntaxError.  The explicit return restores importability.
    """
    # driver = getDriver()
    # driver.get("https://www.apollopharmacy.in/tsearch?q="+medicine)
    # try:
    #     unavailable = driver.find_element_by_class_name("no-products")
    #     return medicine + ' not found'
    # except NoSuchElementException :
    #     pdt = driver.find_element_by_class_name("tagalys-product-tile")
    #     a = pdt.text.split('\n')
    #     return (a[0] + ' '+ a[1]).capitalize()
    # # pass
    return None
def f_netmeds(medicine):
    """Look up *medicine* on netmeds.com via Selenium -- currently a stub
    returning None.

    The scraping code below is commented out; a Python function cannot
    consist solely of comments, so the original definition was a
    SyntaxError.  The explicit return restores importability.
    """
    # driver = getDriver()
    # URL = "https://www.netmeds.com/catalogsearch/result?q=" + medicine
    # driver.get(URL)
    # try:
    #     pdt = driver.find_element_by_class_name("drug_list")
    #     a = pdt.text.split('\n')[:5]
    #     return a[0] +' '+ a[2]
    # except NoSuchElementException:
    #     return medicine + ' not found'
    return None
|
14,921 | 1ed3087b7a39324fd29f128abd989a9c53bb8406 | from datetime import timedelta
import socket
import os
class Config:
    """Flask application settings: service identity, network binding, JWT
    lifetimes, MongoDB connection and Swagger/Flasgger UI metadata."""

    SERVICE_NAME = 'Minitwit'
    # Resolve this machine's primary IP; port 80 typically needs privileges.
    HOST = socket.gethostbyname(socket.gethostname())
    PORT = 80
    DEBUG = True
    # Keyword arguments forwarded to app.run().
    RUN_SETTINGS = {
        "host": HOST,
        "port": PORT,
        "debug": DEBUG
    }
    # Short-lived access token, month-long refresh token.
    JWT_ACCESS_TOKEN_EXPIRES = timedelta(days=1)
    JWT_REFRESH_TOKEN_EXPIRES = timedelta(days=30)
    # Credentials come from the environment; host/port None = driver defaults.
    MONGODB_SETTINGS = {
        'db': SERVICE_NAME,
        'host': None,
        'port': None,
        'username': os.getenv('MONGO_ID'),
        'password': os.getenv('MONGO_PW')
    }
    SWAGGER = {
        'title': SERVICE_NAME,
        'uiversion': 3,
        'info': {
            'title': SERVICE_NAME + ' API',
            'version': '1.0',
            'description': 'test'
        },
        'host': 'localhost',
        'basePath': '/api'
    }
    # Tag groups shown in the Swagger UI (labels intentionally localized).
    SWAGGER_TEMPLATE = {
        'schemes': [
            'http'
        ],
        'tags': [
            {
                'name': '[User] 계정',
                'description': '계정 관련 API'
            },
            {
                'name': '[User] 계정 관리',
                'description': '계정 관리 관련 API'
            },
            {
                'name': '[User] 게시글',
                'description': '모든 권한으로 접근 가능한 게시글 관련 API'
            },
            {
                'name': '[User] 게시글 관리',
                'description': '게시글 관리 관련 API'
            }
        ]
    }
    # SQLALCHEMY_DATABASE_URI = "sqlite:///models/models.db"
    # SQLALCHEMY_TRACK_MODIFICATIONS = False
    # NOTE(review): hard-coded secret key -- load from the environment before
    # deploying to production.
    SECRET_KEY = 'IAMASECRETKEY'
|
14,922 | 0bf87f7e61b614a4331e57113ac46f8986b446bf | import numpy as np
def app_products2optimisation(products):
    """Build a simplex tableau from Django product rows and minimise cost.

    Assumes *products* holds n-2 food items (attributes: default_price,
    fat, calories) followed by one "max fat" bound row and one "min
    calories" bound row -- TODO confirm against the caller.

    Returns the list of variable values plus the (re-negated) optimum, or
    [] when *products* is empty.
    """
    # Worked example (P weight -- x1, K -- x2):
    # c = [-250,-210]     objective 250x1+210x2 -- minimise basket cost;
    # A_ub = [[15,4]]     fat coefficients for P and K
    # b_ub = [14]         15x1+4x2 <= 14 -- fat limit;
    # A_eq = [[150,200]]  150x1+200x2 = 300 -- calorie requirement.
    # b_eq = [300]
    # print (linprog(c, A_ub, b_ub, A_eq, b_eq))
    if not products:
        return []
    n = len(products) - 2
    c = []
    ub = []
    eq = []
    for i in range(0, n):
        c.append(products[i].default_price)
        ub.append(products[i].fat)
        eq.append(products[i].calories)
    i += 1  # advance to the free-term entry of the "<=" row ("Minimise Fat" product)
    ub.append(products[i].fat)
    A_ub = np.array([ub])
    c.append(products[i].default_price)
    i += 1  # advance to the free-term entry of the ">=" row ("Get enough calories" product)
    eq.append(products[i].calories)
    A_eq = np.array([eq])
    A_eq = -1 * A_eq  # for ">" flip the sign of every element (TODO: automate per condition)
    c = np.array([c])
    A_eq = np.squeeze(A_eq)  # Django produced doubly nested lists; flatten one level
    A_ub = np.squeeze(A_ub)
    c = np.squeeze(c)
    table = [A_ub, A_eq, c]
    table = np.asarray(table, dtype=float)
    b = np.identity(n + 1)  # identity block for the slack variables
    # amount_columns = np.size(table,1)
    table = np.insert(table, n, b, axis=1)  # splice the identity block in before column n+1 (axis=1)
    print('np.size(table)', np.size(table))
    print(table)
    # return type(table)
    result = minimise(table)
    result[n] = -1 * result[n]  # undo the sign flip applied by convert_min
    print(result)
    return result
    # return [1, 2, 3]
def amount_rows(amount_constraints):
    """Rows required by the tableau: one per constraint plus the objective row."""
    return amount_constraints + 1
def amount_columns(amount_variables, amount_constraints):
    """Columns required: decision variables + one slack variable per
    constraint + the M (max/min) column + the free-term column."""
    return amount_variables + amount_constraints + 2
def create_simplex_table(amount_variables, amount_constraints):
    """Allocate a zero-filled simplex tableau sized for the given problem."""
    n_rows = amount_rows(amount_constraints)
    n_cols = amount_columns(amount_variables, amount_constraints)
    return np.zeros((n_rows, n_cols))
# Row count = number of constraints + the objective-function row.
# Column count = decision variables + slack variables (for the canonical
# form) + the M (max/min) column + the free-term column.
def input_to_list(equation):
    """Parse a comma-separated constraint string into float coefficients.

    '>' constraints are negated element-wise (converted to '<=' form);
    '<' constraints are returned as-is.  Strings containing neither marker
    yield None, matching the original behaviour.
    """
    tokens = equation.split(',')
    if '>' in tokens:
        tokens.remove('>')
        return [-float(tok) for tok in tokens]
    if '<' in tokens:
        tokens.remove('<')
        return [float(tok) for tok in tokens]
def xi_names(table):
    """Return the decision-variable labels 'x1'..'xN' for *table*.

    N = columns - rows - 1: total columns minus the slack/identity block
    (one column per row) minus the free-term column.
    """
    n_vars = np.size(table, 1) - np.size(table, 0) - 1
    return ['x%d' % (idx + 1) for idx in range(n_vars)]
def lines4constraints_available(filled_table):
    """True when the second-to-last row is still all zeros, i.e. there is
    room left in the tableau for another constraint.

    (Row truthiness: any non-zero entry marks the row as used.)
    """
    return not filled_table[-2].any()
def add_constraint(table, equation):
    """Write one constraint (comma string such as '2,5,>,30') into the first
    all-zero row of *table*, adding the slack-variable 1 in place."""
    # Fill a free constraint row, if one remains.
    if lines4constraints_available(table):
        amount_rows = np.size(table, 0)  # row count (axis=0)
        amount_columns = np.size(table, 1)  # column count (axis=1)
        xi = amount_columns - amount_rows - 1  # number of decision variables
        b = np.where(~table.any(axis=1))[0]  # indices of all-zero rows
        j = b[0]  # first all-zero row
        row = table[j, :]  # view into that row: writes mutate *table*
        equation = input_to_list(equation)
        i = 0
        # Copy coefficients while the expression still has variables.
        while i < len(equation) - 1:
            # write the basis variables into the table
            row[i] = equation[i]
            i += 1
        row[-1] = equation[-1]  # free-term (b) column
        row[xi + j] = 1  # the 1 of the identity (slack) block right of the basis
    else:
        print('Свободных строк для ограничений больше не было предусмотрено, начните сначала или введите целевую функцию.')
def line4objective_available(filled_table):
    """True when exactly the final row is empty: every constraint row has
    been filled and the objective row is still free."""
    second_last_used = filled_table[-2].any()
    last_free = not filled_table[-1].any()
    return bool(second_last_used and last_free)
def add_objective(table, equation):
    """Write the objective function (comma string such as '250,210,0') into
    the last row of *table*, if that row is still free."""
    # Only proceed when exactly the last row remains empty.
    if line4objective_available(table):
        equation = [float(i) for i in equation.split(',')]
        amount_rows = np.size(table, 0)  # row count (axis=0)
        row = table[amount_rows - 1, :]  # view: writes mutate *table*
        i = 0
        # While the expression still has variables...
        while i < len(equation) - 1:
            # ...write them into the table, negated (canonical form).
            row[i] = equation[i] * -1
            i += 1
        row[-2] = 1  # the 1 of the identity block right of the basis (M column)
        row[-1] = equation[-1]  # stores the required budget value
    else:
        print('Остались пустые сроки, введите ограничения или начните сначала.')
def convert_min(filled_table):
    """Negate the objective (last) row in place so the maximisation
    machinery performs a minimisation; returns the same table."""
    filled_table[-1] *= -1
    return filled_table
def check_bi_positifs(filled_table):
    """Check the free-term (b) column over the constraint rows.

    Returns True when at least one b_i is negative (a dual-simplex
    recalculation is required); False once all are non-negative.
    """
    b_column = filled_table[:-1, -1]
    return bool(min(b_column) < 0)
def check_cj0(filled_table):
    """Inspect the objective-row coefficients (last row, b column excluded).

    Returns True only when every coefficient is strictly negative; a
    maximum of exactly 0 yields False, matching the original's ``>= 0``
    boundary behaviour.
    """
    objective_coeffs = filled_table[-1, :-1]
    return bool(max(objective_coeffs) < 0)
def neg_bi_index(filled_table):
    """Return the 0-based row index of the most negative free term b_i.

    Scans the last column (the b column) over all constraint rows, i.e.
    excluding the objective row.

    Fix: the original indexed column ``rows - 1`` -- a row count reused as
    a column index -- instead of the last column, so on the non-square
    tableaux produced by create_simplex_table it scanned an interior
    (slack) column rather than the free terms, contradicting both its own
    comment and check_bi_positifs().
    """
    return np.argmin(filled_table[:-1, -1])
def column_choice(filled_table):
    """Select the pivot (entering) column: the 0-based index of the smallest
    objective-row coefficient, excluding the free-term column."""
    objective_coeffs = filled_table[-1, :-1]
    return np.argmin(objective_coeffs)
def bi_to_positive(filled_table):
    """Dual-simplex pivot choice for a negative free term.

    Finds the row with the most negative b_i, takes its most negative
    coefficient as the pivot column, then runs a minimum-positive-ratio
    test down that column.  Returns [pivot_row, pivot_column] (0-based).
    """
    division = []  # candidate ratios for the dual-simplex step
    row_index = neg_bi_index(filled_table)
    # Most negative coefficient of that row (free-term column excluded).
    min_in_row_column_number = np.argmin(filled_table[row_index, :-1])
    col = filled_table[:-1, min_in_row_column_number]
    # all constraint coefficients of that column
    last_column = filled_table[:-1, -1]
    for i, b in zip(col, last_column):
        if i != 0 and b / i > 0:
            division.append(b / i)
        else:
            # Sentinel so rows that fail the test never win the minimum.
            division.append(10000)
    index = division.index(min(division))  # smallest positive ratio
    return [index, min_in_row_column_number]
def row_choice(filled_table):
    """Pick the pivot (leaving) row for the entering column chosen by
    column_choice(); returns [row_index, column_index], both 0-based.

    Fix: column_choice() already returns a 0-based index, yet the ratio
    test still read column ``c - 1`` (left over from an earlier 1-based
    version) while the function returned pivot column ``c``.  The ratios
    and the pivot therefore referred to different columns; both now use
    ``c``.
    """
    if check_cj0(filled_table):  # should arguably raise when this guard fails
        c = column_choice(filled_table)  # entering column (0-based)
        ratios = []
        pivot_col = filled_table[:-1, c]
        b_col = filled_table[:-1, -1]
        for a, b in zip(pivot_col, b_col):
            # Only strictly positive pivot coefficients join the
            # minimum-ratio test; others get a large sentinel value.
            ratios.append(b / a if a > 0 else 100000)
        return [ratios.index(min(ratios)), c]
def calc(chosen_row, chosen_column, source_table):
    """One Jordan-elimination (pivot) step around the given element.

    Args:
        chosen_row, chosen_column: pivot coordinates, 0-based.
        source_table: current tableau (left unmodified).

    Returns:
        A new tableau, or None (after printing) when the pivot is zero.
    """
    target_table = np.zeros_like(source_table, dtype=float)  # fresh output table
    r = chosen_row  # pivot row (0-based)
    c = chosen_column  # pivot column (0-based)
    pivot_row = source_table[r, :]
    pivot_element = source_table[r, c]
    if pivot_element:
        e = 1 / pivot_element
        new_chosen_row = pivot_row * e  # pivot row divided by the pivot element
        for i in range(np.size(source_table, 0)):  # iterate over every row
            if i != r:  # every row except the pivot row itself
                row = source_table[i, :]
                p_c_el = source_table[i, c]  # this row's entry in the pivot column
                target_table[i, :] = list(row - new_chosen_row * p_c_el)
                # reuse the already-scaled pivot row to avoid recomputation
        target_table[r, :] = list(new_chosen_row)  # scaled pivot row replaces the original
        return target_table
    else:
        # NOTE(review): returns None on a zero pivot -- callers never check.
        print('Разрешающий элемент не позволяет провести пересчет.')
def minimise(table, output='результаты'):
    """Run the (dual) simplex iterations on *table* and report the optimum.

    Args:
        table: filled tableau (constraints + objective + identity block).
        output: 'table' returns the final tableau; any other value returns
            the variable values followed by the objective optimum, as a list.
    """
    # Drives the calc() pivot steps, then extracts the solution.
    table = convert_min(table)
    while check_bi_positifs(table):
        table = calc(
            bi_to_positive(table)[0],
            bi_to_positive(table)[1],
            table
        )  # Jordan eliminations for negative free terms (dual-simplex phase)
    while check_cj0(table):
        table = calc(
            row_choice(table)[0],
            row_choice(table)[1],
            table
        )  # Jordan eliminations driven by the objective-row coefficients
    amount_columns = np.size(table, 1)  # column count (axis=1)
    amount_rows = np.size(table, 0)  # row count (axis=0)
    var = amount_columns - amount_rows - 1
    i = 0
    val = {}  # collects the solution values per variable
    for i in range(var):
        # A variable is basic when its column is a unit column (column sum
        # equals the column maximum); its value is then the b entry of the
        # matching row.  Non-basic variables are 0.
        col = table[:, i]
        s = sum(col)
        m = max(col)
        if float(s) == float(m):
            loc = np.where(col == m)[0][0]
            val[xi_names(table)[i]] = table[loc, -1]
        else:
            val[xi_names(table)[i]] = 0
    val['min'] = table[-1, -1] * -1  # terminal view: dict keyed x1..xn plus 'min'
    val = list(val.values())  # the Django caller expects a plain list
    if output == 'table':
        return table
    else:
        return val
if __name__ == "__main__":
    # Worked examples, followed by regression checks of the pivot helpers.
    st = create_simplex_table(2, 4)
    add_constraint(st, '2,5,>,30')
    add_constraint(st, '-3,5,>,5')
    add_constraint(st, '8,3,<,85')
    add_constraint(st, '-9,7,<,42')
    add_objective(st, '2,7,0')
    print(minimise(st))
    print('-'*100)
    st = create_simplex_table(2, 2)
    add_constraint(st, '15,4,<,14')
    add_constraint(st, '150,200,>,300')
    add_objective(st, '250,210,0')
    print(minimise(st))
    print('-'*100)
    st = create_simplex_table(2, 2)
    add_constraint(st, '1450,770,>,1100')
    add_constraint(st, '560,120,>,800')
    add_objective(st, '130,30,0')
    print(minimise(st))
    print('-'*100)
    print('-'*100)
    a = np.array([
        [0., 0., 0., 0., 0., 0.],
        [0., 0., 0., 0., 0., 0.],
        [0., 0., 0., 0., 0., 0.]])
    assert np.array_equal(create_simplex_table(2, 2), a), "ошибка создания nparray"
    # Known intermediate tableaux of a textbook example, used to pin calc().
    test_table1 = np.array([
        [4, 6, 1, 0, 0, 120],
        [2, 6, 0, 1, 0, 72],
        [0, 1, 0, 0, 1, 10],
        [2, 4, 0, 0, 0, 0]])
    test_table2 = np.array([
        [4, 0, 1, 0, -6, 60],
        [2, 0, 0, 1, -6, 12],
        [0, 1, 0, 0, 1, 10],
        [2, 0, 0, 0, -4, -40]])
    test_table3 = np.array([
        [0, 0, 1, -2, 6, 36],
        [1, 0, 0, 0.5, -3, 6],
        [0, 1, 0, 0, 1, 10],
        [0, 0, 0, -1, 2, -52]])
    test_table4 = np.array([
        [0, 0, 1/6, -1/3, 1, 6],
        [1, 0, 0.5, -0.5, 0, 24],
        [0, 1, -1/6, 1/3, 0, 4],
        [0, 0, -1/3, -1/3, 0, -64]])
    assert check_cj0(test_table1) == False, "ошибка проверки условия cj<=0"
    assert check_cj0(test_table2) == False, "ошибка проверки условия cj<=0"
    assert check_cj0(test_table3) == False, "ошибка проверки условия cj<=0"
    assert column_choice(test_table1) == 2, "ошибка выбора разрешающего столбца"
    # calc() is exercised with human-format indices minus 1 (0-based).
    assert np.array_equal(calc(3-1, 2-1, test_table1), test_table2), "ошибка пересчета"
    assert np.array_equal(calc(2-1, 1-1, test_table2), test_table3), "ошибка пересчета"
    assert np.allclose(calc(1-1, 5-1, test_table3), test_table4), "ошибка пересчета"
|
14,923 | 6ad575b90c3d3eb6d296ed16849d0cc30f02fa37 | # Python
from unittest import mock
import uuid
# patch python-ldap
# (an empty dir() during import sidesteps python-ldap's introspection)
with mock.patch('__main__.__builtins__.dir', return_value=[]):
    import ldap  # NOQA
# Load development settings for base variables.
from awx.settings.development import *  # NOQA
# Some things make decisions based on settings.SETTINGS_MODULE, so this is done for that
SETTINGS_MODULE = 'awx.settings.development'
# Use SQLite for unit tests instead of PostgreSQL. If the lines below are
# commented out, Django will create the test_awx-dev database in PostgreSQL to
# run unit tests.
# A per-process UUID keys the cache so parallel test runs do not share state.
CACHES = {'default': {'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', 'LOCATION': 'unique-{}'.format(str(uuid.uuid4()))}}
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'awx.sqlite3'),  # noqa
        'TEST': {
            # Test database cannot be :memory: for inventory tests.
            'NAME': os.path.join(BASE_DIR, 'awx_test.sqlite3')  # noqa
        },
    }
}
|
# Read an r-row integer matrix from stdin, then a 1-based (row, column)
# query, and print the addressed element.
row_count = int(input())
matrix = [[int(token) for token in input().split()] for _ in range(row_count)]
n1, n2 = map(int, input().split())
print(matrix[n1 - 1][n2 - 1])
|
# Python 2 one-liner: for each of T test cases, one replace() pass performs
# every non-overlapping adjacent '><' -> '<>' swap, then the remaining '><'
# occurrences are counted.  NOTE(review): presumably a contest answer --
# confirm against the original problem statement.
for _ in range(int(raw_input())):
    print raw_input().replace('><', '<>').count('><')
14,926 | 2ffd2630f9d7b5f34bc1a7a2bea25bb2d1cb8b67 | #!/usr/bin/python
import argparse
import subprocess
import sys
import os
##
# Set env
subenv = os.environ.copy()
def set_queue_host():
    """Locate the debug kafka client pod and the ZENKO_QUEUE service host.

    Shells out to kubectl (must already be configured in the environment).

    Returns:
        (kafka_client_pod, queue_host) tuple.

    NOTE(review): under Python 3, Popen.stdout.read() yields bytes, so the
    .format() interpolation below would render b'...' into the command; the
    shebang suggests Python 2 -- confirm before porting.
    """
    kafka_client_pod = (
        subprocess.Popen(
            "kubectl get pods |grep debug-kafka-client |awk '{print $1}'",
            shell=True,
            env=subenv,
            stdout=subprocess.PIPE,
        )
        .stdout.read()
        .strip()
    )
    # Extract ZENKO_QUEUE_SERVICE_HOST from the pod's environment listing.
    cmd = "kubectl exec -it {0} env |grep ZENKO_QUEUE_SERVICE_HOST".format(
        kafka_client_pod
    )
    cmd += "|gawk -F= '{print $2}'"
    queue_host = (
        subprocess.Popen(cmd, shell=True, env=subenv, stdout=subprocess.PIPE)
        .stdout.read()
        .strip()
    )
    return kafka_client_pod, queue_host
def list_consumer_groups(kafka_client_pod, queue_host):
    """Print every Kafka consumer group known to the broker.

    Runs kafka-consumer-groups.sh --list inside the client pod via kubectl.
    """
    scmd = "/opt/kafka/bin/kafka-consumer-groups.sh --bootstrap-server {0}:9092 --list".format(
        queue_host
    )
    dcmd = "kubectl exec {0} -- {1}".format(kafka_client_pod, scmd)
    cgroups = (
        subprocess.Popen(dcmd, shell=True, env=subenv, stdout=subprocess.PIPE)
        .stdout.read()
        .strip()
    )
    print(cgroups)
def show_group(kafka_client_pod, queue_host, group):
    """Print the describe output (including lag) for one consumer *group*.

    Runs kafka-consumer-groups.sh --describe inside the client pod.
    """
    scmd = "/opt/kafka/bin/kafka-consumer-groups.sh --bootstrap-server {0}:9092 --describe --group {1}".format(
        queue_host, group
    )
    dcmd = "kubectl exec {0} -- {1}".format(kafka_client_pod, scmd)
    group_txt = (
        subprocess.Popen(dcmd, shell=True, env=subenv, stdout=subprocess.PIPE)
        .stdout.read()
        .strip()
    )
    print(group_txt)
if __name__ == "__main__":
    # CLI entry point: list consumer groups, or describe one group's lag.
    parser = argparse.ArgumentParser(
        description='Lists consumer groups and output lag. Make sure "kubectl" is set up in your shell before running'
    )
    parser.add_argument(
        "--list", "-l", help="list available consumer groups", action="store_true"
    )
    parser.add_argument(
        "--group", "-g", help="output lag values for consumer group", default=False
    )
    args = parser.parse_args()
    kafka_client_pod, queue_host = set_queue_host()
    # Idiom fix (PEP 8): test truthiness directly instead of "== True".
    if args.list:
        list_consumer_groups(kafka_client_pod, queue_host)
    elif args.group:
        show_group(kafka_client_pod, queue_host, args.group)
|
14,927 | 77d3260848a5eed3b78bc2dc006ae2fcbdf25e08 | TEXTO = "Soy una constante" |
14,928 | a0b350345f082c09b82acf3145f79856274c22fe | import csv
virus_list = {}
def create_virus_list():
    """Populate the module-level virus_list dict from data.csv.

    Each data row maps disease name -> [fatality, reproduction_rate].
    A fatality written as e.g. "3.4%" is converted to the float 3.4; a
    value with no "%" is kept verbatim, as before.

    Fix: the old char-by-char scan rebound `fatality` to a float on the
    first "%" and then crashed with a TypeError (slicing a float) if the
    field contained a second "%". str.find avoids that.

    Returns:
        The (shared, module-level) virus_list dict.
    """
    with open('data.csv', 'r') as csvfile:
        csv_reader = csv.reader(csvfile)
        # enumerate replaces the manual row counter; rows 0 and 1 are
        # skipped (header lines), matching the original behaviour.
        for i, disease in enumerate(csv_reader):
            if i > 1:
                fatality = disease[1]
                percent_at = fatality.find("%")
                if percent_at != -1:
                    fatality = float(fatality[:percent_at])
                repr_rate = float(disease[2])
                virus_list[disease[0]] = [fatality, repr_rate]
    # print(virus_list)
    return virus_list
create_virus_list()
|
14,929 | f53874851e3b77620b78a9f291079fe6d16a1c76 | """
Django settings for robocat project.
Generated by 'django-admin startproject' using Django 3.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# This is the local, testing secret key
SECRET_KEY = 'y425xi4mz%t!dv2e*-*m((jvlt%$aqji33lxghrznhad)7im$2'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# NOTE(review): '*' accepts any Host header; restrict before production use.
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
    'whitenoise.runserver_nostatic',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'graphene_django',
    'corsheaders',
    'teams.apps.TeamsConfig',
    'matches.apps.MatchesConfig',
    'schedules.apps.SchedulesConfig',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'whitenoise.middleware.WhiteNoiseMiddleware',
    'django.middleware.gzip.GZipMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'corsheaders.middleware.CorsMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    # If a cache is added, htmlmin must be reordered. Check its PyPI page.
    # 'htmlmin.middleware.HtmlMinifyMiddleware',
    # 'htmlmin.middleware.MarkRequestMiddleware',
]
ROOT_URLCONF = 'robocat.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'robocat.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-gb'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/_/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static-root')
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# Graphene's debug middleware is only wired in while DEBUG is on.
_graphene_middleware = []
if DEBUG:
    _graphene_middleware.append('graphene_django.debug.DjangoDebugMiddleware')
GRAPHENE = {
    'SCHEMA': 'robocat.schema.schema',
    'SCHEMA_OUTPUT': 'schema.json',
    'SCHEMA_INDENT': 2,
    'MIDDLEWARE': _graphene_middleware
}
# CORS
# CORS may need to be disabled during development to test frontend and backend
# separately, but in production they run from the same server, so CORS should
# be enabled.
CORS_ORIGIN_ALLOW_ALL = DEBUG
# WhiteNoise (static file framework)
# Uncomment to remove unhashed files. These should not be referenced
# and may reduce the size of the static file storage by a 25-50 %.
# However, it will impede DEBUG mode
# WHITENOISE_KEEP_ONLY_HASHED_FILES = True
WHITENOISE_AUTOREFRESH = False
WHITENOISE_USE_FINDERS = False
# HTML minifier
# Uncomment to minify also in DEBUG Mode
# HTML_MINIFY = True
# HTML comments (<!-- ... -->) will be kept. Django comments ({# ... #})
# will still be stripped
KEEP_COMMENTS_ON_MINIFYING = True
|
14,930 | ab02fd7afcc113fe2c1b9aa7697a1ea08e6f4b00 | from django.contrib.auth.models import User, Permission
from django.test import TestCase
from django_fsm import has_transition_perm
from testapp.models import BlogPost
class PermissionFSMFieldTest(TestCase):
    """Checks that django-fsm transition permissions gate transitions per user.

    Fix: assertEquals is a long-deprecated alias removed in Python 3.12;
    replaced with assertEqual. Test method names are kept unchanged so the
    test IDs stay stable.
    """
    def setUp(self):
        # One model instance plus three users with differing permission sets.
        self.model = BlogPost()
        self.unpriviledged = User.objects.create(username='unpriviledged')
        self.priviledged = User.objects.create(username='priviledged')
        self.staff = User.objects.create(username='staff', is_staff=True)
        self.priviledged.user_permissions.add(
            Permission.objects.get_by_natural_key('can_publish_post', 'testapp', 'blogpost'))
        self.priviledged.user_permissions.add(
            Permission.objects.get_by_natural_key('can_remove_post', 'testapp', 'blogpost'))
    def test_proviledged_access_succed(self):
        """A user holding publish/remove permissions may use those transitions."""
        self.assertTrue(has_transition_perm(self.model.publish, self.priviledged))
        self.assertTrue(has_transition_perm(self.model.remove, self.priviledged))
        transitions = self.model.get_available_user_state_transitions(self.priviledged)
        self.assertEqual(set(['publish', 'remove', 'moderate']),
                         set(transition.name for transition in transitions))
    def test_unpriviledged_access_prohibited(self):
        """A user without permissions only sees the unrestricted transition."""
        self.assertFalse(has_transition_perm(self.model.publish, self.unpriviledged))
        self.assertFalse(has_transition_perm(self.model.remove, self.unpriviledged))
        transitions = self.model.get_available_user_state_transitions(self.unpriviledged)
        self.assertEqual(set(['moderate']),
                         set(transition.name for transition in transitions))
    def test_permission_instance_method(self):
        """Permission callables are evaluated per instance: restore is staff-only."""
        self.assertFalse(has_transition_perm(self.model.restore, self.unpriviledged))
        self.assertTrue(has_transition_perm(self.model.restore, self.staff))
|
14,931 | 1318c706fcea91ef086c8f52a7d13d103cc84bc6 | # 정수 2개 입력받아 비교하기4
# Read two space-separated integers from stdin and print True when they differ
a, b = map(int, input().split())
print(a!=b)
14,932 | 574f832a403483e823aca6de990b54bcfba7426c | i = 0
# Compound growth: start from 1 and add 50% on each of 99 rounds.
v = 1
for i in range(1, 100):
    v += v * 0.5
print(v)
|
14,933 | 316fdadcccd3dfdd634c40a49932e24c214c4a39 | #!/usr/bin/env python3
from ncclient import manager
# Open a NETCONF session to the Junos device (port 830 = NETCONF over SSH).
# NOTE(review): credentials are hard-coded and host-key verification is
# disabled (hostkey_verify=False) - acceptable for a lab, not for production.
conn = manager.connect(
    host='192.168.24.252',
    port='830',
    username='netconf',
    password='juniper!',
    timeout=10,
    device_params={'name':'junos'},
    hostkey_verify=False)
# Run "show version" and print the CLI-formatted text from the RPC reply.
result = conn.command('show version', format='text')
print(result.xpath('output')[0].text)
conn.close_session()
|
14,934 | c9194e8ff690165129b04479c1f925769954c347 | import logging
import argparse
import numpy as np
from sklearn.cluster import KMeans as sk_kmeans
from sklearn.cluster import MiniBatchKMeans as minibatch
from methods.util_models import *
from sklearn.metrics import pairwise_distances
class KMeans(sk_kmeans):
    """A KMeans class that will attempt to perform clustering and then pairwise distance calculations
    and ranking. The query items supplied will be compared to cluster means and then all cluster items
    belonging to that mean, before the next cluster. It can therefore reduce the number of distances
    we need to calculate."""
    def __init__(self, rank=10, clusters=1, iterations=3, metric='euclidean'):
        """Configure the clusterer.

        rank: ranking depth kept for CMC evaluation.
        clusters: number of k-means clusters (sklearn n_clusters).
        iterations: maximum k-means iterations (sklearn max_iter).
        metric: distance metric name passed to pairwise_distances.
        """
        sk_kmeans.__init__(self, n_clusters=clusters, max_iter=iterations)
        # Cluster ranks is a list of lists of knn sorted elements for each cluster w.r.t. the cluster mean
        self.rank = rank
        self.metric = metric
    def fit_predict(self, indexes, dataset_obj, sample_weight=None, sort_by_distance_to_mean=False):
        """Cluster the gallery, rank gallery items per query, and store the
        resulting CMC curve on self.ranked_acc.

        indexes: gallery row indices into dataset_obj.data_matx.
        dataset_obj: dataset wrapper; this method reads data_matx, query_idx,
            cam_idx, labels.
        sample_weight: forwarded to sklearn's fit_predict.
        sort_by_distance_to_mean: accepted but never read in this
            implementation - NOTE(review): confirm whether it should have an
            effect.

        Returns self (the CMC curve is available as self.ranked_acc).
        """
        # Query data
        query_data = dataset_obj.data_matx[dataset_obj.query_idx]
        query_ids = dataset_obj.query_idx
        # Gallery data
        gallery_data = dataset_obj.data_matx[indexes]
        gallery_ids = indexes
        logging.info('Finding cluster mean positions.')
        # Fitted is the gallery id cluster labels in order
        fitted = sk_kmeans.fit_predict(
            self, dataset_obj.data_matx[indexes], None, sample_weight=sample_weight)
        logging.info('Done')
        cluster_means = self.cluster_centers_
        # One list of all cluster ids per query item
        cluster_ids = [[x for x in range(len(cluster_means))] for i in range(len(query_ids))]
        # Distances from every query to every cluster centre
        cluster_distance_matrix = pairwise_distances(query_data, cluster_means, metric=self.metric)
        cluster_ids_swapped = swap_indices(cluster_ids)
        # Partition the gallery by assigned cluster label
        cluster_gallery_ids = []
        cluster_gallery_data = []
        for cluster in range(len(cluster_ids_swapped)):
            valid_cluster_gallery_ids = gallery_ids[fitted == cluster]
            valid_cluster_gallery_data = dataset_obj.data_matx[valid_cluster_gallery_ids]
            cluster_gallery_ids.append(valid_cluster_gallery_ids)
            cluster_gallery_data.append(valid_cluster_gallery_data)
        # Per-cluster query-to-gallery distance matrices
        gallery_distances_per_cluster = []
        for cluster in cluster_gallery_data:
            # Take only the gallery ids in the cluster
            gallery_distance_for_cluster = pairwise_distances(query_data, cluster, metric=self.metric)
            gallery_distances_per_cluster.append(gallery_distance_for_cluster)
        gallery_distances_per_cluster_swapped = swap_indices(gallery_distances_per_cluster)
        cluster_gallery_ids_stacked = [cluster_gallery_ids for i in range(len(gallery_distances_per_cluster_swapped))]
        # First sort pass: order each query's clusters by distance to the cluster mean.
        # NOTE(review): the loop variable `gallery_ids` shadows the outer gallery_ids
        # defined above - confirm this is intended.
        sorted_gallery_distances_per_query = []
        sorted_gallery_ids_per_query = []
        for cluster_distances, gallery_distances, gallery_ids, index in zip(cluster_distance_matrix, gallery_distances_per_cluster_swapped, cluster_gallery_ids_stacked, range(len(cluster_distance_matrix))):
            sorted_gallery_distances_per_query.append(sort_by_another(gallery_distances, cluster_distances))
            sorted_gallery_ids_per_query.append(sort_by_another(gallery_ids, cluster_distances))
        num_query_items = len(sorted_gallery_distances_per_query)
        # num_clusters reads the shadowed loop variable left over from the loop above
        num_clusters = len(gallery_ids)
        num_gallery_items = len(gallery_data)
        # Second sort pass: within each cluster, order gallery items by distance to the query.
        double_sorted_gallery_distances_per_query = [[] for i in range(num_query_items)]
        double_sorted_gallery_ids_per_query = [[] for i in range(num_query_items)]
        for query_item, query_item_id, index1 in zip(sorted_gallery_distances_per_query, sorted_gallery_ids_per_query, range(len(sorted_gallery_distances_per_query))):
            for cluster, cluster_id, index2 in zip(query_item, query_item_id, range(len(query_item))):
                sorted_gallery_distances = sort_by_another(cluster, cluster)
                sorted_gallery_ids = sort_by_another(cluster_id, cluster)
                double_sorted_gallery_distances_per_query[index1].append(sorted_gallery_distances)
                double_sorted_gallery_ids_per_query[index1].append(sorted_gallery_ids)
        # Flatten the per-cluster ordering into one ranked list per query
        final_distance_array = []
        final_ids_array = []
        for distances, indexes in zip(double_sorted_gallery_distances_per_query, double_sorted_gallery_ids_per_query):
            final_distance_array.append([item for sublist in distances for item in sublist])
            final_ids_array.append([item for sublist in indexes for item in sublist])
        final_distance_array = np.array(final_distance_array)
        final_ids_array = np.array(final_ids_array)
        # Drop same-camera/same-label matches and pad with -1 sentinels to a fixed width
        final_updated_distance_array = []
        final_updated_ids_array = []
        for distances, indexes, query_id in zip(final_distance_array, final_ids_array, range(num_query_items)):
            mask = [id_is_valid(gal_id, query_id, dataset_obj) for gal_id in indexes]
            redone_distances = np.append(distances[mask], ([-1] * 20))[:num_gallery_items]
            redone_indexes = np.append(indexes[mask], ([-1] * 20))[:num_gallery_items]
            final_updated_distance_array.append(redone_distances)
            final_updated_ids_array.append(redone_indexes)
        final_updated_distance_array = np.array(final_updated_distance_array)
        final_updated_ids_array = np.array(final_updated_ids_array)
        def gal_to_label(row_of_ids):
            # Map a row of gallery ids to their class labels
            return dataset_obj.labels[row_of_ids]
        final_updated_labels_array = np.stack([gal_to_label(row) for row in final_updated_ids_array])
        # torch and get_rank presumably come from the methods.util_models
        # wildcard import - TODO confirm
        tensor_array = torch.tensor(np.array(final_updated_labels_array, dtype=np.int32))
        ranks = torch.stack([get_rank(row, i, dataset_obj) for i, row in enumerate(tensor_array)]).numpy()
        ranked_count = np.bincount(ranks.flatten())[1:-1]
        # CMC curve (percentage of query items which were in any particular rank or below)
        self.ranked_acc = np.cumsum(ranked_count / dataset_obj.query_idx.shape[0])
        return self
def sort_by_another(to_sort, basis):
    """Return the elements of to_sort reordered by ascending basis values."""
    paired = sorted(zip(basis, to_sort), key=lambda pair: pair[0])
    return [element for _, element in paired]
def swap_indices(a_list):
    """Transpose a rectangular list of lists.

    Converts [[1,2,3], [4,5,6]] to [[1,4],[2,5],[3,6]].

    Fix: the old index-based loop read a_list[0] unconditionally and raised
    IndexError on an empty outer list; the zip-based transpose returns []
    instead and is otherwise equivalent for rectangular input.
    """
    if not a_list:
        return []
    return [list(column) for column in zip(*a_list)]
def id_is_valid(gal_id, query_id, data):
    """A gallery id is invalid only when it shares BOTH the camera and the
    label of the query item; any other combination is a valid candidate."""
    same_camera = data.cam_idx[query_id] == data.cam_idx[gal_id]
    same_label = data.labels[query_id] == data.labels[gal_id]
    return not (same_camera and same_label)
def extend_array(l, n):
    """Pad with -1 sentinels from the back and truncate to length n.

    Useful to fill arrays that have been shortened and need to conform to a
    certain size.

    NOTE: the caller's list is mutated in place by the extend() before the
    truncated copy is returned, so any other holder of a reference to *l*
    will see it grow by n sentinel entries.
    """
    l.extend([-1] * n)
    l = l[:n]
    return l
def k_means(prev_args, data_set_obj):
    """Entry point for the kmeans method.

    Parses the method-specific CLI options (--clusters, --iterations,
    --metric), runs KMeans.fit_predict over the gallery, and returns the
    resulting CMC accuracy curve.

    prev_args: previously parsed global options; only .rank is read here.
    data_set_obj: dataset wrapper providing gallery_idx and the data
        consumed by KMeans.fit_predict.
    """
    parser = argparse.ArgumentParser(description='kmeans')
    parser.add_argument('--clusters', required=True,
                        help='The number of clusters to use for kmeans.', type=int)
    parser.add_argument('--iterations', default=300,
                        help='The maximum number of iterations for the algorithm.', type=int)
    parser.add_argument('--metric', default='euclidean',
                        help='The distance metric to use.')
    # parse_known_args lets unrelated global flags pass through untouched
    args, unknown = parser.parse_known_args()
    kmeans = KMeans(prev_args.rank, args.clusters, args.iterations, args.metric)
    kmeans.fit_predict(data_set_obj.gallery_idx, data_set_obj)
    return kmeans.ranked_acc
|
14,935 | 0a352d08587cd19351c184bca87946a8cb336822 | # 学校:四川轻化工大学
# College: Zixin College
# Student: Hu Wanping
# Created: 2021/9/18 15:27
# while-loop demo
a = 1
# loop condition
while a < 10:
    # loop body: print the counter (the increment follows below)
    print(a)
a += 1 |
14,936 | f40d05ddc48dec484d1183f540a2719b7b0b8c93 | """securitysystem URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path, include
from django.views.generic import TemplateView
from securitysystem import settings
from user.views import register_view, login_view, user_logout
urlpatterns = [
    path('admin/', admin.site.urls),
    path('user/', include('user.urls'), name='user'),
    path('residence/', include('residence.urls'), name='residence'),
    path('case/', include('case.urls'), name='case'),
    # The site root serves the login page template directly
    path('', TemplateView.as_view(template_name='login.html')),
    path('register', register_view, name='register'),
    path('login', login_view, name='login'),
    path('logout', user_logout, name='logout')
]
# Serve user-uploaded media through Django only during development
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL,
                          document_root=settings.MEDIA_ROOT)
|
14,937 | 028c99c3a090098286ac53ef6340d8a6bc5697f9 |
# coding: utf-8
# # Project 2 on Bayesian Learning Regression
# In[1]:
import numpy as np
import scipy as sci
import pandas as pd
import math
from matplotlib import pyplot as plt
# In[2]:
#data raw prepare
train_100_10 = pd.read_csv('/Users/jianwenliu/MLPP2/pp2data/train-100-10.csv')
train_100_100 = pd.read_csv('/Users/jianwenliu/MLPP2/pp2data/train-100-100.csv')
train_1000_100 = pd.read_csv('/Users/jianwenliu/MLPP2/pp2data/train-1000-100.csv')
train_f3 = pd.read_csv('/Users/jianwenliu/MLPP2/pp2data/train-f3.csv')
train_f5 = pd.read_csv('/Users/jianwenliu/MLPP2/pp2data/train-f5.csv')
trainR_100_10 = pd.read_csv('/Users/jianwenliu/MLPP2/pp2data/trainR-100-10.csv')
trainR_100_100 = pd.read_csv('/Users/jianwenliu/MLPP2/pp2data/trainR-100-100.csv')
trainR_1000_100 = pd.read_csv('/Users/jianwenliu/MLPP2/pp2data/trainR-1000-100.csv')
trainR_f3 = pd.read_csv('/Users/jianwenliu/MLPP2/pp2data/trainR-f3.csv')
trainR_f5 = pd.read_csv('/Users/jianwenliu/MLPP2/pp2data/trainR-f5.csv')
# In[3]:
test_100_10 = pd.read_csv('/Users/jianwenliu/MLPP2/pp2data/test-100-10.csv')
test_100_100 = pd.read_csv('/Users/jianwenliu/MLPP2/pp2data/test-100-100.csv')
test_1000_100 = pd.read_csv('/Users/jianwenliu/MLPP2/pp2data/test-1000-100.csv')
test_f3 = pd.read_csv('/Users/jianwenliu/MLPP2/pp2data/test-f3.csv')
test_f5 = pd.read_csv('/Users/jianwenliu/MLPP2/pp2data/test-f5.csv')
testR_100_10 = pd.read_csv('/Users/jianwenliu/MLPP2/pp2data/testR-100-10.csv')
testR_100_100 = pd.read_csv('/Users/jianwenliu/MLPP2/pp2data/testR-100-100.csv')
testR_1000_100 = pd.read_csv('/Users/jianwenliu/MLPP2/pp2data/testR-1000-100.csv')
testR_f3 = pd.read_csv('/Users/jianwenliu/MLPP2/pp2data/testR-f3.csv')
testR_f5 = pd.read_csv('/Users/jianwenliu/MLPP2/pp2data/testR-f5.csv')
# ## Task 1
# In[4]:
# the linear regression model
lamrange=np.arange(1,150)
def l_r(traininputs, traintargets, testinputs, testtargets, lam_values=None):
    """Ridge (L2-regularised) linear regression over a grid of lambda values.

    Generalisation: the lambda grid, previously hard-wired to the module-level
    `lamrange`, can now be passed explicitly; omitting it keeps the old
    behaviour, so existing callers are unaffected.

    Args:
        traininputs, traintargets, testinputs, testtargets: pandas DataFrames
            of features / targets (read via .values).
        lam_values: iterable of regularisation strengths; defaults to the
            module-level `lamrange`.

    Returns:
        (mse_vec_tr, mse_vec_te): train / test MSE lists aligned with
        lam_values.
    """
    if lam_values is None:
        lam_values = lamrange  # module-level default grid, backward compatible
    xtrain = traininputs.values
    ttrain = traintargets.values
    xtest = testinputs.values
    ttest = testtargets.values
    mse_vec_tr = []
    mse_vec_te = []
    # loop on lambda
    for lam in lam_values:
        # Closed-form ridge solution: w = (lam*I + X^T X)^-1 X^T t
        penalty = np.diag(np.ones(xtrain.shape[1]) * lam)
        w = np.dot(np.dot(np.linalg.inv(penalty + np.dot(xtrain.transpose(), xtrain)), xtrain.transpose()), ttrain)
        mse_vec_tr.append(np.sum((np.dot(xtrain, w) - ttrain) ** 2) / ttrain.shape[0])
        mse_vec_te.append(np.sum((np.dot(xtest, w) - ttest) ** 2) / ttest.shape[0])
    return mse_vec_tr, mse_vec_te
# In[5]:
# calculate the mse for 10_100 data
mse_vec_tr10,mse_vec_te10=l_r(train_100_10,trainR_100_10,test_100_10,testR_100_10)
mse_true = [3.78] * len(lamrange)
#plot the mse for 10_100 data
plt.plot(lamrange, mse_vec_tr10)
plt.plot(lamrange, mse_vec_te10)
plt.plot(lamrange, mse_true)
# calculate the optimun lambda for 10_100 data
opimun_lamb_10=lamrange[np.argmin(mse_vec_te10)]
print ('the optimum lambda is', opimun_lamb_10)
plt.xlabel("range of lambda")
plt.ylabel("mse")
plt.legend(["train", "test", "true"])
plt.show()
print ('the minimum mse is',min(mse_vec_te10))
# In[6]:
# calculate the mse for 100_100 data
mse_vec_tr100,mse_vec_te100=l_r(train_100_100,trainR_100_100,test_100_100,testR_100_100)
mse_true = [3.78] * len(lamrange)
plt.plot(lamrange, mse_vec_tr100)
plt.plot(lamrange, mse_vec_te100)
plt.plot(lamrange, mse_true)
# calculate the optimun lambda for 100_100 data
opimun_lamb_100=lamrange[np.argmin(mse_vec_te100)]
plt.xlabel("range of lambda")
plt.ylabel("mse")
plt.legend(["train", "test", "true"])
plt.show()
print ('the optimum lambda is', opimun_lamb_100)
print ('the minimum mse is',min(mse_vec_te100))
# In[7]:
# calculate the mse for 1000_100 data
mse_vec_tr1000,mse_vec_te1000=l_r(train_1000_100,trainR_1000_100,test_1000_100,testR_1000_100)
mse_true = [4.015] * len(lamrange)
plt.plot(lamrange, mse_vec_tr1000)
plt.plot(lamrange, mse_vec_te1000)
plt.plot(lamrange, mse_true)
# calculate the optimun lambda for 1000_100 data
opimun_lamb_1000=lamrange[np.argmin(mse_vec_te1000)]
plt.xlabel("range of lambda")
plt.ylabel("mse")
plt.legend(["train", "test", "true"])
plt.show()
print ('the optimum lambda is', opimun_lamb_1000)
print ('the minimum mse is',min(mse_vec_te1000))
# ## Q&A for Task 1
#
# Q:In your report provide the results/plots and discuss them: Why can't the training set MSE be used to select lambda ? How does lambda affect error on the test set? How does the choice of the optimal lambda vary with the number of features and number of examples? How do you explain these variations?
#
# A: The training-set MSE cannot be used to select lambda because it increases monotonically as lambda grows; a monotonic curve has no interior minimum from which an optimal lambda could be read off.
#
# The MSE of trainign data first decrease and then increase ultimately. This makes a optimun point of the lambda, which can be used to measure optimun lambda.
#
# when we fix the number of variables to 100, as the feature increses from 10 to 100, the optimun lambda also increase from 8 to 22.
# when we fix the number of variables to 100, as the feature increses from 100 to 1000, the optimun lambda also increase from 22 to 27.
# When we introduce the lambda, it becomes a regulalized lineaar regression. To minimized the loss function, we need the rugulized para lambda to increase. Both in the case of number of examples of number of features, as it increases, we need bigger lambda for balance to avoid overfitting.
# ## Task 2
# In[8]:
datasize=np.arange(10,800,20)
def l_C(lam,traininputs,traintargets,testinputs,testtargets):
    """Learning-curve evaluation: for each subset size in the module-level
    `datasize` grid, repeatedly (15x) draw a random row subset, fit a ridge
    solution with the given lam, and record the mean test MSE.

    lam: fixed regularisation strength.
    traininputs/traintargets/testinputs/testtargets: pandas DataFrames.

    Returns a list of mean test MSEs, one per entry of `datasize`.

    NOTE(review): the SAME randomly drawn indices subsample both the train
    and the test frames, and indices are drawn from range(0, 998) regardless
    of the frames' actual lengths - confirm both are intended.
    """
    mse_vec_te_mean=[]
    for size in datasize:
        mse_vec_te=[]
        for repeat in range(15):
            # random row indices (drawn with replacement)
            sample_in_100_row = [np.random.randint(0,998) for i in range(size)]
            xtrain=traininputs.values[sample_in_100_row]
            ttrain=traintargets.values[sample_in_100_row]
            xtest=testinputs.values[sample_in_100_row]
            ttest=testtargets.values[sample_in_100_row]
            # closed-form ridge solution: w = (lam*I + X^T X)^-1 X^T t
            lambd = np.diag(np.ones(xtrain.shape[1])*lam)
            w=np.dot(np.dot(np.linalg.inv(lambd+np.dot(xtrain.transpose(),xtrain)),xtrain.transpose()),ttrain)
            mse_te=np.sum(((np.dot(xtest,w)-ttest)**2))/ttest.shape[0]
            mse_vec_te.append(mse_te)
        mse_vec_te_mean.append(np.mean(mse_vec_te))
    return mse_vec_te_mean
# In[9]:
mse_vec_te_mean_lambda04=l_C(4,train_1000_100,trainR_1000_100,test_1000_100,testR_1000_100)
mse_vec_te_mean_lambda18=l_C(18,train_1000_100,trainR_1000_100,test_1000_100,testR_1000_100)
mse_vec_te_mean_lambda28=l_C(28,train_1000_100,trainR_1000_100,test_1000_100,testR_1000_100)
plt.plot(datasize, mse_vec_te_mean_lambda04)
plt.plot(datasize, mse_vec_te_mean_lambda18)
plt.plot(datasize, mse_vec_te_mean_lambda28)
plt.xlabel("range of datasize")
plt.ylabel("mse")
plt.legend(["lambda=4", "lambda=8", "lambda=28"])
plt.show()
# ## Q&A for Task 2
#
# Q:In your report provide the results/plots and discuss them: What can you observe from the plots regarding the dependence on lambda and the number of samples? Consider both the case of small training set sizes and large training set sizes. How do you explain these variations?
#
# A: First, as the lambda increase from 4 to 18 to 28, the MSE of train set will increase. Second, as the number of datasize increase from 0 to 800, the MSE will decrease too.
# In the big data size case, the mse of lambda4 is biger than lambda18, and the mse of lambda28 is the least. Because the lambda 28 is the closest to optimun lambda.
# In the small data size case, the mse of lambda4 is smaller than lambda18, and the mse of lambda28 is the largest.
# ## Task 3
# In[10]:
#the model selection function
def b_m_s(traininputs,traintargets):
    """Bayesian model selection via the evidence approximation.

    Iteratively re-estimates the hyperparameters (alpha for the weight
    prior, beta for the observation noise) until their combined relative
    change falls below 1e-5.

    traininputs/traintargets: pandas DataFrames of features / targets.

    Returns (alpha, beta) at convergence.
    """
    alpha=1
    beta=1
    criterion=1
    #data prepare
    xtrain=traininputs.values
    ttrain=traintargets.values
    # iterate until the relative change in alpha and beta is negligible
    while criterion>0.00001:
        # posterior covariance and mean of the weights
        Sn=np.linalg.inv(np.identity(xtrain.shape[1])*alpha + beta*np.dot(xtrain.transpose(),xtrain))
        Mn=beta*np.dot(Sn,np.dot(xtrain.transpose(),ttrain))
        # eigenvalues of beta * X^T X, used for the effective-parameter count gamma
        lambd_blr=np.linalg.eigvals(beta*np.dot(xtrain.transpose(),xtrain))
        gamma=sum(lambd_blr/(lambd_blr+alpha))
        alpha_upda = gamma/(Mn**2).sum()
        dist=0
        for n in range(xtrain.shape[0]):
            dist += (ttrain[n][0] - (Mn.transpose() * xtrain[n]).sum()) ** 2
        beta_upda=(xtrain.shape[0]-gamma)/dist
        # combined relative change of both hyperparameters
        criterion = abs(abs(alpha_upda - alpha)/alpha + abs(beta_upda-beta)/beta)
        alpha = alpha_upda
        beta = beta_upda
    return alpha, beta
# In[11]:
#an example for 10-100 data
alpha_10,beta_10=b_m_s(train_100_10,trainR_100_10)
# In[12]:
print ('an example for alpha and beta in 10-100 data')
print ('alpha_10 is',alpha_10)
print ('beta_10 is', beta_10)
# In[13]:
#calculate the lambda and mse for the bayesian leaning regression
def b_l_r(traininputs, traintargets, testinputs, testtargets):
    """Ridge fit with lambda = alpha/beta chosen by Bayesian model selection.

    Runs b_m_s to obtain (alpha, beta), fits the closed-form ridge solution
    with lambda = alpha/beta, and evaluates on the test set.

    Args:
        traininputs/traintargets/testinputs/testtargets: pandas DataFrames.

    Returns:
        (lam, mse_vec_te): the selected lambda and a single-element list
        with the test MSE (list kept for backward compatibility).

    Fix: removed the dead train-MSE computation; its result was never
    returned or read by any caller.
    """
    xtrain = traininputs.values
    ttrain = traintargets.values
    xtest = testinputs.values
    ttest = testtargets.values
    mse_vec_te = []
    # Effective regularisation strength implied by the evidence approximation
    alpha, beta = b_m_s(traininputs, traintargets)
    lam = alpha / beta
    lambd = np.diag(np.ones(xtrain.shape[1]) * lam)
    # Closed-form ridge solution: w = (lam*I + X^T X)^-1 X^T t
    w = np.dot(np.dot(np.linalg.inv(lambd + np.dot(xtrain.transpose(), xtrain)), xtrain.transpose()), ttrain)
    mse_vec_te.append(np.sum((np.dot(xtest, w) - ttest) ** 2) / ttest.shape[0])
    return lam, mse_vec_te
# In[14]:
lam_10, msete_10 = b_l_r(train_100_10,trainR_100_10,test_100_10,testR_100_10)
lam_100, msete_100 = b_l_r(train_100_100,trainR_100_100,test_100_100,testR_100_100)
lam_1000, msete_1000 = b_l_r(train_1000_100,trainR_1000_100,test_1000_100,testR_1000_100)
lam_f3, msete_f3 = b_l_r(train_f3,trainR_f3,test_f3,testR_f3)
lam_f5, msete_f5 = b_l_r(train_f5,trainR_f5,test_f5,testR_f5)
print ('Bayesian linear Regression Model Selection:')
print ('for 10-100, the lambda is')
print (lam_10)
print ('for 10-100, the test mse is')
print (msete_10)
print ('for 100-100, the lambda is')
print (lam_100)
print ('for 100-100, the test mse is')
print (msete_100)
print ('for 1000-100, the lambda is')
print (lam_1000)
print ('for 1000-100, the test mse is')
print (msete_1000)
print ('for f3, the lambda is')
print (lam_f3)
print ('for f3, the test mse is')
print (msete_f3)
print ('for f5, the lambda is')
print (lam_f5)
print ('for f5, the test mse is')
print (msete_f5)
# ## Q&A for Task 3
# Q:How do the results compare to the best test-set results from part 1 both in terms of the choice of lambda and test set MSE? (Note that our knowledge from part 1 is with hindsight of the test set, so the question is whether model selection recovers a solution which is close to the best in hindsight.) How does the quality depend on the number of examples and features?
#
# A: The bayesian linear regression give us a little smaller lambda compared to the result in 1. Meanwhile, the mse given in the bayesian learning are mostly larger than that given in the part 1 model.
# By comparing 1000-100, 100-100 and 10-100, we find that when the number of examples is much larger than the number of features, the quality is much better. Otherwise the design matrix is not full rank, which makes it non-invertible.
# ## Task 4
# In[15]:
#calculate logevidence and MSE for bayesian and non regularized model
def log_v(traininputs,traintargets,testinputs,testtargets):
    """For polynomial degrees d = 0..9, run the evidence approximation and an
    unregularised least-squares fit on powers x^0..x^d of the raw inputs.

    Returns (alpha_vec, beta_vec, log_evidence_list, mse_vec_te_bl,
    mse_vec_te_nonreg), one entry per degree d.

    NOTE(review): mse_vec_te_bl is computed on the TRAINING data despite its
    name (see below) - confirm whether that is intended.
    """
    #data prepare
    xtrain_raw=traininputs.values
    ttrain=traintargets.values
    xtest_raw=testinputs.values
    ttest=testtargets.values
    #initialize empty vector for storage
    alpha_vec=[]
    beta_vec=[]
    log_evidence_list=[]
    mse_vec_te_nonreg=[]
    mse_vec_te_bl=[]
    #generate different dimensions (feature columns are x^0 .. x^d)
    for d in range(10):
        # poly = PolynomialFeatures(degree=d)
        # xtrain = poly.fit_transform(xtrain_raw)
        # xtest = poly.fit_transform(xtest_raw)
        xtrain=np.power(xtrain_raw[:None],np.arange(0,d+1))
        xtest=np.power(xtest_raw[:None],np.arange(0,d+1))
        #for bayesian linear regression
        alpha=8
        beta=8
        criterion=1
        # iterate the evidence-approximation updates for alpha and beta
        while criterion>0.00001:
            Sn=np.linalg.inv(np.identity(xtrain.shape[1])*alpha + beta*np.dot(xtrain.transpose(),xtrain))
            Mn=beta*np.dot(Sn,np.dot(xtrain.transpose(),ttrain))
            lambd_blr=np.linalg.eigvals(beta*np.dot(xtrain.transpose(),xtrain))
            gamma=sum(lambd_blr/(lambd_blr+alpha))
            alpha_upda = gamma/(Mn**2).sum()
            dist=0
            for n in range(xtrain.shape[0]):
                dist += (ttrain[n][0] - (Mn.transpose() * xtrain[n]).sum()) ** 2
            beta_upda=(xtrain.shape[0]-gamma)/dist
            # relative change of both hyperparameters as stopping criterion
            criterion = abs(alpha_upda - alpha)/alpha + abs(beta_upda-beta)/beta
            alpha = alpha_upda
            beta = beta_upda
        #return alpha,beta
        alpha_vec.append(alpha)
        beta_vec.append(beta)
        # calculate A = alpha*I + beta*X^T X
        A=np.diag(np.ones(xtrain.shape[1])*alpha)+beta*np.dot(xtrain.transpose(),xtrain)
        # calculate E(mn)
        # NOTE(review): the usual E(m_N) weights the ||m||^2 term by alpha/2,
        # not beta/2 - confirm against the course notes.
        dist1=0
        for n in range(xtrain.shape[0]):
            dist1 += ((Mn.transpose() * xtrain[n]).sum()-ttrain[n][0] ) ** 2
        EMN=beta*1/2*dist1+beta*1/2*(np.sum(Mn**2))
        #calculate log evidence
        log_evidence=xtrain.shape[1]/2*np.log(alpha)+xtrain.shape[0]/2*np.log(beta)-EMN-1/2*np.log(np.linalg.det(A))-xtrain.shape[0]/2*np.log(2*math.pi)
        log_evidence_list.append(log_evidence)
        #calculate mse for bl
        '''
        dist3=0
        for n in range(ttest.shape[0]):
            dist3 += (ttest[n][0]-Mn[0]) ** 2 #I try to use the Mn but the demension doesn't fit here
        mse_te_bl=dist3/ttest.shape[0]
        mse_vec_te_bl.append(mse_te_bl)
        '''
        lam=alpha/beta
        lambd = np.diag(np.ones(xtrain.shape[1])*lam)
        w=np.dot(np.dot(np.linalg.inv(lambd+np.dot(xtrain.transpose(),xtrain)),xtrain.transpose()),ttrain)
        # NOTE(review): computed on xtrain/ttrain, i.e. a TRAIN error despite
        # the "_te_" name - confirm.
        mse_te_bl=np.sum(((np.dot(xtrain,w)-ttrain)**2))/ttrain.shape[0]
        mse_vec_te_bl.append(mse_te_bl)
        #for non regularized linear regression
        #it equals to set lambd_nonreg = np.diag(np.ones(xtrain.shape[1])*0)
        w_nonreg=np.dot(np.dot(np.linalg.inv(np.dot(xtrain.transpose(),xtrain)),xtrain.transpose()),ttrain)
        #calculate mse for non reg
        mse_te_nonreg=np.sum(((np.dot(xtest,w_nonreg)-ttest)**2))/ttest.shape[0]
        mse_vec_te_nonreg.append(mse_te_nonreg)
    return alpha_vec,beta_vec,log_evidence_list,mse_vec_te_bl,mse_vec_te_nonreg
# In[16]:
#input data f3 and f5
alphaf3,betaf3,log_evidencef3,mse_vec_te_blf3,mse_vec_te_nonregf3=log_v(train_f3,trainR_f3,test_f3,testR_f3)
alphaf5,betaf5,log_evidencef5,mse_vec_te_blf5,mse_vec_te_nonregf5=log_v(train_f5,trainR_f5,test_f5,testR_f5)
# In[18]:
#plot alpha and beta on f3 and f5
plt.title("alpha on d")
plt.plot(range(10), alphaf3)
plt.xlabel("range of complexity")
plt.ylabel("alpha for bayesian leaning")
plt.plot(range(10), alphaf5)
plt.xlabel("range of complexity")
plt.ylabel("alpha for bayesian leaning")
plt.legend("f3","f5")
plt.show()
plt.title("beta on d")
plt.plot(range(10), betaf3)
plt.xlabel("range of complexity")
plt.ylabel("beta for bayesian leaning")
plt.plot(range(10), betaf5)
plt.xlabel("range of complexity")
plt.ylabel("beta for bayesian leaning")
plt.legend("f3","f5")
plt.show()
# In[17]:
#plot logevidence mse(bl) and mse(non regularized) on f3 data
plt.title("logevidence on d for f3")
plt.plot(range(10), log_evidencef3)
plt.xlabel("range of dimension")
plt.ylabel("log_evidence")
plt.show()
plt.title("mse for bl on d for f3")
plt.plot(range(10), mse_vec_te_blf3)
plt.xlabel("range of dimension")
plt.ylabel("mse for bl")
plt.show()
plt.title("mse for non regularized on d for f3")
plt.plot(range(10), mse_vec_te_nonregf3)
plt.xlabel("range of dimension")
plt.ylabel("mse for nonreg")
plt.show()
plt.title("logevidence on d for f5")
plt.plot(range(10), log_evidencef5)
plt.xlabel("range of dimension")
plt.ylabel("log_evidence")
plt.show()
plt.title("mse for bl on d for f5")
plt.plot(range(10), mse_vec_te_blf5)
plt.xlabel("range of dimension")
plt.ylabel("mse for bl")
plt.show()
plt.title("mse for non regularized on d for f5")
plt.plot(range(10), mse_vec_te_nonregf5)
plt.xlabel("range of dimension")
plt.ylabel("mse for nonreg")
plt.show()
# ## Q&A for Task 4
#
# Q:Can the evidence be used to successfully select alpha, beta and d for the Bayesian method? How does the non-regularized model fare in these runs? (Note that evidence is only relevant for the Bayesian method and one would need some other method to select d in this model)
#
# A: The evidence can be used to successfully select alpha, beta, and for d for the Bayesian method. For each d, we show the responding alpha and beta. And more importantly, we can see that it gives the optimum d, at about d=3 with the maximum log evidence. In the Bayesian Learning model, when we try to calculate the MSE of test data, we can also see that it firstly decrease with the number of features,and then it has a increases (for f5 but not for f3 in the graph). This is because as the dimension increases, the variance will contribute more than bias, which leads to potential U curve MSE on test deta. In the non-regularized model, the model keep decreasing when the complexity is big. This is because when it is not regularized, the bias of the model will decrase as more demension we have in the model.
|
14,938 | d39f1976dcf03b9a0a333a528cb9b3b512cdd93c | from django.apps import AppConfig
class BusybeeConfig(AppConfig):
    """App-registry configuration for the ``busybee`` Django application."""

    name = 'busybee'
|
14,939 | f7451922a2703e3d1533d812c5d85f0ea152fc5f | # ----------------------------------------------------------------------------
# cocos2d
# Copyright (c) 2008-2012 Daniel Moisset, Ricardo Quesada, Rayentray Tappa,
# Lucio Torre
# Copyright (c) 2009-2019 Richard Jones, Claudio Canepa
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of cocos2d nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
"""
python anim_player.py [options] skeleton animation.anim...
Arguments
skeleton: a python file with a skeleton variable inside which has
the skeleton you will want to animate
animation.anim... : a list of animation file names, each of this files
will be asigned to a number 1-0
More info: see the section skeleton in the cocos programming guide"
"""
from __future__ import division, print_function, unicode_literals
import math
try:
import cPickle as pickle
except ImportError:
import pickle
import cocos
from cocos.director import director
from cocos.sprite import Sprite
import pyglet
from pyglet.gl import *
from pyglet.window import key
import ui
import animator
from cocos.skeleton import Bone, Skeleton, Skin, Animation, Animate
class Player(cocos.layer.Layer):
    """Skeletal animation player example.

    Takes a skeleton, a skin description and a list of pickled animation
    file names, and lets the user trigger the animations from the keyboard
    (number keys 1-0), optionally flipped (F) or translated (T).
    """

    # receive keyboard events from the director's window
    is_event_handler = True

    def __init__(self, sk, skin, *anims):
        super(Player, self).__init__()
        self.skeleton = sk
        # Load the pickled animations, one per file name.
        # BUG FIX: the original used pickle.load(open(a, "rb")) inside a
        # list comprehension, leaking one open file handle per animation;
        # `with` closes each file deterministically.
        self.anims = []
        for fname in anims:
            with open(fname, "rb") as f:
                self.anims.append(pickle.load(f))
        # we create a skin. Skins are what are rendered.
        # skins also are cocos nodes, so we add it to ourselves
        self.skin = animator.BitmapSkin(self.skeleton, skin)
        self.add( self.skin )
        x, y = director.get_window_size()
        self.skin.position = x // 2, y // 2
        self.translate = False
        self.flipped = False

    def on_key_press(self, k, mod):
        numbers = [key._1, key._2, key._3, key._4, key._5,
                   key._6, key._7, key._8, key._9, key._0 ]
        if k == key.T:
            # track if the user wants to translate origin to the current
            # skeleton position
            # if you run two walk left animations without translation
            # you will see a player move left, go to the origin and move
            # left again.
            # if you use translation, the player will just move left twice
            # as far
            self.translate = not self.translate
        if k == key.F:
            # track if the user wants to run the animation normal or flipped
            # if the animation is a guy walking left, when flipped it will
            # walk right
            self.flipped = not self.flipped
            self.skin.flip()
        if k in numbers:
            # find which animation the user wants to run
            n = numbers.index(k)
            if n < len(self.anims):
                # kill current animations
                self.skin.stop()
                anim = self.anims[n]
                # if we want to run the animation flipped, we create
                # the flipped version
                if self.flipped:
                    anim = anim.flipped()
                # we run the animation on the skin using the Animate action.
                # remember that Animate is a cocos action, so you can do
                # any action stuff you want with them.
                # you just have to say which animation you want to use
                # and what kind of translation
                self.skin.do( Animate( anim , recenter_x=self.translate ) )
if __name__ == "__main__":
    import sys, imp, os
    # make ../data (relative to this script) reachable through pyglet.resource
    p = os.path.abspath(os.path.normpath(
        os.path.join(os.path.dirname(__file__).replace("\\", "/"), "../data")
    ))
    pyglet.resource.path.append(p)
    pyglet.resource.reindex()
    director.init()
    if len(sys.argv)<3:
        print(__doc__)
        sys.exit()
    # argv[1]: skeleton module, argv[2]: skin module, argv[3:]: .anim files
    # NOTE(review): `imp` is deprecated since Python 3.4 (importlib replaces it)
    skin_data = imp.load_source("skin", sys.argv[2]).skin
    sk_file = imp.load_source("skeleton", sys.argv[1])
    player = Player(sk_file.skeleton, skin_data, *sys.argv[3:])
    director.run(cocos.scene.Scene(player))
|
14,940 | 8cdeb9163120a754dcc276043870e4479327e17b | #!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2011, Michal Rezny <miisha@seznam.cz>'
__docformat__ = 'restructuredtext en'
import socket, re, datetime, math
from threading import Thread
from lxml.html import fromstring, tostring
from calibre.ebooks.metadata.book.base import Metadata
from calibre.library.comments import sanitize_comments_html
from calibre.utils.cleantext import clean_ascii_chars
from calibre.utils.icu import lower
class Worker(Thread): # Get details
'''
Get book details from Webscription book page in a separate thread
'''
    def __init__(self, url, match_authors, result_queue, browser, log, relevance, plugin, timeout=20):
        """Prepare a detail-fetching thread.

        :param url: Legie book page URL to scrape.
        :param match_authors: author names used to reject non-matching results.
        :param result_queue: queue that receives finished Metadata objects.
        :param browser: mechanize-style browser; cloned so this thread owns its copy.
        :param relevance, plugin, timeout: bookkeeping passed through from the plugin.
        """
        Thread.__init__(self)
        self.daemon = True
        self.url, self.result_queue = url, result_queue
        self.match_authors = match_authors
        self.log, self.timeout = log, timeout
        self.relevance, self.plugin = relevance, plugin
        self.browser = browser.clone_browser()
        # filled in incrementally as parsing progresses
        self.cover_url = self.legie_id = self.isbn = None
def run(self):
try:
self.get_details()
except:
self.log.exception('get_details failed for url: %r'%self.url)
    def get_details(self):
        """Download the book page and feed it to parse_details.

        All failures (timeouts, 404s, unparseable HTML) are logged and
        swallowed: the worker then simply yields no metadata.
        """
        try:
            self.log.info('Legie url: %r'%self.url)
            raw = self.browser.open_novisit(self.url, timeout=self.timeout).read().strip()
        except Exception as e:
            # an HTTP 404 means the URL (and thus the book id) is bogus
            if callable(getattr(e, 'getcode', None)) and \
                    e.getcode() == 404:
                self.log.error('URL malformed: %r'%self.url)
                return
            attr = getattr(e, 'args', [None])
            attr = attr if attr else [None]
            if isinstance(attr[0], socket.timeout):
                msg = 'Legie timed out. Try again later.'
                self.log.error(msg)
            else:
                msg = 'Failed to make details query: %r'%self.url
                self.log.exception(msg)
            return
        raw = raw.decode('utf-8', errors='replace')
        #open('E:\\t3.html', 'wb').write(raw)
        # the site serves a styled 404 page with HTTP 200 for some bad ids
        if '<title>404 - ' in raw:
            self.log.error('URL malformed: %r'%self.url)
            return
        try:
            root = fromstring(clean_ascii_chars(raw))
        except:
            msg = 'Failed to parse Legie details page: %r'%self.url
            self.log.exception(msg)
            return
        self.parse_details(root)
def parse_details(self, root):
try:
legie_id = self.parse_legie_id(self.url)
except:
self.log.exception('Error parsing Legie id for url: %r'%self.url)
legie_id = None
try:
title = self.parse_title(root)
except:
self.log.exception('Error parsing title for url: %r'%self.url)
title = None
try:
authors = self.parse_authors(root)
except:
self.log.exception('Error parsing authors for url: %r'%self.url)
authors = []
if not title or not authors or not legie_id:
self.log.error('Could not find title/authors/Legie id for %r'%self.url)
self.log.error('Legie: %r Title: %r Authors: %r'%(legie_id, title,
authors))
return
self.legie_id = legie_id
rating = comments = series = series_index = None
try:
rating = self.parse_rating(root)
except:
self.log.exception('Error parsing ratings for url: %r'%self.url)
try:
comments = self.parse_comments(root)
except:
self.log.exception('Error parsing comments for url: %r'%self.url)
try:
(series,series_index) = self.parse_series(root)
except:
self.log.info('Series not found.')
try:
tags = self.parse_tags(root)
except:
self.log.exception('Error parsing tags for url: %r'%self.url)
tags = None
if legie_id:
editions = self.get_editions()
if editions:
num_editions = len(editions)
self.log.info('Nalezeno %d vydani'%num_editions)
for edition in editions:
(year, cover_url, publisher, isbn) = edition
mi = Metadata(title, authors)
self.legie_id = "%s#%s"%(legie_id,year)
mi.set_identifier('legie', self.legie_id)
mi.source_relevance = self.relevance
mi.rating = rating
mi.comments = comments
mi.series = series
mi.series_index = series_index
if cover_url:
mi.cover_url = self.cover_url = cover_url
self.plugin.cache_identifier_to_cover_url(self.legie_id, self.cover_url)
if tags:
mi.tags = tags
mi.has_cover = bool(self.cover_url)
mi.publisher = publisher
mi.isbn = isbn
mi.pubdate = self.prepare_date(int(year))
mi.language = "ces"
self.result_queue.put(mi)
else:
mi = Metadata(title, authors)
mi.set_identifier('legie', self.legie_id)
mi.source_relevance = self.relevance
mi.rating = rating
mi.comments = comments
mi.series = series
mi.series_index = series_index
try:
self.cover_url = self.parse_cover(root)
except:
self.log.exception('Error parsing cover for url: %r'%self.url)
if tags:
mi.tags = tags
mi.has_cover = bool(self.cover_url)
mi.publisher = publisher
mi.isbn = isbn
mi.pubdate = self.prepare_date(int(year))
mi.language = "ces"
self.result_queue.put(mi)
if self.legie_id:
if self.cover_url:
self.plugin.cache_identifier_to_cover_url(self.legie_id, self.cover_url)
def parse_legie_id(self, url):
return re.search('/kniha/(\d+)', url).groups(0)[0]
def parse_title(self, root):
title_node = root.xpath('//h2[@id="nazev_knihy"]')
if title_node:
self.log.info('Title: %s'%title_node[0].text)
return title_node[0].text
def parse_authors(self, root):
author_nodes = root.xpath('//div[@id="pro_obal"]/../h3/a')
if author_nodes:
authors = []
for author_node in author_nodes:
author = author_node.text.strip()
authors.append(author)
else:
self.log.info('No author has been found')
def ismatch(authors):
authors = lower(' '.join(authors))
amatch = not self.match_authors
for a in self.match_authors:
if lower(a) in authors:
amatch = True
break
if not self.match_authors: amatch = True
return amatch
if not self.match_authors or ismatch(authors):
return authors
self.log('Rejecting authors as not a close match: ', ','.join(authors))
    def parse_comments(self, root):
        """Return the annotation as sanitized HTML (<p> per paragraph), or None."""
        description_nodes = root.xpath('//div[@id="anotace"]/strong/following-sibling::p')
        if not description_nodes:
            # some pages keep the annotation under a different container id
            description_nodes = root.xpath('//div[@id="nic"]/strong/following-sibling::p')
        if description_nodes:
            comments = []
            for node in description_nodes:
                node_text = node.text_content()
                if node_text != None:
                    comments.append("<p>" + node_text + "</p>")
            #comments = tostring(description_node, method='html')
            comments = sanitize_comments_html("".join(comments))
            return comments
        else:
            self.log.info('No comment node was found.')
def parse_cover(self, root):
cover_node = root.xpath('//img[@id="hlavni_obalka"]/@src')
if cover_node:
cover_url = 'http://www.legie.info/' + cover_node[0]
return cover_url
    def parse_rating(self, root):
        """Return the user rating mapped from the site's 0-100 % to calibre's 0-5."""
        rating_node = root.xpath('//div[@id="procenta"]/span[1]')
        if rating_node:
            rating_string = rating_node[0].text
            if len(rating_string) > 0:
                stars_ = int(rating_string)
                # percent / 20 -> 0..5; true division is active via the
                # module's ``from __future__ import division``
                rating_value = float(stars_ / 20)
                self.log('Found rating:%s'%rating_value)
                return rating_value
        else:
            self.log.info('Rating node not found')
def parse_series(self, root):
series_node = root.xpath('//div[@id="kniha_info"]/div/p[starts-with(text(),"série:")]')
if series_node:
series_name_node = series_node[0].xpath('./a[1]')
if series_name_node:
series_name = series_name_node[0].text
else:
return (None,None)
series_text = series_node[0].text_content()
match = re.search('díl v sérii: (\d+)',series_text)
if match:
self.log.info('Series Index found: %s'%match.groups(0)[0])
return (series_name, int(match.groups(0)[0]))
else:
self.log.info('Series: %s, Index not found'%series_name)
return (series_name, None)
else:
self.log.info('Series node not found')
return (None, None)
def parse_tags(self,root):
tags = []
tags_nodes = root.xpath('//div[@id="kniha_info"]/div/p[starts-with(text(),"Kategorie:")]/a')
if tags_nodes:
for node in tags_nodes:
tags.append(node.text)
return tags
    def get_editions(self):
        """Download ``<book>/vydani`` and return the parsed list of editions.

        A trailing ``#<year>`` on self.url restricts the result to that
        year's edition. Returns None on download errors, [] when parsing
        fails.
        """
        url_parts = self.url.split('#')
        if len(url_parts) == 2:
            base_url,edition_year = url_parts
        else:
            base_url = url_parts[0]
            edition_year = None
        url = '%s/vydani'%(base_url)
        try:
            self.log.info('Legie url: %r'%url)
            raw = self.browser.open_novisit(url, timeout=self.timeout).read().strip()
        except Exception as e:
            # same best-effort error handling as get_details()
            if callable(getattr(e, 'getcode', None)) and \
                    e.getcode() == 404:
                self.log.error('URL malformed: %r'%url)
                return
            attr = getattr(e, 'args', [None])
            attr = attr if attr else [None]
            if isinstance(attr[0], socket.timeout):
                msg = 'Legie timed out. Try again later.'
                self.log.error(msg)
            else:
                msg = 'Failed to make details query: %r'%url
                self.log.exception(msg)
            return
        raw = raw.decode('utf-8', errors='replace')
        #open('E:\\t3.html', 'wb').write(raw)
        if '<title>404 - ' in raw:
            self.log.error('URL malformed: %r'%url)
            return
        try:
            root = fromstring(clean_ascii_chars(raw))
        except:
            msg = 'Failed to parse Legie details page: %r'%url
            self.log.exception(msg)
            return
        self.log.info('Trying to parse editions')
        try:
            editions = self.parse_editions(root,edition_year)
        except:
            self.log.exception('Failed to parse editions page')
            editions = []
        return editions
    def parse_editions(self, root, edition_year):
        """Return a list of ``(year, cover_url, publisher, isbn)`` tuples.

        If *edition_year* matches a parsed edition, only that edition is
        returned (as a single-element list). Fields missing from a node
        retain the value from the previous iteration (or None).
        """
        editions = []
        edition_nodes = root.xpath('//div[@id="vycet_vydani"]/div[@class="vydani cl"]')
        year = cover_url = publisher = isbn = None
        if edition_nodes:
            for node in edition_nodes:
                year_node = node.xpath('./h3/a/text()')
                if year_node:
                    year = year_node[0]
                cover_node = node.xpath('./div[@class="ob"]/img/@src')
                if cover_node:
                    # the site uses a placeholder image when no cover exists
                    if cover_node[0] != 'images/kniha-neni.jpg':
                        cover_url = 'http://www.legie.info/' + cover_node[0]
                publisher_node = node.xpath('./div[@class="data_vydani"]/a[@class="large"]/text()')
                if publisher_node:
                    publisher = publisher_node[0]
                isbn_node = node.xpath('.//span[@title="ISBN-International Serial Book Number / mezinarodni unikatni cislo knihy"]/following-sibling::text()')
                if isbn_node:
                    match = re.search('([0-9\-xX]+)',isbn_node[0])
                    if match:
                        isbn = match.groups(0)[0].upper()
                if year == edition_year:
                    return [(year, cover_url, publisher, isbn)]
                editions.append((year, cover_url, publisher, isbn))
        else:
            self.log.info("No edition nodes")
        return editions
    def prepare_date(self,year):
        """Return Jan 1st of *year* as a timezone-aware (UTC) datetime for mi.pubdate."""
        from calibre.utils.date import utc_tz
        return datetime.datetime(year, 1, 1, tzinfo=utc_tz)
14,941 | bc55c045de07b5802ab40d63b178bfcb4210f7ff | from cc3d.core.PySteppables import *
class SBMLSolverSteppable(SteppableBasePy):
    """Demo steppable: attach the same Antimony model to every cell and step it.

    Cells with id < 10 get the model from an inline string; the rest load an
    identical model from a file -- the assignment should end up uniform.
    """
    def __init__(self, frequency=1):
        SteppableBasePy.__init__(self, frequency)
    def start(self):
        # Antimony model string
        model_string = """model test_1_Antimony()
# Model
S1 => S2; k1*S1
# Initial conditions
S1 = 1
S2 = 0
k1 = 1
end"""
        # Antimony model file containing same model as model_string
        model_file = 'Simulation/test_1_Antimony.txt'
        # adding options that setup SBML solver integrator
        # these are optional but useful when encountering integration instabilities
        options = {'relative': 1e-10, 'absolute': 1e-12}
        self.set_sbml_global_options(options)
        # Apply model_string to first ten cells, and model_file to the rest: should result in a uniform model assignment
        for cell in self.cell_list:
            if cell.id < 10:
                self.add_antimony_to_cell(model_string=model_string, model_name='dp', cell=cell, step_size=0.0025)
            else:
                self.add_antimony_to_cell(model_file=model_file, model_name='dp', cell=cell, step_size=0.0025)
    def step(self, mcs):
        # advance every cell's attached SBML model by one integration step per MCS
        self.timestep_sbml()
    def finish(self):
        # this function may be called at the end of simulation - used very infrequently though
        return
# Demo: accessing SBML values for further manipulation/coupling with other components
class IdFieldVisualizationSteppable(SteppableBasePy):
    """Mirrors each cell's two SBML species into scalar visualization fields."""
    def __init__(self, frequency=1):
        SteppableBasePy.__init__(self, frequency)
    def start(self):
        # one cell-level scalar field per SBML species
        self.create_scalar_field_cell_level_py("IdFieldS1")
        self.create_scalar_field_cell_level_py("IdFieldS2")
    def step(self, mcs):
        id_field_s1 = self.field.IdFieldS1
        id_field_s2 = self.field.IdFieldS2
        for cell in self.cell_list:
            # NOTE(review): the [0]/[1] indexing assumes values() returns an
            # indexable sequence; a plain Python 3 dict would need list(...)
            # first -- presumably cell.sbml.dp is a cc3d mapping whose
            # values() is indexable; verify.
            sbml_values = cell.sbml.dp.values()
            id_field_s1[cell] = sbml_values[0]
            id_field_s2[cell] = sbml_values[1]
            if cell.id == 1:
                print(sbml_values)
|
14,942 | febd42e19bd79934c78d37405ae12c39ef929000 | #!/usr/bin/python
from __future__ import print_function
import solidscraper as ss
import traceback
import argparse
import codecs # utf-8 text files
import json
import os
from collections import defaultdict
from datetime import datetime
from datetime import timedelta
from dateutil import tz
import time
def toFixed(strn, length):
    """Return *strn* truncated and left-justified to exactly *length* chars.

    Lists and ints are stringified first so callers can pass raw counters
    or collections straight from the scraper tables.
    """
    # isinstance replaces the original `type(strn) == int` anti-pattern and
    # also covers bools (int subclass), which previously slipped through
    # unconverted and crashed on the slice below.
    if isinstance(strn, (list, int)):
        strn = str(strn)
    return (u"{:<%i}" % (length)).format(strn[:length])
def sumc(collection):
    """Return the sum of *collection* as a float.

    Mappings are summed over their values; any other iterable (list, set,
    tuple, generator, ...) is summed over its elements. The original
    hand-rolled loop treated every non-list/set/tuple as a mapping and
    crashed on generic iterables; builtin sum() handles them all.
    """
    if isinstance(collection, dict):
        return float(sum(collection.values()))
    return float(sum(collection))
parser = argparse.ArgumentParser(
description='LIDIC Twitter Scraper v.1.1',
epilog=(
"Author: Burdisso Sergio (<sergio.burdisso@gmail.com>), Phd. Student. "
"LIDIC, Department of Computer Science, National University of San Luis"
" (UNSL), San Luis, Argentina."
)
)
parser.add_argument('USER', help="target's twitter user name")
args = parser.parse_args()
# https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
_TIMEZONE_ = tz.gettz('America/Buenos_Aires')
_UTC_TIMEZONE_ = tz.gettz('UTC')
ss.setVerbose(False)
ss.scookies.set("lang", "en")
ss.setUserAgent(ss.UserAgent.CHROME_LINUX)
_XML_TEMPLATE = """\
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<author type="twitter" url="https://twitter.com/%s" id="%s" name="%s" join_date="%s" location="%s" personal_url="%s" tweets="%s" following="%s" followers="%s" favorites="%s" age_group="xx" gender="xx" lang="xx">
<biography>
<![CDATA[%s]]>
</biography>
<documents count="%s">%s
</documents>
</author>"""
_XML_TWEET_TEMPLATE = """
<document id="%s" timestamp="%s" lang="%s" url="https://twitter.com/%s/status/%s"><![CDATA[%s]]></document>
"""
def scrapes(user):
_XML_TWEETS = ""
_XML_ = ""
_USER_ = user
logged_in = True
has_more_items = True
min_position = ""
items_html = ""
document = None
i = 0
user_id = 0
user_bio = ""
user_url = ""
user_name = ""
user_favs = 0
user_tweets = 0
user_isFamous = False
user_location = ""
user_joinDate = ""
user_following = 0
user_followers = 0
tweet_counter = 0
comments_counter = 0
mention_tweet_counter = 0
url_tweet_counter = 0
retweet_counter = 0
tweet_id = 0
tweet_lang = ""
tweet_raw_text = ""
tweet_datetime = None
tweet_mentions = None
tweet_hashtags = None
tweet_owner_id = 0
tweet_retweeter = False
tweet_timestamp = 0
tweet_owner_name = ""
tweet_owner_username = ""
dict_mentions_mutual = defaultdict(lambda: 0)
dict_mentions_user = defaultdict(lambda: 0)
dict_mentions_p = defaultdict(lambda: 0)
dict_hashtag_p = defaultdict(lambda: 0)
dict_retweets = defaultdict(lambda: 0)
dict_mentions = defaultdict(lambda: 0)
dict_hashtag = defaultdict(lambda: 0)
dict_lang_p = defaultdict(lambda: 0)
dict_lang = defaultdict(lambda: 0)
_time_start_ = time.time()
print("\nAccessing %s profile on twitter.com..." % (_USER_))
error = True
while error:
try:
user_url = "/%s/with_replies"
res = ss.get(user_url % (_USER_), redirect=False)
if res.status // 100 != 2:
print("It looks like you're not logged in, I'll try to collect only what is public")
logged_in = False
user_url = "/%s"
document = ss.load(user_url % (_USER_))
if not document:
print("nothing public to bee seen... sorry")
return
error = False
except:
time.sleep(5)
profile = document.select(".ProfileHeaderCard")
# user screenname
_USER_ = profile.select(
".ProfileHeaderCard-screenname"
).then("a").getAttribute("href")
if not _USER_:
return
_USER_ = _USER_[0][1:]
_BASE_DIR_ = "_OUTPUT_/%s/" % (_USER_)
_BASE_PHOTOS = _BASE_DIR_ + "photos/"
_BASE_PHOTOS_PERSONAL = _BASE_PHOTOS + "personal/"
_BASE_PHOTOS_EXTERN = _BASE_PHOTOS + "extern/"
try:
os.makedirs(_BASE_PHOTOS_PERSONAL)
except:
pass
try:
os.makedirs(_BASE_PHOTOS_EXTERN)
except:
pass
# Is Famous
user_isFamous = True if profile.select(".Icon--verified") else False
# Name
user_name = profile.select(".ProfileHeaderCard-name").then("a .ProfileHeaderCard-nameLink").text()
# Biography
user_bio = profile.select(".ProfileHeaderCard-bio").text()
# Location
user_location = profile.select(".ProfileHeaderCard-locationText").text()
# Url
user_url = profile.select(".ProfileHeaderCard-urlText").then("a").getAttribute("title")
user_url = user_url[0] if user_url else ""
# Join Date
user_joinDate = profile.select(".ProfileHeaderCard-joinDateText").getAttribute("title")
user_joinDate = user_joinDate[0] if user_joinDate else ""
profileNav = document.select(".ProfileNav")
# user id
user_id = profileNav.getAttribute("data-user-id")[0]
# tweets
user_tweets = profileNav.select(".ProfileNav-item--tweets").then("a").getAttribute("title")
user_tweets = user_tweets[0].split(" ")[0].replace(",", "") if user_tweets else 0
# following
user_following = profileNav.select(".ProfileNav-item--following").then("a").getAttribute("title")
user_following = user_following[0].split(" ")[0].replace(",", "") if user_following else 0
# followers
user_followers = profileNav.select(".ProfileNav-item--followers").then("a").getAttribute("title")
user_followers = user_followers[0].split(" ")[0].replace(",", "") if user_followers else 0
# favorites
user_favs = profileNav.select(".ProfileNav-item--favorites").then("a").getAttribute("title")
if user_favs:
user_favs = user_favs[0].split(" ")[0].replace(",", "")
else:
user_favs = ""
user_profilePic = document.select(".ProfileAvatar").andThen("img").getAttribute("src")[0]
print("\n> downloading profile picture...")
ss.download(user_profilePic, _BASE_PHOTOS)
print("\n\nAbout to start downloading user's timeline:")
timeline_url = "https://twitter.com/i/profiles/show/%s/timeline/" % (_USER_)
timeline_url += "%s?include_available_features=1&include_entities=1" % ("with_replies" if logged_in else "tweets")
while has_more_items:
try:
print("\n> downloading timeline chunk [ %s of %s tweets so far, max_position=%s]... \n" % (tweet_counter + retweet_counter, user_tweets, min_position))
if not min_position:
r = ss.get(timeline_url)
if not r:
break
else:
r = ss.get(timeline_url + "&max_position=%s" % min_position)
if not r:
break
try:
j = json.loads(r.body)
except:
print("[*] Error while trying to parse the JSON response, aborting...")
has_more_items = False
break
items_html = j["items_html"].encode("utf8")
document = ss.parse(items_html)
items_html = document.select("li")
for node in items_html:
node = node.select("@data-tweet-id")
if node:
node = node[0]
else:
continue
tweet_id = node.getAttribute("data-tweet-id")
tweet_owner_id = node.getAttribute("data-user-id")
tweet_owner_username = node.getAttribute("data-screen-name")
tweet_owner_name = node.getAttribute("data-name")
tweet_retweeter = node.getAttribute("data-retweeter")
tweet_mentions = node.getAttribute("data-mentions")
tweet_mentions = tweet_mentions.split() if tweet_mentions else []
tweet_raw_text = node.select(".tweet-text").text()
tweet_lang = node.select(".tweet-text").getAttribute("lang")
tweet_lang = tweet_lang[0] if tweet_lang else ""
tweet_timestamp = int(node.select("@data-time-ms").getAttribute("data-time")[0])
tweet_hashtags = []
tweet_iscomment = node.getAttribute("data-is-reply-to") == "true"
for node_hashtag in node.select(".twitter-hashtag"):
hashtag = node_hashtag.text().upper().replace("#.\n", "")
tweet_hashtags.append(hashtag)
dict_hashtag[hashtag] += 1
if not tweet_retweeter:
dict_hashtag_p[hashtag] += 1
tweet_links = [link for link in node.select(".tweet-text").then("a").getAttribute("href") if link.startswith("http")]
# updating counters
tweet_owner_username = tweet_owner_username.upper()
for uname in tweet_mentions:
if uname.upper() == _USER_.upper():
dict_mentions_user[tweet_owner_username] += 1
tweet_datetime = datetime.fromtimestamp(tweet_timestamp).replace(tzinfo=_UTC_TIMEZONE_).astimezone(_TIMEZONE_)
for usermen in tweet_mentions:
dict_mentions[usermen.upper()] += 1
if not tweet_retweeter:
dict_mentions_p[usermen.upper()] += 1
dict_lang[tweet_lang] += 1
if tweet_retweeter:
retweet_counter += 1
dict_retweets[tweet_owner_username] += 1
else:
if tweet_owner_id == user_id:
dict_lang_p[tweet_lang] += 1
# updating counters
tweet_counter += 1
if tweet_iscomment:
comments_counter += 1
if len(tweet_mentions):
mention_tweet_counter += 1
if len(tweet_links):
url_tweet_counter += 1
_XML_TWEETS += _XML_TWEET_TEMPLATE % (
tweet_id,
tweet_timestamp,
tweet_lang,
_USER_,
tweet_id,
tweet_raw_text
)
print(
"|%s |%s[%s]%s\t|%s |%s |%s |%s |%s"
%
(
toFixed(tweet_datetime.isoformat(" "), 16),
tweet_id,
tweet_lang,
"r" if tweet_retweeter else ("c" if tweet_iscomment else ""),
toFixed(tweet_owner_id, 10),
toFixed(tweet_owner_username, 16),
toFixed(tweet_owner_name, 19),
toFixed(tweet_mentions + tweet_hashtags, 10),
toFixed(tweet_raw_text, 54) + "..."
)
)
if len(node.select("@data-image-url")):
img_list = node.select("@data-image-url")
len_imgs = len(img_list)
print("\n" + "- " * 61)
if tweet_retweeter:
print("\t> %i extern photo found" % (len_imgs))
imgs_base_path = _BASE_PHOTOS_EXTERN
else:
print("\t> %i personal photo(s) found" % (len_imgs))
imgs_base_path = _BASE_PHOTOS_PERSONAL
for elem in img_list:
img_url = elem.getAttribute("data-image-url")
print("\t\tdownloading photo from %s... \n" % (img_url))
ss.download(img_url, imgs_base_path)
print("- " * 61 + " \n")
elif node.getAttribute("data-card2-type") == "player":
print("\n" + "- " * 61)
video = ss.load("https://twitter.com/i/cards/tfw/v1/%s?cardname=player&earned=true"%(tweet_id))
video_title = video.select(".TwitterCard-title").text()
video_url = video.select("iframe").getAttribute("src")[0]
print("\t> new video '%s' found [ %s ]" % (video_title, video_url))
print("\n" + "- " * 61)
min_position = tweet_id
has_more_items = j["has_more_items"] and items_html
i += 1
except Exception as e:
print("------------------------------------------")
traceback.print_stack()
print("[ error: %s ]" % str(e))
print("[ trying again... ]")
time.sleep(5)
print("\nprocess finished successfully! =D -- time:", timedelta(seconds=time.time() - _time_start_) , " --")
_XML_ = _XML_TEMPLATE % (
_USER_,
user_id,
user_name,
user_joinDate,
user_location,
user_url,
user_tweets,
user_following,
user_followers,
user_favs,
user_bio.replace("\r\n", ""),
tweet_counter,
_XML_TWEETS
)
fxml = codecs.open("%s%s.xml" % (_BASE_DIR_, _USER_), "w", "utf-8")
fxml.write(_XML_)
fxml.close()
personal_lang = ""
for t in sorted(dict_lang_p.items(), key=lambda k: -k[1])[:2]:
personal_lang += "\t%s: %.2f%% (%s)" % (t[0], t[1] / sumc(dict_lang_p) * 100 , t[1])
lang = ""
for t in sorted(dict_lang.items(), key=lambda k: -k[1])[:2]:
lang += "\t%s: %.2f%% (%s)" % (t[0], t[1] / sumc(dict_lang) * 100, t[1])
mentions_user = ""
for t in sorted(dict_mentions_user.items(), key=lambda k: -k[1]):
mentions_user += "\t%s(%s)" % t
personal_hashtags = ""
for t in sorted(dict_hashtag_p.items(), key=lambda k: -k[1]):
personal_hashtags += "\t%s: %s\n" % t
hashtags = ""
for t in sorted(dict_hashtag.items(), key=lambda k: -k[1]):
hashtags += "\t%s: %s\n" % t
personal_mentions = ""
for t in sorted(dict_mentions_p.items(), key=lambda k: -k[1]):
personal_mentions += "\t%s: %s\n" % t
if t[0] in dict_mentions_user:
dict_mentions_mutual[t[0]] += 1
mentions_mutual = ""
for t in sorted(dict_mentions_mutual.items(), key=lambda k: -k[1]):
mentions_mutual += "\t%s(%s)" % t
mentions = ""
for t in sorted(dict_mentions.items(), key=lambda k: -k[1]):
mentions += "\t%s: %s\n" % t
retweets = ""
for t in sorted(dict_retweets.items(), key=lambda k: -k[1]):
retweets += "\t%s: %s\n" % t
output = """
\n\n
Overview:
---------
Id: %s
Name: %s
Join Date: %s
Biography: %s
Location: %s
Url: %s
Is Famous: %s
Tweets: %s
Following: %s
Followers: %s
Favorites: %s
Number of tweets captured: %s (%s tweets / %s retweets)
--------------------------
> Personal language:
%s
> Total language:
%s
> People who has mentioned him:
%s
> Mutual Mentions:
%s
> Personal Hashtags:
%s
> Total Hashtags:
%s
> Personal Mentions:
%s
> Total Mentions:
%s
> Retweets:
%s
"""
try:
output = output % (
user_id,
user_name,
user_joinDate,
user_bio.replace("\r\n", ""),
user_location,
user_url,
user_isFamous,
user_tweets,
user_following,
user_followers,
user_favs,
tweet_counter + retweet_counter, tweet_counter, retweet_counter,
personal_lang,
lang,
mentions_user,
mentions_mutual,
personal_hashtags,
hashtags,
personal_mentions,
mentions,
retweets
)
except:
pass
print(output)
print("[ tweets saved in %s%s.xml ]" % (_BASE_DIR_, _USER_))
print("[ profile picture saved in %s ]" % _BASE_PHOTOS)
print("[ images uploaded by the user saved in %s ]" % _BASE_PHOTOS_PERSONAL)
print("[ images retweeted by the user saved in %s ]" % _BASE_PHOTOS_EXTERN)
print("[ finised ]\n")
ss.scookies.load()
document = ss.load("https://twitter.com/login")
if document.select("title").text().startswith("Login"):
params = {
"session[username_or_email]": "", # <- your user
"session[password]": "", # <- your password
"authenticity_token": document.select("@name=authenticity_token").getAttribute("value")[0],
"scribe_log": "",
"redirect_after_login": "",
"remember_me": "1"
}
ss.post("/sessions", params)
ss.scookies.save()
if __name__ == "__main__":
scrapes(args.USER)
|
14,943 | 67bb1a6be71f637c73934d20bd724dadbdc71a3c | from django.utils.translation import ugettext_lazy as _
from django.db.models.signals import pre_save
from django.dispatch import receiver
from .base import *
class Country(Base):
    """A country with a display name and a single-letter index (its initial).

    Both fields are normalized by the pre_save signal receiver below.
    """
    name = models.CharField(
        verbose_name=_('Name'),
        max_length=200,
        null=True
    )
    # first letter of `name`, used for alphabetical grouping
    c_index = models.CharField(
        verbose_name=_('Index'),
        max_length=200,
        null=True
    )
    def __str__(self):
        return self.name
@receiver(pre_save, sender=Country)
def pre_save_Country(sender, instance, **kwargs):
    """Normalize a Country before save: capitalized name, first-letter index.

    BUG FIX: the model declares name as null=True, so the original crashed
    with AttributeError/IndexError when name was missing; such instances
    are now saved untouched. The debug print was also removed.
    """
    if not instance.name:
        return
    instance.name = instance.name.capitalize()
    instance.c_index = instance.name[0].upper()
|
14,944 | 4ab38cb73baec7473bc2f75cf13e2f4ddd21c3ca | import json
class Tree:
    """A rooted tree: a ``root`` label plus a JSON-serializable ``branches`` payload."""

    def __init__(self, root, branches):
        self.root = root
        self.branches = branches

    def __repr__(self):
        # JSON-encoded branches followed by the root label, e.g. "[1, 2]r"
        encoded = json.dumps(self.branches)
        return encoded + self.root
|
14,945 | d48f5cc11542a4677375366381e7c469801184a3 | # -*- coding: utf-8 -*-
from .cases import *
from .simple_test_cases import *
|
14,946 | cf438b6fa67bfdacdba859e706daafa06127137c | # -*- coding: utf-8 -*-
"""
Lin GUO
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
train=pd.read_csv('data_subsets/0_6.csv')
meta_train = pd.read_csv('metadata_train.csv')
def phase_indices(signal_num):
    """Map a measurement id to its three phase column indices (3 columns per signal)."""
    base = 3 * signal_num
    return base, base + 1, base + 2
# Row indices of all faulty signals (target == 1) in the metadata table.
sig_fault=meta_train[meta_train['target']==1].index
# Plot the three phases of one measurement, first as points then as lines.
s_id = 0
p1,p2,p3 = phase_indices(s_id)
plt.figure(figsize=(10,5))
plt.title('Signal %d / Target:%d'%(s_id,meta_train[meta_train.id_measurement==s_id].target.unique()[0]))
plt.plot(train.iloc[:,p1],marker="o", linestyle="none")
plt.plot(train.iloc[:,p2])
plt.plot(train.iloc[:,p3])
plt.figure(figsize=(10,5))
plt.title('Signal %d / Target:%d'%(s_id,meta_train[meta_train.id_measurement==s_id].target.unique()[0]))
plt.plot(train.iloc[:,p1],marker="o", linestyle="none")
plt.plot(train.iloc[:,p2],marker="o", linestyle="none")
plt.plot(train.iloc[:,p3],marker="o", linestyle="none")
# Scatter-plot ten faulty signals, one subplot each, loaded from data_fault/.
f, axarr = plt.subplots(10, sharex=True,figsize=(10,15))
n=0
for i in range(0,10):
    # print(train.iloc[:,subset[i]:subset[i+1]].head())
    name='data_fault/'+str(sig_fault[n+i])+'.csv'
    print(name)
    #print(df.tail(2))
    df=pd.read_csv(name)
    axarr[i].plot(df,marker="o", linestyle="none")
|
14,947 | de5ba9c122515fce3203affdfe11a2c2fe3c4344 | # a tentative script to upload all existing drstree "versions" into CMIP sqlite database
# each variable, mip, experiment, model, ensemble combination add a new instance in "instance"
# for each instance there should be at least one version in "version" table
# for each version add at least one file in table "files"
from __future__ import print_function
from ARCCSSive.CMIP5.update_db_functions import insert_unique, add_bulk_items
from ARCCSSive.CMIP5.other_functions import *
#NB tmptree root dir is also defined there
from ARCCSSive.CMIP5 import DB
from ARCCSSive.CMIP5.Model import Instance, Version, VersionFile
import glob
# open local database using ARCSSive interface
# Open the local CMIP5 database via the ARCCSSive interface.
conn = DB.connect()
db = conn.session
#kwargs={"institute":"BCC","model":"bcc-csm1-1-m", "experiment":"historical"}
# Default: match everything; then narrow to one model/experiment/frequency.
kwargs=defaultdict(lambda: "*")
#kwargs=dict(model="IPSL-CM5A-MR", experiment="amip", mip="fx")
kwargs=dict(model="IPSL-CM5A-MR", experiment="amip", frequency="mon")
#loop through entire drstree or a subdir by using constraints **kwargs
instances=list_drstree(**kwargs)
print(instances)
#for each instance individuated add instance row
for inst in instances:
    # call file_details to retrieve experiment, variable, model etc. from filename
    # call drs_details to retrieve model, experiment, freq. & realm (become mip), variable, ensemble from drstree path
    # return dictionary
    # could i create an Instance, Version and file object instead and pass that on?
    kw_instance={}
    kw_version={}
    kw_files={}
    frequency, kw_instance = drs_details(inst)
    # Take the files under latest/ to work out the instance's MIP table.
    filename=glob.glob(inst+"/latest/*.nc")
    kw_instance['mip'] = get_mip(filename)
    #print(kw_instance)
    # make sure details list isn't empty
    if kw_instance:
        versions = list_drs_versions(inst)
        # add instance to db if not already existing
        inst_obj,new = insert_unique(db, Instance, **kw_instance)
        print(inst)
        print(inst_obj.id,new)
        #P use following two lines if tmp/tree
        #kw_version['version'] = find_version(bits[:-1], version)
        #kw_version['path'] = '/'.join(bits[:-1])
        kw_version['instance_id'] = inst_obj.id
        for v in versions:
            # add version to db if not already existing
            kw_version['version'] = v
            files = list_drs_files(inst+"/"+v)
            kw_version['path'] = tree_path("/".join([inst,v,files[0]]))
            #print(kw_version.items())
            v_obj,new = insert_unique(db, Version, **kw_version)
            print(v)
            print(v_obj.id,new)
            # No files recorded yet for this version: bulk-insert them all.
            if v_obj.filenames==[]:
                rows=[]
                for f in files:
                    checksum=check_hash(v_obj.path+"/"+f,'md5')
                    rows.append(dict(filename=f, md5=checksum, version_id=v_obj.id))
                add_bulk_items(db, VersionFile, rows)
            # Otherwise insert file rows one by one, skipping duplicates.
            else:
                kw_files['version_id']=v_obj.id
                for f in files:
                    kw_files['filename']=f
                    kw_files['md5']=check_hash(v_obj.path+"/"+f,'md5')
                    insert_unique(db, VersionFile, **kw_files)
# need to have function to map bits of path to db instance fields!!
#model,experiment,variable,mip,ensemble
#kwargs[k]=
|
14,948 | 96c6016362799363992157988a4e7137da22c9e3 | from django.urls import path
from . import views
from django.conf.urls.static import static
from django.conf import settings
from django.conf.urls import url, include
from .feeds import AllPostRssFeed
from django.views.static import serve
from blog.settings import MEDIA_ROOT
# URL namespace for reversing, e.g. {% url 'blog_app:detail' pk %}.
app_name = 'blog_app'

urlpatterns = [
    path('', views.IndexView.as_view(), name='index'),
    path('posts/<int:pk>', views.PostDetailView.as_view(), name='detail'),
    path('archives/<int:year>/<int:month>', views.ArchiveView.as_view(), name='archive'),
    path('categorys/<int:pk>', views.CategoryView.as_view(), name='category'),
    path('show_categorys/', views.show_categorys, name='show_categorys'),
    path('show_archives/', views.show_archives, name='show_archives'),
    path('tags/<int:pk>', views.TagView.as_view(), name='tag'),
    url(r'mdeditor/', include('mdeditor.urls')),
    url(r'all/rss/', AllPostRssFeed(), name='rss'),
    path('search/', views.search, name='search'),
    path('profile/', views.profile, name='profile'),
    # Serve media files directly (used for image retrieval).
    url(r'^media/(?P<path>.*)', serve, {"document_root": MEDIA_ROOT}),
]

if settings.DEBUG:
    # static files (images, css, javascript, etc.)
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
14,949 | 843eb1776030e2306ca9c1e93aa2ca6fe279f54b | import datetime as dt
import re
from src.schema.movie import MovieSchema, MovieSearch
from marshmallow.exceptions import ValidationError
from src.models.movie import Movie
from flask import jsonify
def request_to_schema(request, toSearch = False):
    """Convert a Flask JSON request body into a serialized movie schema dict.

    Parameters:
        request: the incoming Flask request; must carry a JSON body.
        toSearch: when True use the looser ``MovieSearch`` schema and allow
            fields to be absent; when False every field is required.

    Returns:
        The schema dump (dict) on success, or a ``(jsonify(...), status)``
        error tuple with HTTP status 400.
    """
    if not request.is_json:
        return jsonify({"msg": "Missing JSON in request"}), 400
    fields = ['title', 'genre', 'expiry_date', 'release_year']
    args = {}
    schema = MovieSchema() if not toSearch else MovieSearch()
    for field in fields:
        arg = request.json.get(field, None)
        if not arg and not toSearch:
            return jsonify({"msg": "Missing {} parameter".format(field)}), 400
        # BUG FIX: the original used ``field is 'expiry_date'`` -- identity
        # comparison on a string literal, which only works by accident of
        # CPython interning and raises SyntaxWarning on 3.8+.  Also replaced
        # ``arg != None`` with the idiomatic identity test.
        if field == 'expiry_date' and arg is not None:
            # Parse "YYYY-MM-DD[...]"-style strings by splitting on -, / or :
            # and feeding the integer parts straight to datetime.
            arg = dt.datetime(*[int(num) for num in re.compile(r"[/\:-]").split(arg)])
        args[field] = arg
    try:
        result = schema.dump(args)
    except ValidationError as e:
        return jsonify({"msg": "Errors with request body: {}".format(e)}), 400
    return result
def movie_dump(movie: Movie):
    # Serialize a Movie ORM object to a plain dict via its marshmallow schema.
    return MovieSchema().dump(movie)
14,950 | 5890841ac59bdc1220956f1a8bbb042af776b522 | #!/usr/bin/python
import sys
# Sum post counts piped in on stdin.  Each line is expected to carry the
# count as the second quote-delimited field, e.g.:  prefix "123" suffix
total_post = 0
for line in sys.stdin:
    data = line.strip().split("\"")
    total_post += int (data[1])
print ("Total post ", total_post)
14,951 | ed3b3396500b660d494085799baa05ae712a375f | import pandas as pd
from openpyxl import load_workbook
class DataFrame(object):
    """Thin wrapper around a pandas DataFrame loaded from one Excel sheet."""

    def __init__(self, casepath, sheet_name):
        # Load the test-case sheet into a pandas DataFrame.
        self.testcase = pd.read_excel(casepath, sheet_name=sheet_name)

    def fill_null(self, column):
        # Merged cells in the source sheet come through as NaN; forward-fill
        # them from the row above (original intent per the Chinese comment).
        # BUG FIX: the original called the non-existent ``fullna`` (an
        # AttributeError at runtime) and discarded the result -- pandas fill
        # methods return a new Series rather than mutating in place.
        self.testcase[column] = self.testcase[column].ffill()

    def write_excel(self, casepath, sheet_name):
        # ``to_excel`` alone would overwrite the workbook; reopen it with
        # openpyxl so the new sheet is appended instead.
        book = load_workbook(casepath)
        excelWriter = pd.ExcelWriter(casepath, engine='openpyxl')
        excelWriter.book = book
        self.testcase.to_excel(excelWriter, sheet_name=sheet_name)
        excelWriter.close()

    def get_testcase(self):
        # Accessor kept for backward compatibility with existing callers.
        return self.testcase
if __name__ == '__main__':
    # Ad-hoc manual test: load one sheet and append it back as a new sheet.
    test = DataFrame('C:\\Users\\Z8647\\Desktop\\data_test02.xlsx','Sheet5')
    print(test)
    book = load_workbook('C:\\Users\\Z8647\\Desktop\\data_test02.xlsx')
    excelWriter =pd.ExcelWriter('C:\\Users\\Z8647\\Desktop\\data_test02.xlsx',engine='openpyxl')
    print(excelWriter.book)
    print(book)
    excelWriter.book = book
    test.testcase.to_excel(excelWriter,sheet_name='Sheet6')
    excelWriter.close()
|
14,952 | 0858b404343ebfc129d3206f4311263b85b219c9 | import numpy as np
import matplotlib.pyplot as plt
from collections import Counter
# Per-face probabilities for a biased six-sided die (they sum to 1.0,
# as np.random.choice requires).
prob = [0.1, 0.2, 0.3,
        0.1, 0.2, 0.1]
dice = list(range(1, 7))
# Draw 100k samples from the biased die and tally the outcomes.
samples = np.random.choice(dice, size=100000, p=prob)
tmp = Counter(samples)
print(tmp)
print(tmp.__class__)
# Bar-plot the empirical frequencies in face order.
X = sorted(tmp.keys())
Y = [tmp[x] for x in X]
plt.bar(X, Y)
plt.show()
14,953 | f9595838ef33b69a5ccbbd0850760ce7a461bda6 | from django.core.management.base import BaseCommand, CommandError
from shorten.models import KirrURL
class Command(BaseCommand):
    # manage.py command that regenerates shortcodes for all KirrURL rows.
    help = 'Refresh all shortcodes'

    def add_arguments(self, parser):
        # --items caps how many objects get refreshed (None = no limit).
        parser.add_argument('--items', type=int)

    def handle(self, *args, **options):
        return KirrURL.objects.refresh_shortcode(items=options['items'])
14,954 | 7135329bce670012ebf6ba1ac93b9a77c35e415a | from django.db import models
from employees.models import *
# Create your models here.
class Department(models.Model):
    """Organisational unit; may reference a parent department."""
    department_name = models.CharField(max_length=30)
    # NOTE(review): the name suggests a description, but this is a
    # self-referential parent-department link -- confirm intent.
    department_dec = models.ForeignKey(
        'self',
        on_delete=models.SET_NULL,
        blank=True,
        null=True,
    )

    def __str__(self):
        return self.department_name
class Position(models.Model):
    """Job position/title an employee can hold under a Contract."""
    position_name = models.CharField(max_length=30)

    def __str__(self):
        return self.position_name
class Contract(models.Model):
    """Links an employee (CustomUser) to a department and position for a
    period of time; an open-ended contract has end_date = None."""
    user_id = models.ForeignKey(
        CustomUser,
        on_delete=models.CASCADE,
        blank=True,
        null=True,
    )
    dep_id = models.ForeignKey(
        Department,
        on_delete=models.CASCADE,
        blank=True,
        null=True,
    )
    pos_id = models.ForeignKey(
        Position,
        on_delete=models.CASCADE,
        blank=True,
        null=True,
    )
    start_date = models.DateField('HireDate')
    end_date = models.DateField(
        default=None,
        blank=True,
        null=True,
    )

    def __str__(self):
        # NOTE(review): raises AttributeError when user_id is NULL.
        return self.user_id.username

    def end_date_func(self):
        # Human-readable end date: open-ended contracts display "Working".
        if self.end_date is None:
            return "Working"
        return self.end_date
|
14,955 | 1d9f7c1fbb78a99d6089a274d6e253107b5e16a9 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 1 19:23:15 2021
@author: Oscar Talero
"""
#======================================================================
# E S P A C I O D E T R A B A J O A L U M N O
# ====================================================================
#----------Definición de Funciones (Dividir)------------
from collections import namedtuple


def separar_cadenas(lista_datos):
    """Split a '@'-separated smart-home device string into named tuples.

    Parameters
    ----------
    lista_datos : str
        One string with the data of every IoT device, e.g.
        "luz,1,ON@tv,2,OFF" (fields: tipo, identificador, estado).

    Returns
    -------
    lista_IoT : [namedtuple]
        One ``Dispositivo`` named tuple per device.
    """
    lista_t = []
    lista = lista_datos.split('@')  # one chunk per device
    for i in range(len(lista)):
        lista_uno = lista[i].split(',')
        lista_t.append(lista_uno)
    lista_IoT = nombrar_tupla(lista_t)
    # BUG FIX: the original returned the raw ``lista_t`` lists, which made
    # the ``nombrar_tupla`` conversion dead code and contradicted the
    # documented return type.
    return lista_IoT


def nombrar_tupla(lista_t):
    """Convert [[tipo, identificador, estado], ...] into ``Dispositivo``
    named tuples.

    Parameters
    ----------
    lista_t : list of 3-element lists

    Returns
    -------
    lista_IoT : [namedtuple]
        One ``Dispositivo`` per input triple.
    """
    # Named tuple describing one device record.
    Dispositivo = namedtuple('Dispositivo', 'tipo identificador estado')
    lista_IoT = []
    for i in range(len(lista_t)):
        x, y, z = lista_t[i]
        lista_IoT.append(Dispositivo(x, y, z))
    return lista_IoT
def calcular_estadisticas(lista_IoT):
    """
    Parameters
    ----------
    lista_IoT:[(namedtuple)]
        A list of tuples, each holding one IoT device's (tipo, identificador,
        estado) fields.
    Returns
    -------
    'FIN' after printing the count and a formatted table of the devices in
    state ON, then the same for state OFF.
    """
    lista_on = []
    lista_off = []
    contador_on = 0
    contador_off = 0
    # Fixed-width ASCII table template; {} receives the formatted data rows.
    Tabla = """\
+----------------------------------+
|  Tipo   Identificador   Estado   |
|----------------------------------|
{}
+----------------------------------+\
"""
    # Partition the devices by their state field (index 2) and count them.
    for i in range(len(lista_IoT)):
        if lista_IoT[i][2] == 'ON':
            contador_on += 1
            lista_on.append(lista_IoT[i])
        elif lista_IoT[i][2] == 'OFF':
            contador_off += 1
            lista_off.append(lista_IoT[i])
    tupla_on = tuple(lista_on)
    tupla_off = tuple(lista_off)
    print('Dispositivos que se encuentran en estado ON: ',contador_on)
    # Each row formats one device's three fields into the 36-char-wide table.
    Tabla_on = (Tabla.format('\n'.join("| {:<10} {:<10} {:>10} |".format(*fila)for fila in tupla_on)))
    print(Tabla_on,end = '\n''\n')
    # NOTE(review): redundant re-assignment in the original; kept as-is.
    tupla_on = tuple(lista_on)
    print('Dispositivos que se encuentran en estado OFF: ',contador_off)
    Tabla_off = (Tabla.format('\n'.join("| {:<10} {:<10} {:>10} |".format(*fila)for fila in tupla_off)))
    print(Tabla_off)
    return 'FIN'
14,956 | c8fb8e73b182db4a55bebb690cdc8eda72ff2e70 | import math
def main():
    """Evaluate five fixed arithmetic expressions and print each result
    rounded to four decimal places, one per line, in the original order."""
    a = 4
    b = 5
    resultados = (
        2 * (3 / 4) + 4 * (2 / 3) - 3 * (1 / 5) + 5 * (1 / 2),
        2 * math.sqrt(35 ** 2) + 4 * math.sqrt(36 ** 3) - 6 * math.sqrt(49 ** 2),
        (a ** 3 + 2 * b ** 2) / (4 * a),
        (2 * (a + b) ** 2 + 4 * (a - b) ** 2) / (a * b ** 2),
        math.sqrt((a + b) ** 2 + 2 ** (a + b)) / (2 * a + 2 * b) ** 2,
    )
    for valor in resultados:
        print(round(valor, 4))


if __name__ == '__main__':
    main()
|
14,957 | b9058a6aab33b979446747c464912e2f5e9ee77f | """Modify cached data to include class_id.
Other stages in the nr set will need to use the database ids when writing to
the database. Instead of having to keep looking up the mapping and check if
entries exist we put it all in one place.
"""
import copy
from pymotifs import core
from pymotifs.nr.builder import Known
from pymotifs.nr.classes import Loader as ClassLoader
from pymotifs.nr.release import Loader as NrReleaseLoader
from pymotifs.constants import NR_CACHE_NAME
class Loader(core.MassLoader):
    """Rewrite the cached NR grouping so every group carries its database
    class id, sparing later stages a name -> id lookup."""

    dependencies = set([NrReleaseLoader, ClassLoader])
    # This stage may legitimately produce no new rows.
    allow_no_data = True

    def mapping(self, release_id, grouping):
        """Compute the mapping from names to ids in the database for the given
        nr release id.

        Parameters
        ----------
        release_id : int
            The nr release id
        grouping : list
            The data to compute a mapping for. Each entry in the list should be
            a dict with a 'name'.'full' entry which contains the NR name.

        Returns
        -------
        mapping : dict
            A mapping from NR name to db id.
        """
        helper = Known(self.config, self.session)
        classes = [g['name']['full'] for g in grouping]
        return helper.mapping(release_id, classes)

    def remove(self, *args, **kwargs):
        """We never remove any data."""
        return False

    def has_data(self, *args, **kwargs):
        """We always run this stage, so this returns False.

        Returns
        -------
        missing : bool
            False
        """
        return False

    def transform(self, grouping, mapping):
        """Transform the grouping to include database ids.

        Parameters
        ----------
        grouping : dict
            The dictonary which must have a 'members' entry to transform
        mapping : dict
            A mapping from NR name to database id.

        Raises
        ------
        InvalidState
            If grouping or mapping is not truthy.

        Returns
        -------
        transformed : dict
            A copy of grouping with database ids in ['name']['class_id'].
        """
        if not grouping:
            raise core.InvalidState("Cannot load chains without classes")
        if not mapping:
            raise core.InvalidState("Cannot load chains without name mapping")
        # Deep copy so the caller's (cached) grouping is never mutated.
        transformed = copy.deepcopy(grouping)
        for group in transformed:
            for chain in group['members']:
                # NOTE(review): 'chain' is unused and the lookup/assignment
                # below repeats once per member; it could live one loop level
                # up.  Kept as-is to preserve behavior exactly.
                name = group['name']['full']
                if name not in mapping:
                    raise core.InvalidState("Group %s not in mapping" % group)
                group['name']['class_id'] = mapping[name]
        return transformed

    def data(self, *args, **kwargs):
        """Modify the cached data to include database ids. This will load the
        cached NR data and modify it to include the database ids. This will
        never return any data because it is then cached after modification.
        """
        data = self.cached(NR_CACHE_NAME)
        if not data:
            raise core.InvalidState("No grouping loaded")
        mapping = self.mapping(data['release'], data['groups'])
        data['groups'] = self.transform(data['groups'], mapping)
        self.cache(NR_CACHE_NAME, data)
        return None
|
14,958 | 419ea76c731861d97b1cf9b25bbec30d2c79653f | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
### load Data
import tensorflow as tf
import gzip
from time import time
from tensorflow.python.client import device_lib
print(device_lib.list_local_devices())
import keras as ks
import keras
import numpy as np
import tensorflow.keras.backend as K
from random import random
from random import randint
from numpy import array
from numpy import zeros
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.layers import LSTM
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.layers import TimeDistributed
from tensorflow.keras.layers import Conv1D
from tensorflow.keras.layers import MaxPooling1D
from tensorflow.keras.layers import AveragePooling1D
from tensorflow.keras.callbacks import LambdaCallback
from tensorflow.keras import optimizers
from keras.layers.merge import concatenate
#from tensorflow.keras.utils import plot_model
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input
from tensorflow.keras.utils import multi_gpu_model
import multiprocessing
#from eli5.sklearn import PermutationImportance
#from numba import jit
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.callbacks import ModelCheckpoint
#from keras.callbacks import TensorBoard
from hyperopt import Trials, STATUS_OK, tpe
from hyperas import optim
from hyperas.distributions import choice
import numpy as np
import pickle
import os
from keras.preprocessing.sequence import TimeseriesGenerator
from tensorflow.python.framework import ops
ops.reset_default_graph()
# In[ ]:
import multiprocessing
#import dask.dataframe as dk
import pandas as pd
import numpy as np
import datetime as dt
#import matplotlib.pyplot as plt
idx=pd.IndexSlice
from sklearn.metrics import make_scorer, r2_score,accuracy_score,precision_score
from sklearn.externals import joblib
import os
import gc
from sklearn.ensemble import RandomForestClassifier
from sklearn import preprocessing
from tqdm import tqdm
import inspect
# In[ ]:
multiprocessing.cpu_count()
# In[ ]:
def data():
    """Load the cached LSTM training tensors for the year/window configured
    in ConfigLSTMSimple100.csv.

    hyperas requires a zero-argument ``data()`` returning the train arrays.

    Returns
    -------
    (X_train, y_train) : arrays unpickled from the gzip feature/target caches.
    """
    readConfigForLoading=pd.read_csv('/beegfs/sr4376/Finance Data/ModelConfig/ConfigLSTMSimple100.csv')
    Year=readConfigForLoading['Year'][0]
    lookBackYear=readConfigForLoading['lookBackYear'][0]
    LSTMWindow = readConfigForLoading['LSTMWindow'][0]
    NumberOfFeatures = readConfigForLoading['NumberOfFeatures'][0]
    # Feature and target tensors are gzip-pickled under names built from the
    # config values above.
    with gzip.open ('/beegfs/sr4376/Finance Data/LSTM/yearsBack/FeatureYear' + str(Year) +'lookBackYear' +str(lookBackYear) + 'LSTMWindow' + str(LSTMWindow) + 'NumberOfFeatures' + str(NumberOfFeatures) + '.pklz', 'rb') as handle:
        X_train=pickle.load( handle)
    with gzip.open ('/beegfs/sr4376/Finance Data/LSTM/yearsBack/TargetYear' + str(Year) +'lookBackYear' +str(lookBackYear) + 'LSTMWindow' + str(LSTMWindow) + 'NumberOfFeatures' + str(NumberOfFeatures) + '.pklz', 'rb') as handle:
        y_train=pickle.load( handle)
    #y_train=np.load('/beegfs/sr4376/Finance Data/hyperopt/hyperas/tempOpt/tempYtrainHyper5.pkl.npy')
    #X_test=np.load('/beegfs/sr4376/Finance Data/hyperopt/hyperas/tempOpt/tempXtestHyper5.pkl.npy')
    #y_test=np.load('/beegfs/sr4376/Finance Data/hyperopt/hyperas/tempOpt/tempYtestHyper5.pkl.npy')
    print(1)
    #, X_test, y_test
    return X_train, y_train
# In[ ]:
# NOTE: this is a hyperas *template* -- the {{choice([...])}} placeholders are
# substituted by hyperas before execution, so the function is not directly
# importable as plain Python.
def create_model(X_train, y_train):
    """Build, train and score one candidate LSTM for hyperas search.

    Returns the dict hyperas expects: {'loss', 'status', 'model'} where loss
    is the best (minimum) validation loss seen during the fit.
    """
    readConfigForLoading=pd.read_csv('/beegfs/sr4376/Finance Data/ModelConfig/ConfigLSTMSimple100.csv')
    length = readConfigForLoading['LSTMWindow'][0]
    n_features = readConfigForLoading['NumberOfFeatures'][0]
    def simple_sharpe_loss_function(y_actual,y_predicted):
        # Annualized Sharpe ratio of predicted-position returns (M=52 weeks).
        M=52
        M=K.cast(M,dtype='float32')
        sharpe_loss_value=K.mean(y_actual*y_predicted)/K.std(y_actual*y_predicted)*K.sqrt(M)
        return sharpe_loss_value
    model = Sequential()
    model.add(LSTM(units={{choice([5, 10, 20, 40, 60, 80, 100, 120])}}, input_shape=(length,n_features),recurrent_dropout={{choice([0,0.1,0.2,0.3,0.4,0.5])}}))
    # Optionally insert one extra Dense layer before the linear output head.
    if {{choice(['three', 'four'])}} == 'four':
        model.add(Dense(units={{choice([5, 10, 20])}}))
    model.add(Dense(1,activation='linear'))
    opt=Adam(lr={{choice([0.00001,0.0001,0.001,0.01,0.1,1])}},clipnorm={{choice([0.0001,0.001,0.01,0.1,1,10])}})
    model.compile(loss=simple_sharpe_loss_function, optimizer=opt)
    model.summary()
    es=EarlyStopping(monitor='val_loss',mode='min',verbose=2,patience=25)
    # checkpoint = ModelCheckpoint('/beegfs/sr4376/Finance Data/LSTMResults/yearsBack/BestModel.hdf5', monitor='val_loss', verbose=1, save_best_only=True, mode='min',period=10)
    # tensorboard = TensorBoard(log_dir=r"D:\ML for Finance\data\logs\{}".format(time()),histogram_freq=10,write_graph=True,write_images=True,update_freq="epoch")
    #,tensorboard
    callback_List = [es]
    result=model.fit(X_train, y_train, batch_size=6000, epochs=30,callbacks = callback_List, validation_split=0.1,verbose=2)
    validation_acc = np.amin(result.history['val_loss'])
    print('Best validation acc of epoch:', -validation_acc)
    return {'loss': validation_acc,'status': STATUS_OK,'model':model}
# In[ ]:
def continueToTrainModelLSTM(params):
    """Rebuild (untrained) the LSTM architecture chosen by a previous hyperas
    run from its ``best_run`` index dict.

    The index lists below must stay in sync with the {{choice(...)}} option
    lists in ``create_model`` -- hyperas reports choices as list indices.
    """
    readConfigForLoading=pd.read_csv('/beegfs/sr4376/Finance Data/ModelConfig/ConfigLSTMSimple100.csv')
    length = readConfigForLoading['LSTMWindow'][0]
    n_features = readConfigForLoading['NumberOfFeatures'][0]
    unitsToChoice = [5, 10, 20, 40, 60, 80, 100, 120]
    learningRateToChoice = [0.00001,0.0001,0.001,0.01,0.1,1]
    clipnormToChoice = [0.0001,0.001,0.01,0.1,1,10]
    recurrent_dropout = [0,0.1,0.2,0.3,0.4,0.5]
    unitsAfterLSTMToChoice = [5, 10, 20]
    def simple_sharpe_loss_function(y_actual,y_predicted):
        # Annualized Sharpe ratio of predicted-position returns (M=52 weeks).
        M=52
        M=K.cast(M,dtype='float32')
        sharpe_loss_value=K.mean(y_actual*y_predicted)/K.std(y_actual*y_predicted)*K.sqrt(M)
        return sharpe_loss_value
    model = Sequential()
    model.add(LSTM(units=unitsToChoice[params['units']], input_shape=(length,n_features),recurrent_dropout=recurrent_dropout[params['recurrent_dropout']]))
    # 'recurrent_dropout_1' encodes the three/four-layer choice from hyperas.
    if params['recurrent_dropout_1'] == 1:
        model.add(Dense(units=unitsAfterLSTMToChoice[params['units_1']]))
    model.add(Dense(1,activation='linear'))
    opt=Adam(lr=learningRateToChoice[params['lr']],clipnorm=clipnormToChoice[params['clipnorm']])
    model.compile(loss=simple_sharpe_loss_function, optimizer=opt)
    model.summary()
    return model
# Walk-forward training loop: for each look-back horizon jj and each year ii,
# run a hyperas search once, then keep fine-tuning the winning model on the
# next year's data and write its out-of-sample predictions to CSV.
gc.collect()
predictionPeriod=1
LSTMWindow=21
yearsBack=np.arange(1,2)
NumberOfFeatures=100
epochs=30
batch_size=6000
for jj in yearsBack:
    years=np.arange(2008,2015)
    best_model = None
    for ii in years:
        print(years)
        lowYear=ii-jj
        # Persist the current (year, look-back) so data()/create_model pick
        # up the right cached tensors.
        config=pd.DataFrame([[ii, jj ,LSTMWindow, NumberOfFeatures]],columns=['Year','lookBackYear','LSTMWindow','NumberOfFeatures'])
        config.to_csv('/beegfs/sr4376/Finance Data/ModelConfig/ConfigLSTMSimple100.csv')
        if best_model is None:
            # First year: hyperparameter search; later years reuse best_run.
            best_run, best_model = optim.minimize(model=create_model,
                                                  data=data,
                                                  algo=tpe.suggest,
                                                  max_evals=10,
                                                  trials=Trials())
            print('best model over the optmization')
            print(best_run)
            model=best_model
        else:
            model=continueToTrainModelLSTM(best_run)
        es=EarlyStopping(monitor='val_loss',mode='min',verbose=2,patience=25)
        # checkpoint = ModelCheckpoint('/beegfs/sr4376/Finance Data/LSTMResults/yearsBack/SimpleLSTMBestModel.hdf5', monitor='val_loss', verbose=1, save_best_only=True, mode='min',period=10)
        # tensorboard = TensorBoard(log_dir=r"D:\ML for Finance\data\logs\{}".format(time()),histogram_freq=10,write_graph=True,write_images=True,update_freq="epoch")
        #tensorboard
        callback_List = [es]
        with gzip.open ('/beegfs/sr4376/Finance Data/LSTM/yearsBack/FeatureYear' + str(ii) +'lookBackYear' +str(jj) + 'LSTMWindow' + str(LSTMWindow) + 'NumberOfFeatures' + str(NumberOfFeatures) + '.pklz', 'rb') as handle:
            X_train=pickle.load( handle)
        with gzip.open ('/beegfs/sr4376/Finance Data/LSTM/yearsBack/TargetYear' + str(ii) +'lookBackYear' +str(jj) + 'LSTMWindow' + str(LSTMWindow) + 'NumberOfFeatures' + str(NumberOfFeatures) + '.pklz', 'rb') as handle:
            y_train=pickle.load( handle)
        result=model.fit(X_train, y_train, batch_size=batch_size, epochs=epochs,callbacks = callback_List, validation_split=0.1,verbose=2)
        validation_acc = np.amin(result.history['val_loss'])
        print('Best validation acc of epoch:', -validation_acc)
        # Save architecture + weights for this year.
        # NOTE(review): the fit above trains `model`, but the save/predict
        # below use `best_model` -- after the first year those are different
        # objects; confirm this is intended.
        model_json = model.to_json()
        with open('/beegfs/sr4376/Finance Data/LSTMResults/yearsBack/LSTMSimpleModel' + str(ii) + 'yearsBackHyperopt' + str(jj) + 'LSTMWindow' + str(LSTMWindow) + 'NumberOfFeatures' + str(NumberOfFeatures) + '.json',"w") as json_file:
            json_file.write(model_json)
        best_model.save_weights('/beegfs/sr4376/Finance Data/LSTMResults/yearsBack/LSTMSimpleModelWeights' + str(ii) +'yearsBackHyperopt' + str(jj) + 'LSTMWindow' + str(LSTMWindow) + 'NumberOfFeatures' + str(NumberOfFeatures) + '.h5')
        # Out-of-sample evaluation on the following year (look-back fixed at 1).
        with gzip.open ('/beegfs/sr4376/Finance Data/LSTM/yearsBack/FeatureYear' + str(ii+1) +'lookBackYear' +str(1) + 'LSTMWindow' + str(LSTMWindow) + 'NumberOfFeatures' + str(NumberOfFeatures) + '.pklz', 'rb') as handle:
            ValidationData=pickle.load( handle)
        with gzip.open ('/beegfs/sr4376/Finance Data/LSTM/yearsBack/TargetYear' + str(ii+1) +'lookBackYear' +str(1) + 'LSTMWindow' + str(LSTMWindow) + 'NumberOfFeatures' + str(NumberOfFeatures) + '.pklz', 'rb') as handle:
            ValidationTarget=pickle.load( handle)
        with open ('/beegfs/sr4376/Finance Data/LSTM/yearsBack/indexObjectYear' + str(ii+1) +'lookBackYear' +str(1) + 'LSTMWindow' + str(LSTMWindow) + 'NumberOfFeatures' + str(NumberOfFeatures) + '.csv', 'rb') as handle:
            validationIndex=pd.read_csv( handle,parse_dates=['1'])
        #
        validationIndex.rename(columns={'0':'entityID', '1':'date'},inplace=True)
        validationIndex.set_index(['entityID','date'],inplace=True,drop=False)
        validationIndex.drop(columns='Unnamed: 0',inplace=True)
        pred1=best_model.predict(ValidationData, batch_size=2000)
        print(2)
        pred1=pd.DataFrame(pred1)
        pred1['targets']=ValidationTarget
        pred1['entityID']=validationIndex['entityID'].values
        pred1['date']=validationIndex['date'].values
        pred1.set_index(['entityID','date'],inplace=True)
        # NOTE(review): filename embeds str(11) -- likely a typo for str(jj).
        pred1.to_csv('/beegfs/sr4376/Finance Data/LSTMResults/yearsBack/LSTMSimplePrediction' + str(ii) +'yearsBackHyperopt' + str(11) + 'LSTMWindow' + str(LSTMWindow) + 'NumberOfFeatures' + str(NumberOfFeatures) + '.csv')
14,959 | 2892ee5aedd10c1df2d4c53e79895e49059d03d1 | # coding: utf-8
'''
function:
1. 鼠标事件
作用:
1. 为视频选定区域
'''
import cv2
import os
import config as cfg
import numpy as np
def get_rect(image, title='get_rect'):
    '''
    Let the user pick a rectangle on the displayed image with two mouse
    clicks (press, release, press again to fix the opposite corner).
    :param image: frame to display and draw the live rectangle on
    :param title: OpenCV window title
    :return: start, end -- (row, col) of the top-left and bottom-right
        corners (note the (y, x) ordering).
    '''
    # Shared state mutated by the mouse callback below.
    mouse_params = {'tl': None, 'br': None, 'current_pos': None,
                    'released_once': False}
    cv2.namedWindow(title)
    cv2.moveWindow(title, 100, 100)
    def on_mouse(event, x, y, flags, param):
        param['current_pos'] = (x, y)
        # First corner fixed and button released -> the next press ends it.
        if param['tl'] is not None and not (flags & cv2.EVENT_FLAG_LBUTTON):
            param['released_once'] = True
        if flags & cv2.EVENT_FLAG_LBUTTON:
            if param['tl'] is None:
                param['tl'] = param['current_pos']
            elif param['released_once']:
                param['br'] = param['current_pos']
    cv2.setMouseCallback(title, on_mouse, mouse_params)
    cv2.imshow(title, image)
    # Redraw the rubber-band rectangle until the second corner is set.
    while mouse_params['br'] is None:
        im_draw = np.copy(image)
        if mouse_params['tl'] is not None:
            cv2.rectangle(im_draw, mouse_params['tl'],
                          mouse_params['current_pos'], (255, 0, 0))
        cv2.imshow(title, im_draw)
        cv2.waitKey(10)
    cv2.destroyWindow(title)
    # Normalize so start is the top-left and end the bottom-right corner,
    # returned in (row, col) order.
    start = (min(mouse_params['tl'][1], mouse_params['br'][1]),
             min(mouse_params['tl'][0], mouse_params['br'][0]))
    end = (max(mouse_params['tl'][1], mouse_params['br'][1]),
           max(mouse_params['tl'][0], mouse_params['br'][0]))
    return start, end
def get_unload_oil():
    '''
    Let the user select three regions on the first readable video frame and
    save them under the config_scripts directory.
    :return: None
    '''
    cap = cv2.VideoCapture(cfg.oil_video)
    while cap.isOpened():
        first_success, first_frame = cap.read()
        if first_success:
            # Three successive selections on the same frame.
            anni_a, anni_b = get_rect(first_frame, title='get_rect')
            pipe_a, pipe_b = get_rect(first_frame, title='get_rect')
            el_a, el_b = get_rect(first_frame, title='get_rect')
            # Concatenate the two (y, x) corners into (y1, x1, y2, x2).
            anni_area = anni_a + anni_b
            pipe_area = pipe_a + pipe_b
            el_area = el_a + el_b
            cv2.destroyAllWindows()
            cap.release()
            break
        else:
            continue
    # NOTE(review): if the capture never yields a frame, the names below are
    # unbound and this raises NameError.
    print([anni_area, pipe_area, el_area])
    # Save the selected regions once captured.
    with open('../config_scripts/oil_config.txt', mode='w') as f:
        for area in [anni_area, pipe_area, el_area]:
            # area --> y1 x1 y2 x2
            f.write('%d %d %d %d\n' % (area[0], area[1], area[2], area[3]))


if __name__ == '__main__':
    get_unload_oil()
|
14,960 | 5d5c72551ac012e12a33c31118553a776149f99c | """Implementation of node listing, deployment and destruction"""
from __future__ import absolute_import
import datetime
import itertools
import json
import os
import re
import string
import libcloud.compute.providers
import libcloud.compute.deployment
import libcloud.compute.ssh
from libcloud.compute.types import NodeState
import provision.config as config
from provision.collections import OrderedDict
logger = config.logger
def get_driver(secret_key=config.DEFAULT_SECRET_KEY, userid=config.DEFAULT_USERID,
               provider=config.DEFAULT_PROVIDER):
    """A driver represents successful authentication. They become
    stale, so obtain them as late as possible, and don't cache them."""
    # A config module may supply its own driver factory; prefer it if present.
    if hasattr(config, 'get_driver'):
        logger.debug('get_driver %s' % config.get_driver)
        return config.get_driver()
    else:
        # Resolve the provider name through config.PROVIDERS and authenticate.
        logger.debug('get_driver {0}@{1}'.format(userid, provider))
        return libcloud.compute.providers.get_driver(
            config.PROVIDERS[provider])(userid, secret_key)
def list_nodes(driver):
    """Return the provider's nodes, excluding ones already terminated."""
    logger.debug('list_nodes')
    return [n for n in driver.list_nodes() if n.state != NodeState.TERMINATED]
class NodeProxy(object):
    """Wrap a libcloud.base.Node object and add some functionality"""

    def __init__(self, node, image):
        self.node = node
        self.image = image

    def __getattr__(self, name):
        # Delegate everything not defined here to the wrapped node.
        return getattr(self.node, name)

    def write_json(self, path):
        """Dump the node's identifying details to a JSON file at *path*."""
        info = {
            'id': self.node.id,
            'name': self.node.name,
            'state': self.node.state,
            'public_ip': self.node.public_ip,
            'private_ip': self.node.private_ip,
            'image_id': self.image.id,
            'image_name': self.image.name}
        # NOTE(review): binary mode + json.dump is Python-2 style; on
        # Python 3 json.dump writes str and this would raise TypeError.
        with open(path, 'wb') as df:
            json.dump(info, df)
            df.close()  # NOTE(review): redundant inside the with block

    def __repr__(self):
        # Node repr plus, when present, each deployment script's name,
        # exit status and (ascii-forced) output.
        s = self.node.__repr__()
        if hasattr(self.node, 'script_deployments') and self.node.script_deployments:
            ascii_deployments = [  # force script output to ascii encoding
                {'name':sd.name, 'exit_status':sd.exit_status, 'script':sd.script,
                 'stdout': sd.stdout.decode('ascii', 'ignore'),
                 'stderr': sd.stderr.decode('ascii', 'ignore')}
                for sd in self.node.script_deployments]
            s += '\n'.join(
                ['*{name}: {exit_status}\n{script}\n{stdout}\n{stderr}'.format(**sd)
                 for sd in ascii_deployments])
        return s

    def destroy(self):
        """Insure only destroyable nodes are destroyed"""
        node = self.node
        # Guard: the node name must carry a destroyable prefix per config.
        if not config.is_node_destroyable(node.name):
            logger.error('node %s has non-destroyable prefix' % node.name)
            return False
        logger.info('destroying node %s' % node)
        return node.destroy()

    def sum_exit_status(self):
        """Return the sum of all deployed scripts' exit_status"""
        return sum([sd.exit_status for sd in self.node.script_deployments])
def substitute(script, submap):
    """Check for presence of template indicator and if found, perform
    variable substition on script based on template type, returning
    script."""
    match = config.TEMPLATE_RE.search(script)
    if match:
        # The regex names the template flavor; dispatch to its renderer.
        template_type = match.groupdict()['type']
        try:
            return config.TEMPLATE_TYPEMAP[template_type](script, submap)
        except KeyError:
            logger.error('Unsupported template type: %s' % template_type)
            raise
    # No template marker: return the script unchanged.
    return script
def script_deployment(path, script, submap=None):
    """Return a ScriptDeployment from script with possible template
    substitutions."""
    # Correct handling of the mutable-default pitfall: fresh dict per call.
    if submap is None:
        submap = {}
    script = substitute(script, submap)
    return libcloud.compute.deployment.ScriptDeployment(script, path)
def merge(items, amap, load=False):
    """Merge (target, source) pairs into dict ``amap`` in place.

    Parameters
    ----------
    items : iterable of (target, source) tuples
    amap : dict
        Updated in place; an existing truthy entry is overwritten with a
        logged warning.
    load : bool
        When True, store the *contents* of the file named by ``source``
        instead of the source value itself.
    """
    for target, source in items:
        if amap.get(target):
            logger.warn('overwriting {0}'.format(target))
        if load:
            # BUG FIX: the original leaked the file handle
            # (``open(source).read()`` with no close).
            with open(source) as fh:
                amap[target] = fh.read()
        else:
            amap[target] = source
def merge_keyvals_into_map(keyvals, amap):
    """Merge a list of 'key=val' strings into dict ``amap`` in place.

    Duplicate keys overwrite the existing entry with a logged warning.

    ROBUSTNESS FIX: split on the *first* '=' only, so values may themselves
    contain '=' (e.g. URLs with query strings or base64 data); the original
    ``kv.split('=')`` raised ValueError on such input.
    """
    for kv in keyvals:
        k, v = kv.split('=', 1)
        if k in amap:
            logger.warn('overwriting {0} with {1}'.format(k, v))
        amap[k] = v
class Deployment(object):
"""Split the deployment process into two steps"""
    # NOTE(review): ``bundles=[]`` is a mutable default; benign here because
    # the list is only read (extended *from*, never mutated), but fragile.
    def __init__(self, name=None, bundles=[], pubkey=config.DEFAULT_PUBKEY,
                 prefix=config.DEFAULT_NAME_PREFIX, image_name=config.DEFAULT_IMAGE_NAME,
                 subvars=[]):
        """Initialize a node deployment.

        If name is not given, it will generate a random name using
        prefix.  The node name is added to the global substitution
        map, which is used to parameterize templates in scripts
        containing the form {variable_name}.

        The list of bundle names is concatenated with any globally
        common bundle names from which result the set of files to be
        installed, and scripts to be run on the new node.

        The pubkey is concatented with any other public keys loaded
        during configuration and used as the first step in the
        multi-step deployment.  Additional steps represent the scripts
        to be run.

        The image_name is used to determine which set of default
        bundles to install, as well as to actually get the image id in
        deploy()."""
        self.name = name or prefix + config.random_str()
        # Expose the node name (and its short form) to script templates.
        config.SUBMAP['node_name'] = self.name
        config.SUBMAP['node_shortname'] = self.name.split('.')[0]
        merge_keyvals_into_map(subvars, config.SUBMAP)
        logger.debug('substitution map {0}'.format(config.SUBMAP))
        self.pubkeys = [pubkey]
        self.pubkeys.extend(config.PUBKEYS)
        self.image_name = image_name
        filemap = {}
        scriptmap = OrderedDict()  # preserve script run order
        # Bundle resolution order: global defaults, image-specific, caller's.
        install_bundles = config.DEFAULT_BUNDLES[:]
        image_based_bundles = config.IMAGE_BUNDLES_MAP.get(image_name)
        if image_based_bundles:
            install_bundles.extend(image_based_bundles[:])
        install_bundles.extend(bundles)
        for bundle in install_bundles:
            logger.debug('loading bundle {0}'.format(bundle))
            merge(config.BUNDLEMAP[bundle].filemap.items(), filemap)
            merge(config.BUNDLEMAP[bundle].scriptmap.items(), scriptmap, load=True)
        logger.debug('files {0}'.format(filemap.keys()))
        logger.debug('scripts {0}'.format(scriptmap.keys()))
        file_deployments = [libcloud.compute.deployment.FileDeployment(
            source, target) for target, source in filemap.items()]
        logger.debug('len(file_deployments) = {0}'.format(len(file_deployments)))
        self.script_deployments = [script_deployment(path, script, config.SUBMAP)
                                   for path, script in scriptmap.items()]
        logger.debug('len(script_deployments) = {0}'.format(len(self.script_deployments)))
        # Deployment order: SSH keys first, then files, then scripts.
        steps = [libcloud.compute.deployment.SSHKeyDeployment(''.join(self.pubkeys))]
        steps.extend(file_deployments)
        steps.extend(self.script_deployments)
        self.deployment = libcloud.compute.deployment.MultiStepDeployment(steps)
def deploy(self, driver, location_id=config.DEFAULT_LOCATION_ID,
size=config.DEFAULT_SIZE):
"""Use driver to deploy node, with optional ability to specify
location id and size id.
First, obtain location object from driver. Next, get the
size. Then, get the image. Finally, deploy node, and return
NodeProxy. """
logger.debug('deploying node %s using driver %s' % (self.name, driver))
args = {'name': self.name}
if hasattr(config, 'SSH_KEY_NAME'):
args['ex_keyname'] = config.SSH_KEY_NAME
if hasattr(config, 'EX_USERDATA'):
args['ex_userdata'] = config.EX_USERDATA
args['location'] = driver.list_locations()[location_id]
logger.debug('location %s' % args['location'])
args['size'] = size_from_name(size, driver.list_sizes())
logger.debug('size %s' % args['size'])
logger.debug('image name %s' % config.IMAGE_NAMES[self.image_name])
args['image'] = image_from_name(
config.IMAGE_NAMES[self.image_name], driver.list_images())
logger.debug('image %s' % args['image'])
logger.debug('creating node with args: %s' % args)
node = driver.create_node(**args)
logger.debug('node created')
# password must be extracted before _wait_until_running(), where it goes away
logger.debug('driver.features %s' % driver.features)
password = node.extra.get('password') \
if 'generates_password' in driver.features['create_node'] else None
logger.debug('waiting for node to obtain %s' % config.SSH_INTERFACE)
node, ip_addresses = driver._wait_until_running(
node, timeout=1200, ssh_interface=config.SSH_INTERFACE)
ssh_args = {'hostname': ip_addresses[0], 'port': 22, 'timeout': 10}
if password:
ssh_args['password'] = password
else:
ssh_args['key'] = config.SSH_KEY_PATH if hasattr(config, 'SSH_KEY_PATH') else None
logger.debug('initializing ssh client with %s' % ssh_args)
ssh_client = libcloud.compute.ssh.SSHClient(**ssh_args)
logger.debug('ssh client attempting to connect')
ssh_client = driver._ssh_client_connect(ssh_client)
logger.debug('ssh client connected')
logger.debug('starting node deployment with %s steps' % len(self.deployment.steps))
driver._run_deployment_script(self.deployment, node, ssh_client)
node.script_deployments = self.script_deployments # retain exit_status, stdout, stderr
logger.debug('node.extra["imageId"] %s' % node.extra['imageId'])
return NodeProxy(node, args['image'])
def size_from_name(size, sizes):
    """Return the size whose ``name`` equals *size* from *sizes*.

    Raises Exception if the name is ambiguous or unknown (the original
    raised a bare IndexError on an unknown name, and its "more than one"
    message wrongly said "image").
    """
    by_name = [s for s in sizes if s.name == size]
    if len(by_name) > 1:
        raise Exception('more than one size named %s exists' % size)
    if not by_name:
        raise Exception('no size named %s exists' % size)
    return by_name[0]
def image_from_name(name, images):
    """Return an image from a list of images.

    If *name* is an exact match, return the last exactly matching image.
    Otherwise, among images whose name starts with *name*, return the one
    with the largest embedded number ('natural' ordering).

    see:
    http://code.activestate.com/recipes/285264-natural-string-sorting/
    """
    prefixed_images = [i for i in images if i.name.startswith(name)]
    exact = [i for i in prefixed_images if i.name == name]
    if exact:
        return exact[-1]
    # Fixes: raw string for the regex (avoids invalid-escape warnings) and
    # sorting on the numeric key only -- the old (number, image) tuple sort
    # fell back to comparing image objects on ties, which raises TypeError
    # on Python 3.
    decorated = sorted(prefixed_images,
                       key=lambda i: int(re.search(r'\d+', i.name).group(0)))
    return decorated[-1]
def destroy_by_name(name, driver):
    """Destroy all nodes matching specified name.

    Returns False when no node matches; otherwise True only if every
    matching node reported a successful destroy.
    """
    matches = [node for node in list_nodes(driver) if node.name == name]
    if not matches:
        # logger.warn is deprecated in favour of logger.warning
        logger.warning('no node named %s', name)
        return False
    return all(node.destroy() for node in matches)
|
14,961 | 8d9a8a3cc7b5d8b34f67b604d94bf9e30d568d5a | from django.shortcuts import render
from . import models, forms, check
# Create your views here.
def campground_checker_view(request):
    """Handle availability requests and render the request form.

    POST: validate the form, scrape availability for the requested
    campground ids and date range, and render the results page.
    GET (or any other method): render an empty form.
    """
    if request.method == 'POST':
        form = forms.CampgroundForm(request.POST)
        if form.is_valid():
            start_date = form.cleaned_data['start_date']
            end_date = form.cleaned_data['end_date']
            camp_id_list = form.cleaned_data['camp_ids'].split()
            try:
                results, start_string, end_string = check.master_scraping_routine(
                    camp_id_list, start_date, end_date)
            except Exception:
                # Scraper failure (network, parsing, ...) -> friendly page.
                # (was a bare `except:`, which also swallowed SystemExit etc.)
                return render(request, 'no_results_found.html')
            return render(request, 'availability_results.html',
                          {'start_date': start_string,
                           'end_date': end_string,
                           'results': results})
        # BUG FIX: the original returned the plain string 'No success',
        # which is not an HttpResponse and would crash Django; re-render
        # the bound form so the user sees the validation errors instead.
        return render(request, 'availability.html', {'form': form})
    form = forms.CampgroundForm()
    return render(request, 'availability.html', {'form': form})
14,962 | 09cd01e9698959760ffd893a6ebb98d0f18339a1 | import maya.cmds as cmds
import maya.OpenMaya as om
from collections import OrderedDict
#----------------------------------------------------------------------------------------------------------------------------------------------#
def Hitch_L_LegSnap(prefix=''):
    """Snap the left-leg rig between IK and FK, matching the current pose.

    Reads Leg_l_Attributes.IK_1_FK_0 to decide the direction, copies the
    pose onto the target system's controls, then flips the switch.
    """
    switch_attr = prefix + "Leg_l_Attributes.IK_1_FK_0"
    ikfk_value = cmds.getAttr(switch_attr)
    fk_neutral = (42.6 + 43.6)  # rest length of the FK leg chain
    ik_length = (cmds.getAttr(prefix + "Hitch_lowerleg_l_IK_JNT.translateX")
                 + cmds.getAttr(prefix + "Hitch_leg_end_l_IK_JNT.translateX"))
    fk_stretch = max((ik_length - fk_neutral) / 2, 0)
    # FK control <- IK joint pairs (rotation source when going to FK).
    fk_targets = OrderedDict([
        (prefix + 'Hitch_upperleg_l_FK_Ctrl', prefix + 'Hitch_upperleg_l_IK_JNT'),
        (prefix + 'Hitch_lowerleg_l_FK_Ctrl', prefix + 'Hitch_lowerleg_l_IK_JNT'),
    ])
    # IK control <- FK reference pairs (matrix source when going to IK).
    ik_targets = OrderedDict([
        (prefix + 'Hitch_l_foot_IK_Ctrl', prefix + 'Hitch_l_foot_FK_Ctrl'),
        (prefix + 'Hitch_l_leg_PV_Ctrl', prefix + 'l_leg_PV_FK_ref'),
    ])
    foot_ctrls = [prefix + 'Hitch_l_heel_IK_Ctrl',
                  prefix + 'Hitch_l_toesTip_IK_Ctrl',
                  prefix + 'Hitch_l_ball_IK_Ctrl',
                  prefix + 'Hitch_l_tillOut_Ctrl',
                  prefix + 'Hitch_l_tillIn_Ctrl']
    if ikfk_value == 0:  # switching to IK
        for ik_ctrl, fk_src in ik_targets.items():
            cmds.xform(ik_ctrl, ws=True,
                       m=cmds.xform(fk_src, q=True, ws=True, m=True))
        # Zero the foot-roll controls and their animation attributes.
        for ctrl in foot_ctrls:
            cmds.xform(ctrl, ro=[0, 0, 0], t=[0, 0, 0])
        for att in cmds.listAttr(prefix + "Foot_l_IK_Animation_Values", v=1, k=1):
            cmds.setAttr(prefix + "Foot_l_IK_Animation_Values.{}".format(att), 0)
        # Match the toe rotation.
        toes_rot = cmds.xform(prefix + 'Hitch_l_toes_FK_Ctrl', q=True, ws=True, ro=True)
        cmds.xform(prefix + 'Hitch_l_toes_IK_Ctrl', ws=True, ro=toes_rot)
        cmds.setAttr(switch_attr, 1)
        om.MGlobal.displayInfo(" You are now in IK system ")
    if ikfk_value == 1:  # switching to FK
        cmds.setAttr(prefix + "Leg_l_Attributes.FK_Stretch", fk_stretch)
        for fk_ctrl, ik_src in fk_targets.items():
            cmds.xform(fk_ctrl, ws=True,
                       ro=cmds.xform(ik_src, q=True, ws=True, ro=True))
        foot_mtx = cmds.xform(prefix + 'Hitch_l_foot_IK_Ctrl', q=True, ws=True, m=True)
        cmds.xform(prefix + 'Hitch_l_foot_FK_Ctrl', ws=True, m=foot_mtx)
        toes_rot = cmds.xform(prefix + 'Hitch_l_toes_IK_Ctrl', q=True, ws=True, ro=True)
        cmds.xform(prefix + 'Hitch_l_toes_FK_Ctrl', ws=True, ro=toes_rot)
        cmds.setAttr(switch_attr, 0)
        om.MGlobal.displayInfo(" You are now in FK system ")
#----------------------------------------------------------------------------------------------------------------------------------------------#
#----------------------------------------------------------------------------------------------------------------------------------------------#
def Hitch_R_LegSnap(prefix=''):
    """Snap the right-leg rig between IK and FK, matching the current pose.

    Mirror of Hitch_L_LegSnap for the right side.
    """
    switch_attr = prefix + "Leg_r_Attributes.IK_1_FK_0"
    ikfk_value = cmds.getAttr(switch_attr)
    fk_neutral = (42.6 + 43.6)  # rest length of the FK leg chain
    ik_length = (cmds.getAttr(prefix + "Hitch_lowerleg_r_IK_JNT.translateX")
                 + cmds.getAttr(prefix + "Hitch_leg_end_r_IK_JNT.translateX"))
    fk_stretch = max((ik_length - fk_neutral) / 2, 0)
    # FK control <- IK joint pairs (rotation source when going to FK).
    fk_targets = OrderedDict([
        (prefix + 'Hitch_upperleg_r_FK_Ctrl', prefix + 'Hitch_upperleg_r_IK_JNT'),
        (prefix + 'Hitch_lowerleg_r_FK_Ctrl', prefix + 'Hitch_lowerleg_r_IK_JNT'),
    ])
    # IK control <- FK reference pairs (matrix source when going to IK).
    ik_targets = OrderedDict([
        (prefix + 'Hitch_r_foot_IK_Ctrl', prefix + 'Hitch_r_foot_FK_Ctrl'),
        (prefix + 'Hitch_r_leg_PV_Ctrl', prefix + 'r_leg_PV_FK_ref'),
    ])
    foot_ctrls = [prefix + 'Hitch_r_heel_IK_Ctrl',
                  prefix + 'Hitch_r_toesTip_IK_Ctrl',
                  prefix + 'Hitch_r_ball_IK_Ctrl',
                  prefix + 'Hitch_r_tillOut_Ctrl',
                  prefix + 'Hitch_r_tillIn_Ctrl']
    if ikfk_value == 0:  # switching to IK
        for ik_ctrl, fk_src in ik_targets.items():
            cmds.xform(ik_ctrl, ws=True,
                       m=cmds.xform(fk_src, q=True, ws=True, m=True))
        # Zero the foot-roll controls and their animation attributes.
        for ctrl in foot_ctrls:
            cmds.xform(ctrl, ro=[0, 0, 0], t=[0, 0, 0])
        for att in cmds.listAttr(prefix + "Foot_r_IK_Animation_Values", v=1, k=1):
            cmds.setAttr(prefix + "Foot_r_IK_Animation_Values.{}".format(att), 0)
        # Match the toe rotation.
        toes_rot = cmds.xform(prefix + 'Hitch_r_toes_FK_Ctrl', q=True, ws=True, ro=True)
        cmds.xform(prefix + 'Hitch_r_toes_IK_Ctrl', ws=True, ro=toes_rot)
        cmds.setAttr(switch_attr, 1)
        om.MGlobal.displayInfo(" You are now in IK system ")
    if ikfk_value == 1:  # switching to FK
        cmds.setAttr(prefix + "Leg_r_Attributes.FK_Stretch", fk_stretch)
        for fk_ctrl, ik_src in fk_targets.items():
            cmds.xform(fk_ctrl, ws=True,
                       ro=cmds.xform(ik_src, q=True, ws=True, ro=True))
        foot_mtx = cmds.xform(prefix + 'Hitch_r_foot_IK_Ctrl', q=True, ws=True, m=True)
        cmds.xform(prefix + 'Hitch_r_foot_FK_Ctrl', ws=True, m=foot_mtx)
        toes_rot = cmds.xform(prefix + 'Hitch_r_toes_IK_Ctrl', q=True, ws=True, ro=True)
        cmds.xform(prefix + 'Hitch_r_toes_FK_Ctrl', ws=True, ro=toes_rot)
        cmds.setAttr(switch_attr, 0)
        om.MGlobal.displayInfo(" You are now in FK system ")
#----------------------------------------------------------------------------------------------------------------------------------------------#
def Hitch_R_arm_Snap(prefix=''):
    """Snap the right-arm rig between IK and FK, matching the current pose."""
    switch_attr = prefix + "Arm_r_Attributes.IK_1_FK_0"
    ikfk_value = cmds.getAttr(switch_attr)
    fk_neutral = (25.267 + 28.608)  # rest length of the FK arm chain
    ik_length = (cmds.getAttr(prefix + "Hitch_lowerarm_r_IK.tx")
                 + cmds.getAttr(prefix + "hitch_arm_end_r_IK.tx"))
    fk_stretch = max((ik_length - fk_neutral) / 2, 0)
    auto_clav = cmds.getAttr(prefix + 'Arm_r_Attributes.Auto_Clavicle')
    if ikfk_value == 0:  # FK to IK
        if auto_clav == 1:
            # Bake the auto-clavicle pose onto the FK clavicle first.
            for ch in ('rx', 'ry', 'rz'):
                val = cmds.getAttr(prefix + '{}.{}'.format('Hitch_r_clavicle_FK_ctrl_Auto', ch))
                cmds.setAttr(prefix + '{}.{}'.format('Hitch_r_clavicle_FK_Ctrl', ch), val)
            cmds.setAttr(prefix + 'Arm_r_Attributes.Auto_Clavicle', 0)
        clav_rot = cmds.xform(prefix + "Hitch_r_clavicle_FK_Ctrl", q=1, ws=1, ro=1)
        cmds.xform(prefix + "hitch_clavicle_r_IK_Ctrl", ws=1, ro=clav_rot)
        pv_mtx = cmds.xform(prefix + "Hitch_r_arm_PV_FKRef", q=1, ws=1, m=1)
        cmds.xform(prefix + "Hitch_r_arm_PV_Ctrl", ws=1, m=pv_mtx)
        hand_mtx = cmds.xform(prefix + "Hitch_r_hand_FK_Ctrl", q=1, ws=1, m=1)
        cmds.xform(prefix + "Hitch_r_hand_IK_Ctrl", ws=1, m=hand_mtx)
        cmds.setAttr(switch_attr, 1)
        om.MGlobal.displayInfo(" You are now in IK system ")
    if ikfk_value == 1:  # IK to FK
        cmds.setAttr(prefix + "Arm_r_Attributes.FK_Stretch", fk_stretch)
        clav_rot = cmds.xform(prefix + "hitch_clavicle_r_IK_Ctrl", q=1, ws=1, ro=1)
        cmds.xform(prefix + "Hitch_r_clavicle_FK_Ctrl", ws=1, ro=clav_rot)
        upper_rot = cmds.xform(prefix + "Hitch_upperarm_r_IK", q=1, ws=1, ro=1)
        cmds.xform(prefix + "Hitch_r_upperarm_FK_Ctrl", ws=1, ro=upper_rot)
        lower_rot = cmds.xform(prefix + "Hitch_lowerarm_r_IK", q=1, ws=1, ro=1)
        cmds.xform(prefix + "Hitch_r_lowerarm_FK_Ctrl", ws=1, ro=lower_rot)
        hand_mtx = cmds.xform(prefix + "Hitch_r_hand_IK_Ctrl", q=1, ws=1, m=1)
        cmds.xform(prefix + "Hitch_r_hand_FK_Ctrl", ws=1, m=hand_mtx)
        cmds.setAttr(switch_attr, 0)
        om.MGlobal.displayInfo(" You are now in FK system ")
#----------------------------------------------------------------------------------------------------------------------------------------------#
#----------------------------------------------------------------------------------------------------------------------------------------------#
def Hitch_L_arm_Snap(prefix=''):
    """Snap the left-arm rig between IK and FK, matching the current pose.

    Mirrors Hitch_R_arm_Snap: reads Arm_l_Attributes.IK_1_FK_0 to decide
    the direction, copies the pose onto the target system's controls,
    then flips the switch attribute.

    Fix: removed a leftover debug print of the stretch values that the
    right-arm twin does not have.
    """
    switch_attr = prefix + "Arm_l_Attributes.IK_1_FK_0"
    ikfkValue = cmds.getAttr(switch_attr)
    fkNeutral = (25.267 + 28.608)  # rest length of the FK arm chain
    ikLength = (cmds.getAttr(prefix + "Hitch_lowerarm_l_IK.tx")
                + cmds.getAttr(prefix + "hitch_arm_end_l_IK.tx"))
    fkStretch = max((ikLength - fkNeutral) / 2, 0)
    auto_clav_value = cmds.getAttr(prefix + 'Arm_l_Attributes.Auto_Clavicle')
    # FK to IK
    if ikfkValue == 0:
        if auto_clav_value == 1:
            # Bake the auto-clavicle pose onto the FK clavicle first.
            for ch in ('rx', 'ry', 'rz'):
                val = cmds.getAttr(prefix + '{}.{}'.format('Hitch_l_clavicle_FK_ctrl_Auto', ch))
                cmds.setAttr(prefix + '{}.{}'.format('Hitch_l_clavicle_FK_Ctrl', ch), val)
            cmds.setAttr(prefix + 'Arm_l_Attributes.Auto_Clavicle', 0)
        clav_ik_pos = cmds.xform(prefix + "Hitch_l_clavicle_FK_Ctrl", q=1, ws=1, ro=1)
        cmds.xform(prefix + "hitch_clavicle_l_IK_Ctrl", ws=1, ro=clav_ik_pos)
        pv_pos = cmds.xform(prefix + "Hitch_l_arm_PV_FKRef", q=1, ws=1, m=1)
        cmds.xform(prefix + "Hitch_l_arm_PV_Ctrl", ws=1, m=pv_pos)
        hand_pos = cmds.xform(prefix + "Hitch_l_hand_FK_Ctrl", q=1, ws=1, m=1)
        cmds.xform(prefix + "Hitch_l_hand_IK_Ctrl", ws=1, m=hand_pos)
        cmds.setAttr(switch_attr, 1)
        om.MGlobal.displayInfo(" You are now in IK system ")
    # IK to FK
    if ikfkValue == 1:
        cmds.setAttr(prefix + "Arm_l_Attributes.FK_Stretch", fkStretch)
        clav_pos = cmds.xform(prefix + "hitch_clavicle_l_IK_Ctrl", q=1, ws=1, ro=1)
        cmds.xform(prefix + "Hitch_l_clavicle_FK_Ctrl", ws=1, ro=clav_pos)
        upper_rot = cmds.xform(prefix + "Hitch_upperarm_l_IK", q=1, ws=1, ro=1)
        cmds.xform(prefix + "Hitch_l_upperarm_FK_Ctrl", ws=1, ro=upper_rot)
        lower_rot = cmds.xform(prefix + "Hitch_lowerarm_l_IK", q=1, ws=1, ro=1)
        cmds.xform(prefix + "Hitch_l_lowerarm_FK_Ctrl", ws=1, ro=lower_rot)
        matrix = cmds.xform(prefix + "Hitch_l_hand_IK_Ctrl", q=1, ws=1, m=1)
        cmds.xform(prefix + "Hitch_l_hand_FK_Ctrl", ws=1, m=matrix)
        cmds.setAttr(switch_attr, 0)
        om.MGlobal.displayInfo(" You are now in FK system ")
#----------------------------------------------------------------------------------------------------------------------------------------------#
#----------------------------------------------------------------------------------------------------------------------------------------------#
if __name__ == '__main__':
    # Ad-hoc manual test: run one of the snap functions on the current
    # Maya scene. Uncomment the one you want to exercise.
    #Hitch_R_LegSnap()
    #Hitch_L_LegSnap()
    #Hitch_R_arm_Snap()
    Hitch_L_arm_Snap()
|
14,963 | 43c207d3aa864b36ed27b71904c01b3d77ab9310 | class Solution:
def romanToInt(self, s: str) -> int:
encodeTable = {
"I": 1,
"V": 5,
"X": 10,
"L": 50,
"C": 100,
"D": 500,
"M": 1000
}
total = 0
if not s:
return total
i = len(s) - 1
while i >= 0:
if encodeTable[s[i]] > encodeTable[s[i-1]] and i >= 1:
total += encodeTable[s[i]] - encodeTable[s[i-1]]
i -= 2
else:
total += encodeTable[s[i]]
i -= 1
return total
# Quick manual check: MCMXCIV should print 1994.
s = Solution()
print(s.romanToInt("MCMXCIV"))
|
14,964 | 9205e857cc9fef576abfb03d9822cae487929ae4 | # coding: utf-8
""" Test different reading data from different mass runs """
from __future__ import absolute_import, unicode_literals, division, print_function
__author__ = "adrn <adrn@astro.columbia.edu>"
# Standard library
import os, sys
# Third-party
import astropy.units as u
from astropy.constants import G
from astropy.io.misc import fnpickle
import matplotlib.pyplot as plt
import numpy as np
import pytest
from ... import usys
from ..sgr import SgrSimulation
from ...coordinates.frame import heliocentric
# Directory where the test figures are written; created at import time.
plot_path = "plots/tests/io/sgr"
if not os.path.exists(plot_path):
    os.makedirs(plot_path)
# Shared plot styles: particles (p_), satellite (s_), Law/SCFCEN truth (l_).
p_kwargs = dict(marker='.', linestyle='none', color='k', alpha=0.1)
s_kwargs = dict(marker='o', linestyle='none', color='r', alpha=0.75,
                markersize=10)
l_kwargs = dict(marker='^', linestyle='none', color='g', alpha=0.75,
                markersize=10)
class Test10E8(object):
    """Visual/round-trip checks for the 2.5e8 Msun Sgr simulation reader.

    Plots particle and satellite phase-space coordinates against the true
    SCFCEN values and verifies the particle containers pickle cleanly.
    """

    # (axes index, key suffix pair, true-vector indices) for each panel.
    _panels = [((0, 0), 0, 1), ((1, 0), 0, 2), ((1, 1), 1, 2)]

    def setup_class(self):
        self.sgr = SgrSimulation("sgr_plummer/2.5e8", "SNAP")
        self.particles = self.sgr.particles(expr="tub==0").decompose(usys)
        self.satellite = self.sgr.satellite().decompose(usys)
        # Here are the true parameters from the SCFCEN file
        r0 = np.array([36.82173, 2.926886, -4.172226]) * self.sgr.units['length']
        v0 = np.array([4.654394, -0.9905948, 5.080418]) * self.sgr.units['speed']
        self.true_r = np.squeeze(r0.decompose(usys).value)
        self.true_v = np.squeeze(v0.decompose(usys).value)

    def _plot_panels(self, keys, truth, half_width, fname):
        """Draw the three 2D projections and save the figure."""
        fig, axes = plt.subplots(2, 2, figsize=(10, 10))
        axes[0, 1].set_visible(False)
        # Particles first, then satellite, then the Law truth markers,
        # so the draw order matches the original figures.
        for style, label, getter in (
                (p_kwargs, "all particles",
                 lambda a, b: (self.particles[keys[a]].value,
                               self.particles[keys[b]].value)),
                (s_kwargs, "Satellite",
                 lambda a, b: (self.satellite[keys[a]].value,
                               self.satellite[keys[b]].value)),
                (l_kwargs, "Law",
                 lambda a, b: (truth[a], truth[b]))):
            for (i, j), a, b in self._panels:
                kw = dict(style)
                if (i, j) == (0, 0):
                    kw["label"] = label
                x, y = getter(a, b)
                axes[i, j].plot(x, y, **kw)
        for (i, j), a, b in self._panels:
            axes[i, j].set_xlim(truth[a] - half_width, truth[a] + half_width)
            axes[i, j].set_ylim(truth[b] - half_width, truth[b] + half_width)
        axes[0, 0].legend(fontsize=10)
        fig.subplots_adjust(hspace=0.02, wspace=0.02)
        fig.savefig(os.path.join(plot_path, fname))

    def test_position(self):
        self._plot_panels(("x", "y", "z"), self.true_r, 2,
                          "sat_ptcl_positions_2.5e8.png")

    def test_velocity(self):
        sz = (50 * u.km / u.s).decompose(usys).value
        self._plot_panels(("vx", "vy", "vz"), self.true_v, sz,
                          "sat_ptcl_velocities_2.5e8.png")

    def test_pickle(self):
        particles = self.sgr.particles(n=25, expr="tub==0")
        fnpickle(particles, os.path.join(plot_path, "test.pickle"))
        helio = particles.to_frame(heliocentric)
        fnpickle(helio, os.path.join(plot_path, "test2.pickle"))
14,965 | c751b5e6154ac522bb2693273ae8f2d8c9b2849a | import pygame
import random
from kreis import Kreis
# Simple animation: every 10th tick a new random circle is spawned;
# all circles are moved and redrawn each frame until the window closes.
pygame.init()
pygame.display.set_mode((500, 500))
pygame.display.set_caption("Zufällige Kreise")

circles = []
spawn_tick = 0
running = True
while running:
    pygame.time.delay(100)
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
    if spawn_tick == 0:
        neuer_kreis = Kreis()
        neuer_kreis.set_random_values(500, 500)
        circles.append(neuer_kreis)
    spawn_tick = (spawn_tick + 1) % 10
    surface = pygame.display.get_surface()
    surface.fill((0, 0, 0))
    for circle in circles:
        circle.move()
        circle.draw(surface)
    pygame.display.update()
pygame.quit()
|
14,966 | 35a63a0a35ec2f8ffb83e0525bb91b6ef2781fa4 | # Generated by Django 3.0.8 on 2020-12-07 10:29
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: adds estimate metadata (category, material) and the
    min/max injection ('사출가') and mold ('금형가') price range fields."""
    dependencies = [
        ('estimate', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='estimate',
            name='category',
            field=models.CharField(blank=True, max_length=256, null=True, verbose_name='종류'),
        ),
        migrations.AddField(
            model_name='estimate',
            name='material',
            field=models.CharField(blank=True, max_length=256, null=True, verbose_name='재질'),
        ),
        migrations.AddField(
            model_name='estimate',
            name='maximumPrice',
            field=models.FloatField(default=0, verbose_name='사출가 최대가격'),
        ),
        migrations.AddField(
            model_name='estimate',
            name='minimumPrice',
            field=models.FloatField(default=0, verbose_name='사출가 최소가격'),
        ),
        migrations.AddField(
            model_name='estimate',
            name='moldedMaximumPrice',
            field=models.FloatField(default=0, verbose_name='금형가 최대가격'),
        ),
        migrations.AddField(
            model_name='estimate',
            name='moldedMinimumPrice',
            field=models.FloatField(default=0, verbose_name='금형가 최소가격'),
        ),
    ]
|
14,967 | 585ea2240cbd825f04c7c7048ef8b45c648ba382 | from django.shortcuts import get_object_or_404, render
from django.template.loader import render_to_string
from django.http import *
from django.urls import reverse
from django.template import loader, Context
from django.contrib.auth.models import User
from django.core.validators import *
from django.core.mail import *
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.contrib.auth import authenticate
from django.contrib.auth import login as auth_login
from django.contrib.auth import logout as auth_logout
from .models import Game
import hashlib
from .ChessAI import *
def index(request):
    """Render the landing page inside the shared site template."""
    context = {'title': "Welcome to the Chess game",
               'body': render_to_string("webchess/index.html")}
    return render(request, "template.html", context)
@login_required
def game(request):
    """Render the chess board page for the logged-in, verified user.

    `is_staff` doubles as the e-mail-verified flag in this app.
    """
    if not request.user.is_staff:
        # BUG FIX: the redirect was created but never returned, so
        # unverified users fell through to the game page anyway.
        return HttpResponseRedirect(reverse("index"))
    game = Game.objects.get(player=request.user)
    body = render_to_string("webchess/game.html", {"state": game.state})
    return render(request, "template.html",
                  {'body': body, 'title': "The Game Page"})
@login_required
def api(request):
    """Apply the user's move and answer with the AI's reply and new state.

    The response body is '<message>-<board state>'; the client splits on
    the first '-'.
    """
    move = request.POST.get("move").split()
    move[0] = move[0][::-1]  # first token arrives with mirrored coordinates
    move = ' '.join(move)
    game = Game.objects.get(player=request.user)
    board = chess(game.state)
    # Hoisted: the original materialised the move generator twice.
    user_moves = list(board.next_possible_moves(1))
    if not user_moves and board.king_is_under_attack(1):
        return HttpResponse("You LOST!!-" + game.state)
    if not user_moves and not board.king_is_under_attack(1):
        return HttpResponse("POT-" + game.state)
    if not board.user_move(move):
        return HttpResponse("wrong way!-" + game.state)
    ai_move = alpha_beta_pruning(board, 4, a=-inf, b=inf, player=0, maxim=1)
    game.state = str(board)
    game.save()
    if ai_move[1] is None and board.king_is_under_attack(0):
        return HttpResponse("AI Lost and YOU WON THE GAME!!-" + game.state)
    if ai_move[1] is None and not board.king_is_under_attack(0):
        return HttpResponse("POT-" + game.state)
    board.move(ai_move[1])
    game.state = str(board)
    game.save()
    return HttpResponse("ai move was:" + str(ai_move[1]) + "-" + game.state)
def login(request):
    """Show the login form (GET) or authenticate and open a session (POST)."""
    page = {'body': "", 'title': "login"}
    content = {"error": ""}
    if request.method == "GET":
        page['body'] = render_to_string("registration/login.html", content)
        return render(request, "template.html", page)
    user = authenticate(username=request.POST["username"],
                        password=request.POST["password"])
    if user is None:
        content['error'] = "Username or Password in not correct"
        page['body'] = render_to_string("registration/login.html", content)
        return render(request, "template.html", page)
    auth_login(request, user)
    return HttpResponseRedirect(reverse("index"))
def logout(request):
    """Log the current user out and redirect to the landing page."""
    auth_logout(request)
    return HttpResponseRedirect(reverse("index"))
def register(request):
    """Sign-up view: validate input, create an unverified user plus game,
    and e-mail an activation link.

    NOTE(review): the activation token is unsalted-constant md5 of
    username+email+'HELL'. Acceptable only as a low-stakes e-mail token;
    consider `django.core.signing` or `secrets` instead.
    """
    cont = {"error": "Singup here!"}
    temp_cont = {'body': "", 'title': "register"}

    def _fail(message):
        # Re-render the sign-up form with an error message.
        cont['error'] = message
        temp_cont['body'] = render_to_string("registration/register.html", cont)
        return render(request, "template.html", temp_cont)

    if request.method == "POST":
        username = request.POST.get("username")
        # Replaced bare try/except-pass existence probes with exists().
        if User.objects.filter(username=username).exists():
            return _fail("There is a user with this username :/")
        email = request.POST.get("email")
        try:
            validate_email(email)
        except ValidationError:
            return _fail("email is not valid :/")
        if User.objects.filter(email=email).exists():
            return _fail("There is a user with this email :/")
        pwd = request.POST.get("password")
        if pwd != request.POST.get("repassword"):
            return _fail("Passwords doesnt match :/")
        new_user = User.objects.create_user(username, email, pwd)
        new_user.is_staff = False  # is_staff doubles as the "verified" flag
        new_user.save()
        Game(player=new_user).save()
        token = hashlib.md5((username + email + "HELL").encode()).hexdigest()
        active_link = "http://" + request.META["HTTP_HOST"] + "/verify/" + str(new_user.id) + "/" + token
        send_mail('activitation link',
                  'activate your account and confirm your mail with this link: \n' + active_link,
                  settings.EMAIL_HOST_USER, [email], fail_silently=False)
        temp_cont['body'] = "new user created and verify email sent, now just go and login :))"
        return render(request, "template.html", temp_cont)
    temp_cont['body'] = render_to_string("registration/register.html", cont)
    return render(request, "template.html", temp_cont)
def verify(request):
    """Activate an account from an e-mailed /verify/<id>/<md5-hash> link."""
    temp_cont = {'body': "", 'title': "Verify account"}
    parts = request.path.split('/')
    u_id, u_hash = parts[-2], parts[-1]
    try:
        user = User.objects.get(id=u_id)
    # Narrowed from `except Exception as e` (e was unused); ValueError
    # covers a non-numeric id segment in the URL.
    except (User.DoesNotExist, ValueError):
        temp_cont['body'] = "this is a not a valid link :/"
        return render(request, "template.html", temp_cont)
    expected = hashlib.md5((user.username + user.email + "HELL").encode()).hexdigest()
    if expected != u_hash:
        temp_cont['body'] = "Hash code is not valid :/"
        return render(request, "template.html", temp_cont)
    user.is_staff = True  # is_staff doubles as the "verified" flag
    user.save()
    temp_cont['body'] = "user activated :)"
    return render(request, "template.html", temp_cont)
@login_required
def reset_game(request):
    """Reset the current user's board to the starting position."""
    current = Game.objects.get(player=request.user)
    current.reset_game()
    current.save()
    return HttpResponseRedirect(reverse("game"))
14,968 | 19316b060e80c2f517dd176ff1a9a4e3389813e2 | """
This file Create by Fawziah Almutairi
Created on Mon Dec 16 01:25:40 2019
Programming for Geographical Information Analysts: Core Skills.
@author: ml18fksa, the Student ID is 201288865
Geography Programming Courses at University of Leeds
""
This file called the GUI which means build a Graphical User Interface at 2D environment.
The main task for this model is displays in a window with a list of agents which has number of animals eat and move at 2D environment
sharing resources and exchange resources with agents neighbourhood based on a green landscape of the environment.
"""
"""
using import to get code from anther python packages and moudules.
the imports sould be in the top of the moudle.
"""
"""
Step(1) import the libraries and files it is the main step.
"""
import agentframework
import random
import operator
import matplotlib.pyplot
import tkinter
import matplotlib
matplotlib.use("TkAgg")
import matplotlib.animation
import csv
import requests
import bs4
"""
Step(2)to request the data from the web Page.
"""
def request_data():
    """Scrape the practical's example page and return (td_ys, td_xs).

    Returns the <td class="y"> and <td class="x"> elements. The original
    only printed them, making the function useless to callers; the prints
    are kept for backward-compatible console output.
    """
    r = requests.get('http://www.geog.leeds.ac.uk/courses/computing/practicals/python/agent-framework/part9/data.html')
    soup = bs4.BeautifulSoup(r.text, 'html.parser')
    td_ys = soup.find_all(attrs={"class": "y"})
    td_xs = soup.find_all(attrs={"class": "x"})
    print(td_ys)
    print(td_xs)
    return td_ys, td_xs
"""
Step(3) Make the agents in GUIs.
"""
# Model parameters.
num_of_agents = 10 # number of agents placed in the environment
num_of_iterations = 100 # movement/eat/share iterations per animation frame
dist=20 # neighbourhood radius within which agents share resources
agents = []
# list of agentframework.Agents, filled below
# 2D raster environment the agents graze on
environment = []
# fixed seed so repeated runs produce the same outcome
random.seed(1)
"""
Step(4)to Read the file
"""
# read the environment raster from in.txt (CSV of numbers)
f = open("in.txt")
# QUOTE_NONNUMERIC makes the reader yield floats; each is truncated to int
data = csv.reader(f, quoting=csv.QUOTE_NONNUMERIC)
for row in data:
    rowlist = []
    for value in row:
        rowlist.append(int(value))
    environment.append(rowlist)
f.close() # release the file handle once the raster is loaded
"""
Step(5) A command to calculate the distance.
deinition to distance the agents that have y and x coordinates.
"""
def distance_between(agents_row_a, agents_row_b):
    """Return the Euclidean distance between two agents' (x, y) positions."""
    dx = agents_row_a.x - agents_row_b.x
    dy = agents_row_a.y - agents_row_b.y
    return (dx * dx + dy * dy) ** 0.5
"""
Step(6)Communication with the 2D environment.
"""
# Create the agents; each one gets the shared agents list and environment
# so it can interact with its neighbours and the landscape.
for i in range(num_of_agents):
    agents.append(agentframework.Agents(i, agents, environment))
# Flag polled by gen_function to keep the animation running.
carry_on = True
"""
Step(7)To class the number of agents to moving and eating to sharing with enviroment in 2D inplotting.
TO Display the white Agents
"""
def update(frame_number):
    """Animation callback: advance the model one frame and redraw it."""
    global carry_on
    #Clear the figure
    fig.clear()
    # Move the agents: each iteration every agent moves, eats from the
    # environment and shares resources with neighbours within `dist`.
    for j in range(num_of_iterations):
        for i in range(num_of_agents):
            agents[i].move()
            agents[i].eat()
            agents[i].share(dist)
    # Draw the agents as white dots over the environment raster.
    for i in range(num_of_agents):
        matplotlib.pyplot.scatter(agents[i].x,agents[i].y, color="white")
    matplotlib.pyplot.imshow(environment)#share_with_neighbours(neighbourhood)
    #matplotlib.pyplot.show()
#matplotlib.pyplot.show()
"""
Step (8) deinition to get function of the agents in the librarie.
To display the animation
"""
def gen_function(b = [0]):
    """Yield frame indices 0..9, stopping early if `carry_on` is cleared.

    Note: *b* is unused (and a mutable default); kept only for
    backward compatibility with existing callers.
    """
    a = 0
    global carry_on #Not actually needed as we're not assigning, but clearer
    # BUG FIX: use logical `and` (short-circuit) rather than bitwise `&`.
    while a < 10 and carry_on:
        yield a # Returns control and waits next call.
        a = a + 1
# Figure shared by the standalone animation and the embedded Tk canvas.
fig = matplotlib.pyplot.figure(figsize=(7, 7))
# Drive update() once a second for num_of_iterations frames.
animation = matplotlib.animation.FuncAnimation(fig, update, interval=1000, repeat=False, frames=num_of_iterations)
matplotlib.pyplot.show()
# Menu callback wired to "Run model" below.
def run():
    """(Re)start the animation inside the embedded Tk canvas."""
    # BUG FIX: without `global`, the FuncAnimation was bound to a local
    # name and could be garbage-collected before drawing any frames
    # (matplotlib requires keeping a reference to the animation object).
    global animation
    animation = matplotlib.animation.FuncAnimation(fig, update, frames=gen_function, repeat=False)
    canvas.draw()
"""
Step (9) create root window.
To display a Graphical User Interface(GUI) model.
"""
# to create root window in the model
# Build the root Tk window hosting the model menu and the figure canvas.
root = tkinter.Tk()
root.wm_title("Model")
menu_bar = tkinter.Menu(root)
root.config(menu=menu_bar)
model_menu = tkinter.Menu(menu_bar)
menu_bar.add_cascade(label="Model", menu=model_menu)
model_menu.add_command(label="Run model", command=run)
# BUG FIX: the Tk backend module must be imported explicitly --
# `matplotlib.backends.backend_tkagg` is not available as an attribute
# just because `matplotlib` was imported, so the next line raised
# AttributeError.
import matplotlib.backends.backend_tkagg
canvas = matplotlib.backends.backend_tkagg.FigureCanvasTkAgg(fig, master=root)
canvas._tkcanvas.pack(side=tkinter.TOP, fill=tkinter.BOTH, expand=1)
tkinter.mainloop()
# Wait for interactions
|
14,969 | 8b3c33430a3b4acd071005a7ccb63187da248774 | # Python Activtiy
#
# Fill in the code for the functions below.
# The starter code for each function includes a 'return'
# which is just a placeholder for your code.
# import regular expression module
import re
# # Part A. starts_with_number
# Define a function starts_with_number(s) that takes a string and returns true
# if it starts with a number and false otherwise.
# (For our purposes, a number is any character that is 0,1,2,3,4,5,6,7,8, or 9.)
# Note: Be sure to use RegEx!
def starts_with_number(s):
    """Return True if *s* starts with a decimal digit 0-9, else False.

    Uses a raw-string pattern and returns a real bool directly instead of
    the if/else on the match object.
    """
    return bool(re.match(r"\d", s))
# # Part B. starts_with_consonant
# Define a function starts_with_consonant(s) that takes a string and returns true
# if it starts with a consonant and false otherwise.
# (For our purposes, a consonant is any letter other than A, E, I, O, U.)
# Note: Be sure to use RegEx and it works for both upper and lower case and for nonletters!
def starts_with_consonant(s):
    """Return True if *s* starts with a consonant letter, else False.

    A consonant is any letter other than A, E, I, O, U, either case.
    Non-letters (digits, punctuation) and the empty string return False.
    Collapses the original's two overlapping searches into one pattern:
    a letter (lookahead) that is not a vowel.
    """
    return bool(re.match(r"(?=[a-zA-Z])[^aeiouAEIOU]", s))
# Part C. binary_multiple_of_4
# define a method binary_multiple_of_4(s) that takes a string and returns true
# if the string represents a binary number that is a multiple of 4.
# Note: Be sure it returns false if the string is not a valid binary number!
# Hint: Use regular expressions to match for the pattern of a binary number that is a multiple of 4.
def binary_multiple_of_4(s):
    """Return True if s is a valid binary number that is a multiple of 4."""
    # BUG FIX: the original only tested that the FIRST character was 0/1 and
    # the LAST was 0, so "110" (decimal 6) and invalid strings like "120"
    # returned True.  A binary multiple of 4 is either all zeros ("0", "00",
    # ...) or any binary string ending in "00"; fullmatch also rejects any
    # string containing non-binary characters.
    return bool(re.fullmatch(r"0+|[01]*00", s))
|
14,970 | 98eec42ff428193c6ecdab1e80204287d1466714 | # Generated by Django 3.0.2 on 2020-01-13 09:06
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the nullable 'approved_at' timestamp field to fyle.Expense."""

    # Must run after the app's initial schema migration.
    dependencies = [
        ('fyle', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='expense',
            name='approved_at',
            # Nullable so existing rows need no backfill when this deploys.
            field=models.DateTimeField(help_text='Expense approved at', null=True),
        ),
    ]
|
14,971 | 5109eb2ffd5d01484a8cb1b7fd588d7214116878 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2014, 2015 Patrick Moran for Verizon
#
# Distributes WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License. If not, see <http://www.gnu.org/licenses/>.
from collections import deque
import g_config
import g_eon_api_bridge
# from g_graphics import plot_assets
import time
import logging
import json
from g_lat_lon_distance import lat_lon_distance, move_to_lat_lon, compute_resolution
from sortedcontainers import SortedDict
import pickle
import copy
import pandas
from numpy import int64, fmax, argsort, array, interp, linspace, diff, random
import arrow
import Queue
import os
import threading
ON = 1
OFF = 0
class GroomingMessageHandler(threading.Thread):
    def __init__(self,
                 incoming_q,
                 incoming_queue_lock,
                 outgoing_q,
                 outgoing_queue_lock,
                 module_instance_name='Unnamed',
                 shared_data=None, shared_data_lock=None):
        """
        Build a grooming worker thread.

        :param incoming_q: Rabbit MQ queue of grooming messages to consume
        :param incoming_queue_lock: lock guarding incoming_q
        :param outgoing_q: queue for messages produced by this thread
        :param outgoing_queue_lock: lock guarding outgoing_q
        :param module_instance_name: name for this instance's logger / state
        :param shared_data: optional cross-thread shared state
        :param shared_data_lock: lock guarding shared_data
        """
        self.incoming_rabbit_mq = incoming_q
        self.incoming_queue_lock = incoming_queue_lock
        self.outgoing_q = outgoing_q
        self.outgoing_queue_lock = outgoing_queue_lock
        self.my_local_logger = logging.getLogger(module_instance_name)
        self.my_local_logger.setLevel(logging.DEBUG)
        # Local FIFO of validated Save/Query messages awaiting grooming.
        self.local_q = deque()
        self.eon_api_bridge = g_eon_api_bridge.EonApiBridge()
        self.handle_queue = False
        self.instance_name = module_instance_name
        # This is used to run the main loop
        self.run_enable = True
        self.shared_data = shared_data
        self.shared_data_lock = shared_data_lock
        self.start_time = 0
        self.run_start_time = time.time()
        self.groomer_state = "0:IDLE"  # Used to determine the current state of this thread in a multi-threaded env
        self.groom_run_state = "0:IDLE"  # Used to determine the current run mode of this thread
        self.idle_count = 0
        self.end_time = 0
        self.query_count = 0
        self.asset_dictionary = {}
        self.working_radius = g_config.START_RADIUS  # This will hold the radius units 0.12
        self.cell_collection_set = set()
        self.resolution = compute_resolution(self.working_radius)
        self.cell_count = 0
        self.utility_region = g_config.UTILITY_REGION
        self.ttl = g_config.TTL_MAX
        self.SHOW_PLOTS = False
        # Set by temporal_filter() when a cell shows a plottable power event.
        self.cell_time_event = False
        threading.Thread.__init__(self)
@staticmethod
def check_message_payload(dequeued_item):
"""
This method checks that the message payload keys matches the required (specified) keys
:return: False is any key is missing otherwise True
"""
key_array = ["dateTime",
"payload",
"messageType"]
# Note that the "ttl" key (and others) may be present but its not checked here!
for key in key_array:
if key not in dequeued_item.keys():
return False
key_array = ["zoomR",
"spatial",
"circuitID",
"reputationEnabled",
"assetID",
"temporal",
"outageTime",
"company",
"votes",
"zoomT",
"longitude",
"latitude"]
for key in key_array:
if key not in dequeued_item["payload"].keys():
return False
return True
    def process_incoming_rabbit_mq(self):
        """
        Drain the incoming Rabbit MQ and dispatch messages by messageType.

        'Save' and 'Query' messages are validated and appended to the local
        FIFO (self.local_q) for later grooming, with a default "ttl" injected
        when absent; 'Test' and 'Clear' messages are currently dropped.  The
        incoming queue lock is acquired with a bounded retry loop so a stuck
        producer cannot deadlock this thread; self.groomer_state tracks
        progress for outside observers.
        """
        self.groomer_state = "3:PROCESS QUEUE"
        lock_counter = 0
        # Non-blocking acquire with a retry budget of ~100 * SLEEP_TIME.
        while not self.incoming_queue_lock.acquire(False):
            self.my_local_logger.debug("Trying to acquire lock. Sleeping 0.05s.")
            time.sleep(g_config.SLEEP_TIME)
            lock_counter += 1
            if lock_counter > 100:
                self.my_local_logger.debug("Cant acquire incoming queue lock, returning")
                self.my_local_logger.error("Unable to acquire lock in process_incoming_queue, returning!")
                self.groomer_state = "4:PROCESS QUEUE LOCK ERROR"
                return
        while not self.incoming_rabbit_mq.empty():
            self.my_local_logger.debug(
                "Groomer says Incoming Rabbit MQ not empty, length is %d" % self.incoming_rabbit_mq.qsize())
            self.my_local_logger.debug("Acquired lock")
            # This is where the incoming grooming message is pulled off the Rabbit MQ.
            dequeued_item = self.incoming_rabbit_mq.get()
            if self.check_message_payload(dequeued_item):
                self.my_local_logger.info("A %s type message was dequeued " %
                                          dequeued_item['messageType'])
            else:
                # Malformed payload: release the lock and bail out of the drain.
                self.my_local_logger.error("Message payload is malformed in process_incoming_queue, returning")
                if self.incoming_queue_lock:
                    self.incoming_queue_lock.release()
                    self.my_local_logger.debug("GROOMER rabbit MQ lock was released")
                    self.my_local_logger.info("The rabbit MQ lock was released")
                self.groomer_state = "5:PROCESS QUEUE MALFORMED"
                return
            # Determine what is queue command type is and dispatch it.
            if dequeued_item['messageType'] == 'Test':
                # This is a dummy Test which is dropped for now.
                pass
            elif dequeued_item['messageType'] == 'Clear':
                # Restore the previous results
                pass
            elif dequeued_item['messageType'] == 'Save':
                # Save the current groom (filter) settings and kick off a new Utility wide groom process
                # Grab the Query message type and stuff it in a local fifo queue
                self.my_local_logger.debug("Save type message received")
                self.my_local_logger.debug("query_guid = %s" % "None - missing on save")  # dequeued_item['queryGuid'])
                #######################################################
                # Collect interesting payload information here
                #######################################################
                if "ttl" not in dequeued_item.keys():
                    dequeued_item["ttl"] = g_config.TTL_UTILITY_SPAN
                self.local_q.append(dequeued_item)
                self.my_local_logger.debug("Message queued to the local incoming queue (len=%d)" % len(self.local_q))
                self.my_local_logger.info("Message queued to the local incoming queue (len=%d)" % len(self.local_q))
                pass
            elif dequeued_item['messageType'] == 'Query':
                # Grab the Query message type and stuff it in a local fifo queue
                self.my_local_logger.debug("Query type message received")
                self.my_local_logger.debug("query_guid = %s" % dequeued_item['queryGuid'])
                #######################################################
                # Collect interesting payload information here
                #######################################################
                if "ttl" not in dequeued_item.keys():
                    dequeued_item["ttl"] = g_config.TTL_MAX
                self.local_q.append(dequeued_item)
                self.my_local_logger.debug("Message queued to the local incoming queue (len=%d)" % len(self.local_q))
                self.my_local_logger.info("Message queued to the local incoming queue (len=%d)" % len(self.local_q))
            else:
                self.my_local_logger.error("incoming_rabbit_mq TYPE is a UNKNOWN")
        if self.incoming_queue_lock:
            self.incoming_queue_lock.release()
            self.my_local_logger.debug("GROOMER rabbit MQ lock was released")
            self.my_local_logger.info("The rabbit MQ lock was released")
        self.my_local_logger.debug("process_incoming_rabbit_mq finished")
        self.groomer_state = "0:IDLE"
    def get_data_in_cell_area(self, cell_parameters, ttl):
        """
        Query the EON API for all ONTs, circuits and transformers inside the
        circle centred at the cell's lat/lon with the given radius, and build
        the in-memory cell dictionary from the paged results.

        Pages of size 20 are fetched from query_post_eon_data_30() until every
        section of a page comes back less than full.  ONT serial number
        "[PENDING INSTALL]" is skipped everywhere.  A second pass back-fills
        alarm events via alarm_get_pons_nms_00() for any ONT whose fail or
        restore event set is still empty (e.g. if the main alarm ingestion
        missed them).

        Example query payload (swagger test, http://10.123.0.27:8080/eon360/api/query):
            {"itemType": "ALL",
             "circle": {"unit": "MILES", "longitude": -73.8773389,
                        "radius": 1.0, "latitude": 41.2693778},
             "pageParameter": {"page": 0, "size": 100}}
        The response carries dd['eligibility']['dataItems'],
        dd['alarm']['dataItems'] and dd['utility']['dataItems'].

        :param cell_parameters: dict with at least "latitude", "longitude",
            "radius" and "outageTime" (the groom time)
        :param ttl: time-to-live stored on the returned cell
        :return: this_cell, a hexagonal-cell dictionary:
            {'neighbors': [[guid, angle]] * 6,  # six nearest-neighbour slots
             'assets':   {asset_id: {'lat_lon', 'events': [set, set], 'onts',
                                     'guid', 'serviceAddress'}},
             'onts':     {ont_sn: {'lat_lon',
                                   'events': [fail_times, restore_times],
                                   'assets'}},
             'circuits': {circuit_id: {'connected_items': set(asset_id)}},
             'state': 'populated', 'lat_lon': [lat, lon], 'radius': r,
             'groom_time': t, 'ttl': ttl}
            Event times are epoch milliseconds (divide by 60000 for minutes).
        """
        # query_guid = payload["query_guid"]
        this_lat = cell_parameters["latitude"]
        this_lon = cell_parameters["longitude"]
        # utility = cell_parameters["company"]
        groom_time = cell_parameters["outageTime"]
        # circuit_id = cell_parameters["circuitID"]
        # asset_id = cell_parameters["assetID"]
        # votes = cell_parameters["votes"]
        # spatial = cell_parameters["spatial"]
        # temporal = cell_parameters["temporal"]
        # reputation_ena = cell_parameters["reputationEnabled"]
        # zoom_t = cell_parameters["zoomT"]
        # zoom_r = cell_parameters["zoomR"]
        this_radius = cell_parameters["radius"]
        # units = cell_parameters["units"]
        query_type = "ALL"
        ont_serial_number_set = set()
        ont_items = {}
        asset_serial_number_set = set()
        asset_items = {}
        circuit_serial_number_set = set()
        circuit_items = {}
        # The six neighbor cells are initially set to be empty.
        # Each entry is a string guid and an angle (in degrees).
        neighbor_array = [["", 0], ["", 60], ["", 120], ["", 180], ["", 240], ["", 300]]
        this_cell = {'neighbors': neighbor_array,
                     'assets': {},
                     'onts': {},
                     'circuits': {},
                     'state': 'creating',
                     'lat_lon': [this_lat, this_lon],
                     'radius': this_radius,
                     'groom_time': groom_time,
                     'ttl': 0
                     }
        page_number = 0
        page_size = 20
        query_parameter = json.dumps({"itemType": query_type,
                                      "circle": {"longitude": this_lon,
                                                 "latitude": this_lat,
                                                 "radius": this_radius, "unit": g_config.RADIUS_UNITS},
                                      "pageParameter": {"page": page_number, "size": page_size}})
        self.my_local_logger.debug("Formed query parameter: %s" % query_parameter)
        dd = self.eon_api_bridge.query_post_eon_data_30(query_parameter=query_parameter)
        more_pages = True
        # Loop here until no more utility components of the first collection are found
        while more_pages and dd is not None:
            # This is the ONTs loop: find all the ONTs in the area.
            for this_ont in dd['eligibility']['dataItems']:
                ont_dictionary_keyword = this_ont['ontSerialNumber']
                ont_serial_number_set.add(ont_dictionary_keyword)
                if ont_dictionary_keyword == "[PENDING INSTALL]":
                    self.my_local_logger.debug("skipping this ont in eligibility list")
                    continue
                ont_items[ont_dictionary_keyword] = {'lat_lon': [this_ont['latitude'], this_ont['longitude']]}
                alarm_set_time = set()
                alarm_clear_time = set()
                ont_items[ont_dictionary_keyword]['events'] = [alarm_set_time, alarm_clear_time]
                ont_items[ont_dictionary_keyword]['assets'] = set()
            # Attach alarm events to ONTs, creating ONT entries seen only in alarms.
            for this_alarm in dd['alarm']['dataItems']:
                alarm_dictionary_keyword = this_alarm['ontSerialNumber']
                if alarm_dictionary_keyword not in ont_serial_number_set:
                    if alarm_dictionary_keyword == "[PENDING INSTALL]":
                        self.my_local_logger.debug("skipping this ONT in the alarm list")
                        continue
                    ont_serial_number_set.add(alarm_dictionary_keyword)
                    ont_items[alarm_dictionary_keyword] = {'lat_lon': [this_alarm['latitude'], this_alarm['longitude']]}
                    alarm_set_time = set()
                    alarm_clear_time = set()
                    ont_items[alarm_dictionary_keyword]['events'] = [alarm_set_time, alarm_clear_time]
                    ont_items[alarm_dictionary_keyword]['assets'] = set()
                if this_alarm['alarmReceiveTime']:
                    alarm_set = float(this_alarm['alarmReceiveTime'])  # * 1e-3) / 60
                    ont_items[alarm_dictionary_keyword]['events'][0].add(alarm_set)
                if this_alarm['alarmClearTime']:
                    alarm_clear = float(this_alarm['alarmClearTime'])  # * 1e-3) / 60
                    ont_items[alarm_dictionary_keyword]['events'][1].add(alarm_clear)
            # Now go through the assets and associate the assets to the ONTs and the ONTs to the assets
            for this_item in dd['utility']['dataItems']:
                asset_dictionary_keyword = this_item['transformerID']
                if asset_dictionary_keyword not in asset_serial_number_set:
                    asset_serial_number_set.add(asset_dictionary_keyword)
                    asset_items[asset_dictionary_keyword] = {'lat_lon': [this_item['latitude'], this_item['longitude']]}
                    asset_items[asset_dictionary_keyword]['events'] = [set(), set()]
                    asset_items[asset_dictionary_keyword]['onts'] = set()
                    asset_items[asset_dictionary_keyword]['guid'] = this_item['guid']
                    asset_items[asset_dictionary_keyword]['serviceAddress'] = this_item['serviceAddress']
                for this_ont in this_item['eligibilityList']:
                    ont_dictionary_keyword = this_ont['ontSerialNumber']
                    if ont_dictionary_keyword not in ont_serial_number_set:
                        ont_serial_number_set.add(ont_dictionary_keyword)
                        ont_items[ont_dictionary_keyword] = {
                            'lat_lon': [this_ont['latitude'], this_ont['longitude']]}
                        alarm_set_time = set()
                        alarm_clear_time = set()
                        ont_items[ont_dictionary_keyword]['events'] = [alarm_set_time, alarm_clear_time]
                        ont_items[ont_dictionary_keyword]['assets'] = set()
                    # Skip the ONTs that don't have an installation.
                    if ont_dictionary_keyword == "[PENDING INSTALL]":
                        self.my_local_logger.debug("skipping the ONT listed on eligibility list in asset_id=%s" %
                                                   asset_dictionary_keyword)
                        self.my_local_logger.info("Skipping %s because it's status is PENDING INSTALL" %
                                                  asset_dictionary_keyword)
                        continue
                    # Stitch up the assets in the onts
                    ont_items[ont_dictionary_keyword]['assets'].add(asset_dictionary_keyword)
                    # Stitch up the onts in the assets
                    asset_items[asset_dictionary_keyword]['onts'].add(ont_dictionary_keyword)
                circuit_dictionary_keyword = this_item['circuitID']
                if circuit_dictionary_keyword not in circuit_serial_number_set:
                    # add the circuit item to the circuit_serial_number_set is needed
                    circuit_serial_number_set.add(circuit_dictionary_keyword)
                    # and create an empty set
                    circuit_items[circuit_dictionary_keyword] = {'connected_items': set()}
                # Now add the data structure to the set
                circuit_items[circuit_dictionary_keyword]['connected_items'].add(asset_dictionary_keyword)
            ###########################
            #  Look for the next page #
            ###########################
            # A completely full section implies more data may follow.
            if (dd['utility']['pageTotalItems'] == page_size) or \
                    (dd['alarm']['pageTotalItems'] == page_size) or \
                    (dd['eligibility']['pageTotalItems'] == page_size):
                self.my_local_logger.debug("Collecting next page for this message")
                page_number += 1
                more_pages = True
                query_parameter = json.dumps({"itemType": query_type,
                                              "circle": {"longitude": this_lon,
                                                         "latitude": this_lat,
                                                         "radius": this_radius,
                                                         "unit": g_config.RADIUS_UNITS},
                                              "pageParameter": {"page": page_number, "size": page_size}})
                dd = self.eon_api_bridge.query_post_eon_data_30(query_parameter=query_parameter)
            else:
                more_pages = False
        this_cell['assets'] = asset_items
        # Go over the ONT set and see if there are any that don't have alarms.  This might happen if there were no
        # alarms posted to this ONT because the main alarm ingestion loop failed for some reason.  There will still
        # be alarms posted on the ONTs and those can be recovered here, e.g.:
        # http://10.123.0.27:8080/eon360/api/alarms?sortBy=alarmReceiveTime&ontSerialNumber=000ABB96&p=0&s=20
        for this_ont in ont_items:
            if len(ont_items[this_ont]['events'][0]) == 0 or len(ont_items[this_ont]['events'][1]) == 0:
                dd = self.eon_api_bridge.alarm_get_pons_nms_00(ont_serial_number=this_ont)
                if dd:
                    if 'alarms' in dd.keys():
                        for this_alarm in dd['alarms']:
                            if this_alarm['alarmReceiveTime']:
                                alarm_set = float(this_alarm['alarmReceiveTime'])  # * 1e-3) / 60
                                ont_items[this_ont]['events'][0].add(alarm_set)
                                self.my_local_logger.info("Adding an AlarmReceiveTime to the data")
                            if this_alarm['alarmClearTime']:
                                alarm_clear = float(this_alarm['alarmClearTime'])  # * 1e-3) / 60
                                ont_items[this_ont]['events'][1].add(alarm_clear)
                    else:
                        self.my_local_logger.warning("No alarms found in call to alarm_get_pons_nms_00(ont_serial_number=%s)" % this_ont )
                else:
                    self.my_local_logger.warning("Nothing returned from the API call")
        this_cell['onts'] = ont_items
        this_cell['circuits'] = circuit_items
        this_cell['state'] = 'populated'
        this_cell['ttl'] = ttl
        self.my_local_logger.info("This CELL (radius= %3.3f %s @ lat=%f, lon=%f) has %d circuits, %d assets and %d onts." %
                                  (this_radius, g_config.RADIUS_UNITS, this_lat, this_lon,
                                   len(circuit_items), len(asset_items), len(ont_items))
                                  )
        # Note convert the time units into minutes by dividing by 60000
        return this_cell
@staticmethod
def persist_cell_pickle(cell, filename=""):
"""
:param cell: The cell structure that is persisted to disk
:return:
"""
this_lat = cell['lat_lon'][0]
this_lon = cell['lat_lon'][1]
if this_lat < 0:
lat_str = ("%03.2f" % (float(round(-this_lat * 100)) / 100.0)).replace('.', 'm')
else:
lat_str = ("%03.2f" % (float(round(this_lat * 100)) / 100.0)).replace('.', 'p')
if this_lon < 0:
lon_str = ("%03.2f" % (float(round(-this_lon * 100)) / 100.0)).replace('.', 'm')
else:
lon_str = ("%03.2f" % (float(round(this_lon * 100)) / 100.0)).replace('.', 'p')
if filename == "":
filename = 'cell_' + lat_str + '_' + lon_str
filename += '.pck'
full_path = g_config.BASE_DIR + os.sep + g_config.PICKLES + os.sep + filename
with open(full_path, "w") as f: # write mode
pickle.dump(cell, f)
@staticmethod
def un_persist_cell_pickle(this_lat, this_lon):
"""
:param this_lat:
:param this_lon:
:return: cell
"""
if this_lat < 0:
lat_str = ("%03.2f" % (float(round(-this_lat * 100)) / 100.0)).replace('.', 'm')
else:
lat_str = ("%03.2f" % (float(round(this_lat * 100)) / 100.0)).replace('.', 'p')
if this_lon < 0:
lon_str = ("%03.2f" % (float(round(-this_lon * 100)) / 100.0)).replace('.', 'm')
else:
lon_str = ("%03.2f" % (float(round(this_lon * 100)) / 100.0)).replace('.', 'p')
filename = 'cell_' + lat_str + '_' + lon_str + '.pck'
with open(filename, "r") as f: # read mode
cell = pickle.load(open(f))
return cell
    def temporal_filter(self, cell):
        """
        Build a deglitched, resampled power-state time series for every ONT in
        the cell and attach it as cell['onts'][sn]['temporal_filter'].

        For each ONT the fail/restore event sets are merged into a sorted
        state vector (ON = power present, OFF = power out; event times are
        epoch milliseconds -- divide by 1000 and 60 for minutes), OFF->ON
        glitches shorter than g_config.DEGLITCH_TIME are removed, and the
        result is linearly interpolated onto a 5-minute grid spanning +/- 1
        week around the groom time (or the 2 weeks up to it when grooming near
        real time).  When the trace shows any transition, outage durations are
        extracted if the mean power state ("reliability") exceeds 0.8, and
        self.cell_time_event is set.

        :param cell: cell dictionary produced by get_data_in_cell_area()
        :return: the same cell, with a 'temporal_filter' entry per ONT:
            {'reliability', 'event_durations', 'event_times',
             'time_domain_vector': [sample_times_sec, power_states]}
        """
        self.cell_time_event = False
        for this_ont in cell['onts']:
            # Seed the series with "power ON" at the engine epoch so every ONT
            # has at least one sample.
            event_vector = {'t': [int64(g_config.ENGINE_BEGIN_TIME)], 'a': [ON]}
            on_times = cell['onts'][this_ont]['events'][ON]
            off_times = cell['onts'][this_ont]['events'][OFF]
            if len(on_times) > 0:
                for this_alarm in on_times:
                    event_vector['t'].append(this_alarm)
                    event_vector['a'].append(ON)
            if len(off_times) > 0:
                for this_alarm in off_times:
                    event_vector['t'].append(this_alarm)
                    event_vector['a'].append(OFF)
            # At this point we have a temporal vector of events for this ONT;
            # sort both arrays by time.
            time_vector = array(event_vector['t'])
            ind = argsort(time_vector)
            power_state = array(event_vector['a'])[ind]
            t = time_vector[ind]
            # At this point the sorted time and alarm vectors are ready
            # tw = t[t > t[-1] - config.ALARM_DETECT_WINDOW * 1000]
            # aw = a[t > t[-1] - config.ALARM_DETECT_WINDOW * 1000]
            # Deglitch the vectors now
            # To deglitch the time vector take all the values that at ON and extend them by 5 minutes then
            # and add (or) them back to the time vector
            # time_of_alarm_condition = tw[-1] # The last time vector point (the sorted value)
            # alarm_condition = aw[-1]
            time_count = len(t)
            deglitched_power_state = copy.copy(power_state)
            # see for example http://pandas.pydata.org/pandas-docs/stable/timeseries.html
            for i in range(time_count - 1):
                if power_state[i] == OFF and power_state[i + 1] == ON:
                    if t[i + 1] < t[i] + g_config.DEGLITCH_TIME:
                        self.my_local_logger.debug(
                            "Deglitched the power at %s" % (pandas.to_datetime(t[i], unit='ms')))
                        deglitched_power_state[i] = ON
                    else:
                        self.my_local_logger.debug("off time is %f min (%f hours) (days %f)" % (
                            (t[i + 1] - t[i]) / 1000 / 60, (t[i + 1] - t[i]) / 1000 / 60 / 60,
                            (t[i + 1] - t[i]) / 1000 / 60 / 60 / 24))
            # Expand each state into a step function: hold the value until just
            # before the next event (MS_TIME_RESOLUTION ~ 5 s earlier).
            power_state_array = []
            time_array = []
            for i in range(time_count-1):
                time_array.append(t[i])
                time_array.append(t[i+1] - g_config.MS_TIME_RESOLUTION)  # something around 5 seconds
                power_state_array.append(deglitched_power_state[i])
                power_state_array.append(deglitched_power_state[i])
                if deglitched_power_state[i] == ON:
                    self.my_local_logger.debug("power on at %s" % (pandas.to_datetime(t[i], unit='ms')))
                if deglitched_power_state[i] == OFF:
                    self.my_local_logger.debug("power off at %s" % (pandas.to_datetime(t[i], unit='ms')))
            time_array.append(t[-1])
            power_state_array.append(deglitched_power_state[-1])
            sample_time = cell['groom_time']
            if sample_time > t[-1]:
                self.my_local_logger.debug(
                    "sample time is after the end of time in the time event list, using interpolated value")
                time_array.append(sample_time - g_config.MS_TIME_RESOLUTION)
                power_state_array.append(deglitched_power_state[-1])
            time_array_sec = [round(x / 1000) for x in time_array]
            # time_domain_vector = [time_array, power_state_array] # column_stack((time_array,power_state_array))
            # Calculate a +/- 1 week interval every 5 minutes from the groom time unless the groom time is the same
            # as the current time, then the last 2 weeks are used to compute the time vector.  This is done to allow
            # the real time groomer to run a bit faster than the interactive groomer during the interp call.
            # The arrow library produces timestamp values in seconds.
            current_time = arrow.utcnow().to('US/Eastern')
            a_week_ago = current_time.replace(weeks=-1)
            sample_time_arrow = arrow.get(sample_time/1000)
            if sample_time_arrow.timestamp < a_week_ago.timestamp:
                # This is a grooming operation that fits in the 2 week span of time.
                start_time = sample_time_arrow.replace(weeks=-1)
                stop_time = sample_time_arrow.replace(weeks=1)
            else:
                start_time = sample_time_arrow.replace(weeks=-2)
                stop_time = sample_time_arrow
            # The time vector will be in seconds:
            # one minute = 60, one hour = 60*60, one day = 24*60*60,
            # one week = 7*24*60*60; five-minute intervals are 5*60.
            delta_time = 5*60  # This is the sample interval of the time vector (Every 5 minutes)
            number_of_points = (stop_time.timestamp - start_time.timestamp) / delta_time
            sample_time_array = linspace(start_time.timestamp, stop_time.timestamp, number_of_points)
            sample_power_array = interp(sample_time_array, time_array_sec, power_state_array)
            time_domain_vector = [sample_time_array, sample_power_array]
            # Fraction of samples with power present (1.0 == always on).
            reliability = sum(sample_power_array)/len(sample_power_array)
            event_durations = []
            event_times = []
            # A flat trace means nothing happened in the window.
            if sample_power_array.min() == sample_power_array.max():
                self.SHOW_PLOTS = False
            else:
                self.SHOW_PLOTS = True
            if self.SHOW_PLOTS:
                if not g_config.IS_DEPLOYED:
                    print "Reliability = %4.4f" % reliability
                if reliability > 0.8:
                    self.cell_time_event = True
                    if not g_config.IS_DEPLOYED:
                        try:
                            import matplotlib.pyplot as plt
                            # plt.plot(time_array, power_state_array, 'o')
                            plt.plot(sample_time_array, sample_power_array, '-x')
                            plt.show(block=False)
                        except:
                            print "Something went wrong with the matplotlib command, skipping!"
                    # Only a window that starts and ends powered-on yields
                    # cleanly paired off/on edges.
                    if (sample_power_array[0] > 0) and (sample_power_array[-1] > 0):
                        if not g_config.IS_DEPLOYED:
                            print "Diff the time vector to find the on and off times."
                        diff_sample_power_array = diff(sample_power_array)
                        index_on = diff_sample_power_array > 0
                        on_times = sample_time_array[index_on]
                        index_off = diff_sample_power_array < 0
                        off_times = sample_time_array[index_off]
                        if len(on_times) == len(off_times):
                            for k, t_off in enumerate(off_times):
                                # The power will be off from the time it turns on minus the time it turned off.
                                power_fail_event_duration = on_times[k] - t_off
                                if not g_config.IS_DEPLOYED:
                                    print "power fail event duration = %f" % power_fail_event_duration
                                event_durations.append(power_fail_event_duration)
                                event_times.append(t_off)
                                if not g_config.IS_DEPLOYED:
                                    print "Found a %10.2f minute outage on %s" % (
                                        (power_fail_event_duration/60),
                                        arrow.get(t_off).format("MMMM DD, YYYY @ hh:mm A")
                                    )
                        else:
                            self.my_local_logger.info('Power event edges are mismatched, skipping this: ')
                    else:
                        self.my_local_logger.info('Power event edges in the window are mismatched, skipping this: ')
                else:
                    self.my_local_logger.info('Power event outage has low reliability, skipping this: ')
            self.my_local_logger.info('temporal data for cell has %d points from %s to %s' % (
                number_of_points, start_time, stop_time))
            cell['onts'][this_ont]['temporal_filter'] = {'reliability': reliability,
                                                         'event_durations': event_durations,
                                                         'event_times': event_times,
                                                         'time_domain_vector': time_domain_vector}
        return cell
    def spatial_filter(self, cell):
        """
        Fan each ONT's temporal outage events out onto the assets it serves.

        Only acts when temporal_filter() flagged this cell
        (self.cell_time_event); otherwise the cell passes through unchanged.
        For every ONT with a non-empty event list, each asset in its 'assets'
        set accumulates a 'spatial_filter' record of parallel arrays:
        {'distance': [ont-to-asset distance in miles, ...],
         'events': [{'event_durations': [...], 'event_times': [...]}, ...]}.

        :param cell: a cell whose ONTs were already processed by
            temporal_filter()
        :return: the same cell, with 'spatial_filter' entries added to the
            affected assets
        """
        if self.cell_time_event:
            # Only append outages on assets for the cells that have events
            if not g_config.IS_DEPLOYED:
                print "An interesting time event has occurred in this cell..."
            for this_ont in cell['onts']:
                event_durations = cell['onts'][this_ont]['temporal_filter']['event_durations']
                event_times = cell['onts'][this_ont]['temporal_filter']['event_times']
                if not g_config.IS_DEPLOYED:
                    # Debug breadcrumbs for one known ONT/asset pair.
                    if this_ont == "0016FE13":
                        print "found an event"
                for this_asset in cell['onts'][this_ont]['assets']:
                    if not g_config.IS_DEPLOYED:
                        if this_asset == "TR1000489404_108":
                            print "found a matching asset"
                    # Start from the asset's existing record so multiple ONTs
                    # accumulate rather than overwrite.
                    try:
                        event_activities = cell['assets'][this_asset]['spatial_filter']
                    except KeyError:
                        event_activities = {'distance': [], 'events': []}
                    if len(event_durations) > 0:
                        ont_lat = cell['onts'][this_ont]['lat_lon'][0]
                        ont_lon = cell['onts'][this_ont]['lat_lon'][1]
                        lat_lon = cell['assets'][this_asset]['lat_lon']
                        asset_lat = lat_lon[0]
                        asset_lon = lat_lon[1]
                        this_distance = lat_lon_distance(asset_lat, asset_lon, ont_lat, ont_lon, units='mi')
                        event_activities['distance'].append(this_distance)
                        event_activities['events'].append(
                            {'event_durations': event_durations, 'event_times': event_times}
                        )
                        cell['assets'][this_asset]['spatial_filter'] = event_activities
            if not g_config.IS_DEPLOYED:
                print " ...done with interesting cell."
        return cell
    def vote_on_assets(self, cell, temporal_data, spatial_data, voting_data):
        """
        Collapse per-ONT spatial events into per-asset 'outage_events'.

        :param cell: cell previously processed by spatial_filter()
        :param temporal_data: temporal filter description (only logged here)
        :param spatial_data: JSON string expected to carry an 'r' weight array
            -- NOTE(review): the parsed weights are currently unused by the
            accumulation below
        :param voting_data: requested vote threshold (int or numeric string),
            defaulting to 1 on bad input -- NOTE(review): it is only logged;
            the ">= 1" test below does not actually apply it
        :return: the same cell; every asset gains an 'outage_events' entry,
            either None (no surviving events) or
            {'event_durations': [...], 'event_times': [...]}
        """
        try:
            this_filter = json.loads(spatial_data)
            total_counts = len(this_filter['r'])
            weights = []
            for i in range(total_counts):
                weights.append(this_filter['r'][i])
        except TypeError as e:
            self.my_local_logger.error('Spatial data has a Type Error: %s, %s' % (spatial_data, e))
        except ValueError as e:
            self.my_local_logger.error('Spatial data has a ValueError: %s, %s' % (spatial_data, e))
        self.my_local_logger.info('spatial data = %s', spatial_data)
        self.my_local_logger.info('temporal data = %s', temporal_data)
        if voting_data:
            try:
                number_of_votes = int(voting_data)
            except ValueError as e:
                self.my_local_logger.error('Voting data has en error in the passed value %s' % e)
                number_of_votes = 1
            except TypeError as e:
                self.my_local_logger.error('Voting data is not a string %s' % e)
                number_of_votes = 1
        else:
            number_of_votes = 1
        self.my_local_logger.info('Number of votes passed: %d' % number_of_votes)
        for this_asset in cell['assets']:
            # Reset so assets with no spatial_filter end up with None.
            cell['assets'][this_asset]['outage_events'] = None
            try:
                # these_distances = cell['assets'][this_asset]['spatial_filter']['distance']
                these_events = cell['assets'][this_asset]['spatial_filter']['events']
            except KeyError:
                # No outages recorded on this asset.
                continue
            if len(these_events) > 0:
                if len(these_events) >= 1:  # number_of_votes:
                    # This is where the vote-threshold filter will take place.
                    # these_events is an array of event dicts; flatten their
                    # parallel duration/time arrays into one record.
                    try:
                        outage_events = cell['assets'][this_asset]['outage_events']
                    except KeyError:
                        outage_events = {'event_durations': [], 'event_times': []}
                    if outage_events is None:
                        outage_events = {'event_durations': [], 'event_times': []}
                    for this_event_dict in these_events:
                        for j, this_event in enumerate(this_event_dict['event_durations']):
                            outage_events['event_durations'].append(this_event)
                            outage_events['event_times'].append(this_event_dict['event_times'][j])
                    cell['assets'][this_asset]['outage_events'] = outage_events
        return cell
def post_outage_on_asset(self, cell, payload):
"""
:param cell:
:param payload: this will be of the form
http://10.123.0.27:8080/eon360/api/utilities?p=0&s=20
"eonUtilityEntries": [
{
"id": "5508dacee4b0df5309df591e",
"version": 0,
#######################
## ADD THIS GUID
"guid": "46f7655c-9160-4c08-b272-59c32232ba9f",
#######################
"company": "CEDRAFT",
"serviceAddress": "{\"CE Map ID\": \"None\",
\"Municipality\": \"New Castle\",
\"Provenance\":\"Report A\",
\"Attached Assets\": [],
\"Next Hop\": \"PS302355612\",
\"Type\": \"HOUSE\",
\"Downstream\": \"None\",
\"Transformer Supply\": [\"TR302355616_T4\"],
\"Upstream\":\"PS302355612\",
\"Connections\": [],
\"Address\":\"10 VALLEY VIEW RD, Chappaqua NY, 10514-2532\",
\"Utility ID\": \"None\"}",
"errorCode": "0",
"circuitID": "10U2",
"transformerID": "HS01c902165608e5f12ce4c01c78c70415",
"eligibilityList": [
{
"id": "54a079aae4b040db636a2d95",
"version": 0,
"guid": "23697667-4810-4169-8802-46ad6efae3a3",
"company": "",
"ontSerialNumber": "59054969",
"errorCode": "0.91",
"alarmID": "CHPQNYCPOL1*LET-3*11*1*1",
"ontAddress": "8 Brookside Cir,Chappaqua,NY,10514",
"modelCoefficients": null,
"longitude": f-73.787811,
"latitude": 41.175064,
"createdAtTimestamp": 1419803050366,
"lastModifiedAtTimestamp": 1419803050366
},
"payload": {
"company": "CEDRAFT",
"outageTime": 1430452800000,
"longitude": lon,
"latitude": lat,
"circuitID": "",
"assetID": "",
"votes": 3,
"spatial": '{"r":[1,1]}',
"temporal": "[1,0; .8,24; .3, 60]",
"reputationEnabled": True,
"zoomT": 1,
"zoomR": 1,
"radius": 0.12,
"units": "MI"
},
The post must be of the form
{
"eventDuration": "long",
"guid": "",
"id": "",
"utility": {
"assetType": "",
"circuitID": "",
"company": "",
"outageID": "",
"transformerID": ""
},
"timeOfEvent": "Date",
"company": "",
"longitude": 0,
"internalUtilityGuid": "",
"latitude": 0,
"algorithm": "",
"version": "long"
}
:return:
"""
# circuit_id = ""
# First loop over all circuits:
try:
for this_circuit in cell['circuits']:
# Now loop over all the items on that circuit
for this_asset in cell['circuits'][this_circuit]['connected_items']:
asset_item = cell['assets'][this_asset]
outages = asset_item['outage_events']
# This is the form of an event (If there is one!)
# It will be None if there are no events otherwise it will be:
# 'event_durations': copy.deepcopy(these_events['event_durations']),
# 'event_times': copy.deepcopy(these_events['event_times'])
if outages:
self.my_local_logger.info('Examining circuit=%s, asset=%s. which has %d outages to post!' % (this_circuit, this_asset, len(outages)))
if this_asset[0:2] == "TR":
asset_type = "TRANSFORMER"
elif this_asset[0:2] == "HS":
asset_type = "HOUSE"
elif this_asset[0:2] == "PS":
asset_type = "POLE, SECONDARY"
elif this_asset[0:2] == "PP":
asset_type = "POLE, PRIMARY"
else:
asset_type = "OTHER"
for i, this_event_duration in enumerate(outages['event_durations']):
address_string = cell['assets'][this_asset]['serviceAddress']
self.my_local_logger.info("address_string = %s" % address_string)
address_string_pairs = json.loads(address_string)
this_address = ''
if "Municipality" in address_string_pairs.keys():
this_address += 'Municipality:' + address_string_pairs['Municipality'] + '|'
if "Address" in address_string_pairs.keys():
this_address += 'Address:' + address_string_pairs['Address'] + '|'
# Here's how to include the CE Map ID and the Utility ID if needed
# this_address += 'CE MapID:' + this_asset.split('_')[1] + '|'
# this_address += 'UtilityID:' + this_asset.split('_')[0][2:]
if this_address[-1] == '|':
this_address = this_address[:-1]
utility_document = {
"internalUtilityGuid": asset_item['guid'],
"eventDuration": int(round(this_event_duration * 1000)),
# "guid": "guid-here",
# "id": 'id-here',
"utility": {
"assetType": asset_type,
"circuitID": this_circuit,
"company": payload["company"],
"outageID": 'outage-id-here',
"transformerID": this_asset,
"address": this_address
},
"timeOfEvent": int(round(outages['event_times'][i] * 1000)),
# "longitude": asset_item['lat_lon'][1],
# "latitude": asset_item['lat_lon'][0],
"algorithm": "NEAR10"
# "version": 0
}
if not g_config.IS_DEPLOYED:
print "Posting a %10.2f minute outage on %s, circuit: %s, asset_id: %s" % (
(utility_document['eventDuration'] / 1000 / 60),
arrow.get(utility_document['timeOfEvent'] / 1000).format("MMMM DD, YYYY @ hh:mm A"),
utility_document['utility']['circuitID'],
utility_document['utility']['transformerID']
)
self.my_local_logger.info('Posting: %s' % json.dumps(utility_document))
self.eon_api_bridge.groomed_outages_post_20(utility_document)
else:
if not g_config.IS_DEPLOYED:
print "Nothing to post for circuit: %s, asset_id: %s" % (
this_circuit,
this_asset
)
except:
self.my_local_logger.error('Posting outage error')
def build_in_memory_cell_db(self, cell):
"""
:param cell: A cell of data that represents the collection of onts, assets and circuits along with the alarms
Creates an in-memory data structure that has this information:
this_cell = {'neighbors': [], # the 6 nearest neighbors
'assets': {}, # The utility assets including their lat and lon and events
'onts': {}, # Verizon's ONTs including their lat and lon and events
'state': '' # A string representing the state of this cell.
This is used for multi threading purposes so that neighboring cells can see
whats going on.
'circuits': {} # This is a set of circuits in this cell. All assets on a circuit
are in the circuits list
'lat_lon': [] # The lat and lon array of the center of the cell
'radius': 1.00 # The radius of the circumscribed cell.
ont_items is a dictionary of {'lat_lon':[],'assets':[],'events':[]}
asset_items is a dictionary of {'lat_lon':[],'onts':[],'events':[]}
:return: none
"""
asset_dict = {'groom_time': cell['groom_time']}
for this_asset in cell['assets']:
asset_dict[this_asset] = SortedDict()
for this_ont in cell['assets'][this_asset]['onts']:
this_distance = lat_lon_distance(cell['assets'][this_asset]['lat_lon'][0],
cell['assets'][this_asset]['lat_lon'][1],
cell['onts'][this_ont]['lat_lon'][0],
cell['onts'][this_ont]['lat_lon'][1])
for this_event in cell['onts'][this_ont]['events'][0]:
event_key = int(this_event / 1000)
if event_key in asset_dict[this_asset]:
asset_dict[this_asset][event_key]['voters'].update({this_distance: this_ont})
else:
voters = SortedDict()
voters.update({this_distance: this_ont})
asset_dict[this_asset].update({event_key: {'state': 0, 'voters': voters}})
# self.my_local_logger.debug("%d,0,%s,%s,%f" % (event_key, this_ont, this_asset, this_distance)
for this_event in cell['onts'][this_ont]['events'][1]:
event_key = int(this_event / 1000)
if event_key in asset_dict[this_asset]:
asset_dict[this_asset][event_key]['voters'].update({this_distance: this_ont})
else:
voters = SortedDict()
voters.update({this_distance: this_ont})
asset_dict[this_asset].update({event_key: {'state': 1, 'voters': voters}})
# self.my_local_logger.debug("%d,1,%s,%s,%f" % (event_key, this_ont, this_asset, this_distance)
self.asset_dictionary = asset_dict
self.my_local_logger.debug("done with build_in_memory_cell_db")
@staticmethod
def compute_cell_guid(payload, resolution):
"""
Computes a GUID based on the lat lon and time value
"""
# query_guid = payload["query_guid"]
this_lat = payload["latitude"]
this_lon = payload["longitude"]
# utility = payload["company"]
outage_test_time = payload["outageTime"]
# circuit_id = payload["circuitID"]
# asset_id = payload["assetID"]
# votes = payload["votes"]
# spatial = payload["spatial"]
# temporal = payload["temporal"]
# reputation_ena = payload["reputationEnabled"]
# zoom_t = payload["zoomT"]
# zoom_r = payload["zoomR"]
# radius = payload["radius"]
# units = payload["units"]
# The number of decimal points in the lat and lon gridify the guid
fmt_str = "%%4.%df_%%4.%df_%%d" % (resolution, resolution)
this_guid = fmt_str % (this_lat, this_lon, outage_test_time)
cell_guid = this_guid.replace(".", "p").replace("-", "m")
timestamp_guid = "%d" % outage_test_time
return cell_guid, timestamp_guid
def save_cell_in_shared_mem(self, this_cell_guid, cell):
while not self.shared_data_lock.acquire(False):
self.my_local_logger.info('Waiting to acquire lock for shared data.')
time.sleep(g_config.SLEEP_TIME)
self.shared_data['cell_collection_set'].add(this_cell_guid)
self.shared_data['cell_collection_dict'][this_cell_guid] = cell
self.shared_data_lock.release()
def get_shared_data(self, query_type="all", dict_key=None):
my_shared_data = None
if query_type == "all":
while not self.shared_data_lock.acquire(False):
self.my_local_logger.info('groom_outages: waiting to acquire lock for shared data.')
time.sleep(g_config.SLEEP_TIME)
my_shared_data = copy.copy(self.shared_data)
self.shared_data_lock.release()
elif query_type == "cell_collection_dict":
while not self.shared_data_lock.acquire(False):
self.my_local_logger.info('groom_outages: waiting to acquire lock for shared data.')
time.sleep(g_config.SLEEP_TIME)
if dict_key is not None:
my_shared_data = copy.copy(self.shared_data['cell_collection_dict'][dict_key])
else:
my_shared_data = copy.copy(self.shared_data['cell_collection_dict'])
self.shared_data_lock.release()
elif query_type == "cell_collection_dict_keys":
while not self.shared_data_lock.acquire(False):
self.my_local_logger.info('groom_outages: waiting to acquire lock for shared data.')
time.sleep(g_config.SLEEP_TIME)
my_shared_data = copy.copy(self.shared_data['cell_collection_dict'].keys())
self.shared_data_lock.release()
elif query_type == "cell_collection_set":
while not self.shared_data_lock.acquire(False):
self.my_local_logger.info('groom_outages: waiting to acquire lock for shared data.')
time.sleep(g_config.SLEEP_TIME)
my_shared_data = copy.copy(self.shared_data['cell_collection_set'])
self.shared_data_lock.release()
return my_shared_data
def build_new_cell(self, this_cell_guid, this_items_payload, ttl):
"""
Builds a cell and stores it in local shared memory
"""
self.my_local_logger.debug("BUILDING_CELL %d, %s" % (self.cell_count, this_cell_guid))
t0 = time.time()
# Step 3) Query the API and find all utility assets within the region of interest
cell = self.get_data_in_cell_area(this_items_payload, ttl) # lat, lon, radius, this_time, ttl)
t1 = time.time()
self.my_local_logger.debug("API calls to get %d assets in a %f %s radius took %f seconds" %
(len(cell['assets']), cell['radius'], g_config.RADIUS_UNITS, (t1 - t0)))
self.persist_cell_pickle(cell, this_cell_guid)
self.my_local_logger.debug("Saved the cell pickle")
t0 = time.time()
self.build_in_memory_cell_db(cell)
t1 = time.time()
self.my_local_logger.debug("Building in memory data took %f seconds" % (t1 - t0))
# plot_assets(self.asset_dictionary)
# Step 4) Save this cell to the shared memory set
self.cell_count += 1
return cell
def mark_cell_in_shared_memory(self, cell_guid):
self.my_local_logger.debug("MARKING_CELL %s" % cell_guid)
while not self.shared_data_lock.acquire(False):
self.my_local_logger.info('Waiting to acquire lock for shared data.')
time.sleep(g_config.SLEEP_TIME)
self.shared_data['cell_collection_set'].add(cell_guid)
self.shared_data_lock.release()
def queue_to_publish(self, message):
while not self.outgoing_queue_lock.acquire(False):
self.my_local_logger.info('Groomer is waiting to acquire lock on publisher queue.')
time.sleep(g_config.SLEEP_TIME)
self.my_local_logger.debug("Groomer got consumer_queue_lock, ")
self.outgoing_q.put(message, False)
self.my_local_logger.debug(" after putting message in queue size is now: %d" % self.outgoing_q.qsize())
if self.outgoing_queue_lock:
self.outgoing_queue_lock.release()
self.my_local_logger.debug(
"Groomer released the consumer_queue_lock. Queue size is now:%d" % self.outgoing_q.qsize())
self.my_local_logger.info('Publish message queued, lock released.')
    def groom_outages(self):
        """
        This method grooms the outages by looking at the internal shared queue and pulling off the items that are
        ready to be processed. The shared queue is passed between processes contains the cell data along with
        processing state for each cell.
        """
        #######################################################
        # This is the general flow for the groom process
        # When the queue is hit then it will have the start and end times along with the various parameters
        # needed for the outage event calculation.
        # When the queue item comes in then these steps happen.
        #
        # h) temporal filter : a string that represents time domain filter coefficients.
        #    The string will be of this form:
        #    "[1,0; .8,24; .3, 60]"
        #    "[w0,t0; w1,t1; w2, t2; ...]" were w0 is the weight (typically between 0 and 1)
        #    and t0 is the historical time
        #    (in minutes) from the event. In this example the following rules are used:
        #    At the event time, the alarm will be weighted with 1, 24 minutes before the event the alarm
        #    will be weighted by .8, 60 minutes before the event the alarm will be weighted by 0.3.
        #    For events that happen between the time weights a linear interpolation will be used.
        # i) use reputation (flag) : a flag that says whether to use the reputation of the ONTs for voting
        self.start_time = time.time()
        self.my_local_logger.debug("GROOMING NOW")
        # lat = 41.2693778
        # lon = -73.8773389
        # radius = 1.0  # config.START_RADIUS # = 0.12
        # #################################################
        # STEP 1 Pull items off the queue.
        # self.pull_q_groom_command()
        self.groomer_state = "1:GROOM"
        # Snapshot the queue length: only the items present now are processed;
        # anything appended during this pass waits for the next call.
        groom_queue_len = len(self.local_q)
        if groom_queue_len == 0:
            self.my_local_logger.debug("NOTHING IN LOCAL QUEUE, returning")
            self.groomer_state = "1.0:GROOM_RETURN_EARLY"
            return
        self.my_local_logger.debug("------------------ processing all %d items in the local_q" % groom_queue_len)
        for _ in range(groom_queue_len):
            # STEP 1) Pull items off the queue. The queue will consist of:
            # a) time : in in microseconds that is desired for calculating the outage
            # b) lat : latitude in decimal degrees
            # c) lon : longitude in decimal degrees
            # d) circuitID : circuit ID filter to be used for identification of a
            #    specific circuit within the area of interest
            # e) assetID : asset ID filter to be used within the area of interest
            # f) number of votes : number of votes to be used for qualifying the outage
            # g) spatial filter : a string that represents radial filter coefficients. This is a string of the form:
            #    "[1,0; .2,.2; .3,.01]"
            #    "[w0,d0; w1,d1; w3,d3; ... ]" where w0 is the weight (typically 0 to 1)
            #    and d0 is the distance in miles or
            #    whatever the units are set to in the config file.
            # The distance is the distance along a line that runs through the asset lat/lon and is parallel to the
            # nearest upstream circuit segment. The ONT distance is projected to this circuit line and is filtered
            # by the same spatial filter coefficients.
            # In addition to the spatial filter the ONTs are weighted by their reputation
            # (if the flag is set) which is
            # calculated by an internally learned algorithm.
            self.my_local_logger.debug(" Grooming local_q, size = %d" % len(self.local_q))
            # NOTE(review): copy.copy is shallow -- the nested "payload" dict
            # is still shared with the original queue entry; mutations below
            # (longitude/latitude for neighbors) therefore alias it.
            top_of_q_data = copy.copy(self.local_q.popleft())  # was popleft
            self.groomer_state = "1.1:GROOM_POP_QUEUE"
            self.my_local_logger.info("Got a local queue item.")
            if "ttl" in top_of_q_data.keys():
                ttl = top_of_q_data["ttl"]
            else:
                ttl = self.ttl
            if top_of_q_data["payload"]['radius'] != self.working_radius:
                self.resolution = compute_resolution(top_of_q_data["payload"]["radius"])
            this_cell_guid, this_timestamp_guid = self.compute_cell_guid(top_of_q_data["payload"], self.resolution)
            keys = self.get_shared_data('cell_collection_dict_keys')
            collection_set = self.get_shared_data('cell_collection_set')
            ##################################################
            # STEP 2) Look at the GUID generator for the lat and lon and see if the shared
            # memory contains a cell structure for this item.
            if this_cell_guid in keys:  # my_shared_data['cell_collection_dict'].keys():
                # 2.1) If it does contain the GUID then determine the state of that cell.
                # 2.2) If the time stamp GUID of this cell GUID is within the resolution outage
                # machine then continue with step 4.
                self.groomer_state = "1.2:GROOM_FOUND_SHARED_DATA"
                self.my_local_logger.debug("This cell is already in shared memory, "
                                           "and is fully populated checking using a copy of it")
                cell = self.get_shared_data('cell_collection_dict', this_cell_guid)
                self.my_local_logger.debug("EXISTS: %s[%f,%f]TTL=%d" %
                                           (this_cell_guid, cell["lat_lon"][0], cell["lat_lon"][1], cell["ttl"]))
            else:  # 2.3) If it does not contain the GUID or the time stamp GUID does not match then go to step 3.
                # STEP 3) Query the API and find all utility assets within the region of interest
                # (defined by a config parameter as the starting zoom level in miles)
                # These will include house, transformers, poles, wires and so on.
                # The first 2 letters of the assetID will be the item type. Save this cell to the shared memory set
                # From this collection of assets create a SET of items in a shared queue that
                # holds these items until so that other processes don't work on these items at the same time.
                # The items will be filtered by assetID (item 1e) and circuitID (item 1d) if these fields are filled in.
                cell = self.build_new_cell(this_cell_guid, top_of_q_data["payload"], ttl)
                self.save_cell_in_shared_mem(this_cell_guid, cell)
                self.my_local_logger.debug("CREATE: %s[%f,%f]TTL=%d" %
                                           (this_cell_guid, cell["lat_lon"][0], cell["lat_lon"][1], ttl))
                self.groomer_state = "1.3:GROOM_BUILD_NEW_CELLS"
                # self.plot_assets()
            # At this point the cell has been created and tested to be sure that its the one we want.
            # Now examine the neighboring cells from this cells collection:
            # STEP 4) Using the result of step 3 the cell is ready to be processed.
            # 4.1) The next step is to look at each of the 6 neighboring cells.
            #      This is done by examining the 6 cells and determining their state.
            # 4.1.1) Check the TTL count of this cell. If the TTL is zero continue to the next cell
            #        in the incoming Queue.
            self.groomer_state = "1.4:GROOM_PROPAGATE_CELL"
            # ttl == 0 means "do not propagate"; -1 means "propagate forever".
            if cell['ttl'] != 0:
                for i, items in enumerate(cell['neighbors']):  # the 6 nearest neighbors
                    this_neighbor_cell = items[0]
                    angle = items[1]
                    # The six neighbor cells are initially set to zero
                    # this_cell = {'neighbors': [["",0], ["",60], ["",120], ["",180], ["",240],["",300]],
                    #              'assets': {},
                    #              'onts': {},
                    #              'circuits': {},
                    #              'state': 'create',
                    #              'lat_lon': [lat, lon],
                    #              'radius': radius,
                    #              'groom_time': groom_time
                    #              }
                    distance = 2 * top_of_q_data["payload"]["radius"]
                    if not this_neighbor_cell:
                        # We need to copy each of the neighbor cells to make sure we get a unique data structure
                        # NOTE(review): this is again a *shallow* copy, so
                        # neighbor_cell_message["payload"] is the same dict as
                        # top_of_q_data["payload"] -- the longitude/latitude
                        # writes below leak into the current item. The final
                        # deepcopy before publishing protects the published
                        # message only. TODO confirm intended.
                        neighbor_cell_message = copy.copy(top_of_q_data)
                        self.my_local_logger.debug("%s neighbor[%d] is empty, [%f][%f], filling it now" %
                                                   (this_cell_guid, i, cell["lat_lon"][0], cell["lat_lon"][1]))
                        new_lat, new_lon = move_to_lat_lon(cell["lat_lon"][0], cell["lat_lon"][1], distance, angle)
                        # jump out of the loop if the cell is outside the region of interest
                        company_name = top_of_q_data['payload']['company']
                        if company_name not in self.utility_region.keys():
                            self.my_local_logger.error("Skipping cell rebroadcast "
                                                       "because company_name='%s' is not in utility_region." %
                                                       company_name)
                            self.groomer_state = "1.5.0:GROOM_ABORT_PROPAGATE"
                            continue
                        if (new_lat < self.utility_region[company_name]['min_latitude']) or \
                                (new_lat > self.utility_region[company_name]['max_latitude']) or \
                                (new_lon > self.utility_region[company_name]['max_longitude']) or \
                                (new_lon < self.utility_region[company_name]['min_longitude']):
                            # Here is where the outage time can be advanced by 2 weeks and run again.
                            if not g_config.IS_DEPLOYED:
                                print "Skipping neighbor cell rebroadcast at " \
                                      "lat = %f, lon = %f because outside utility region." % \
                                      (new_lat, new_lon)
                            self.my_local_logger.info("Skipping neighbor cell rebroadcast at "
                                                      "lat = %f, lon = %f because outside utility region." %
                                                      (new_lat, new_lon))
                            self.groomer_state = "1.5.1:GROOM_ABORT_PROPAGATE"
                            continue
                        neighbor_cell_message["payload"]["longitude"] = new_lon
                        neighbor_cell_message["payload"]["latitude"] = new_lat
                        new_cell_guid, new_timestamp_guid = self.compute_cell_guid(neighbor_cell_message["payload"],
                                                                                   self.resolution)
                        if new_cell_guid not in collection_set:
                            # STEP 5) Queue up a grooming process for neighboring cells that
                            # allows another process to pick up the outage calculation for the rest of the circuit.
                            # The neighboring cell is defined by outage location +/- 1 one patch area of
                            # interest in 6 hexagonal directions. This will create a small overlap on the cell corners.
                            self.groomer_state = "1.5.1:GROOM_QUEUE_NEIGHBOR"
                            self.my_local_logger.debug("queue length X = %d" % len(self.local_q))
                            self.mark_cell_in_shared_memory(new_cell_guid)
                            if cell['ttl'] == -1:
                                # If the TTL count is -1 then this is a full propagation list so this causes a
                                # post (publish) of a new query. Then continue with the next cell.
                                neighbor_cell_message["ttl"] = -1
                            else:
                                # Decrease the TTL count and post (publish) a new query.
                                # Then continue with the next cell.
                                neighbor_cell_message["ttl"] = cell['ttl'] - 1
                            self.my_local_logger.debug(" POST: %s[%f,%f]TTL=%d->%s[%f,%f]TTL=%d(%d)" %
                                                       (this_cell_guid, cell["lat_lon"][0], cell["lat_lon"][1], ttl,
                                                        new_cell_guid, new_lat, new_lon, neighbor_cell_message["ttl"],
                                                        angle))
                            ########################
                            # This is the work around to just post the message back to the local_q instead of sending it
                            # out to the rabbit bus for parallel processing
                            ####################################
                            # BURNED BY PYTHON
                            ####################################
                            # The queue append does not copy the data, instead it just posts a pointer to the data
                            # self.local_q.append(copy.deepcopy(neighbor_cell_message))
                            # self.my_local_logger.debug("gueue length Y = %d" % len(self.local_q)
                            # deepcopy so the published message is decoupled from
                            # the (shared) payload dict mutated above.
                            self.queue_to_publish(copy.deepcopy(neighbor_cell_message))
                        else:
                            self.groomer_state = "1.5.2:GROOM_LINK_NEIGHBOR"
                            # time.sleep(1)
                            self.my_local_logger.debug("Stitching %s's neighbor[%d]@[%f][%f] to this cell: %s" %
                                                       (this_cell_guid, i, cell["lat_lon"][0], cell["lat_lon"][1],
                                                        new_cell_guid))
                            self.my_local_logger.debug("SHARED: %s[%f,%f]TTL=%d->%s[%f,%f]TTL=%d (%d)" %
                                                       (this_cell_guid, cell["lat_lon"][0], cell["lat_lon"][1], ttl,
                                                        new_cell_guid, new_lat, new_lon, cell['ttl'], angle))
                            # If the cell is already in shared memory then just connect the cells neighbors
                            cell['neighbors'][i] = [new_cell_guid, angle]
            self.save_cell_in_shared_mem(this_cell_guid, cell)
            # STEP 6) OUTAGE CALCULATION
            # at this point the outage region is contained within one cell.
            # This is the process of grooming the outage. The data is ready to be used for calculating the outage.
            # The filter algorithm was given above.
            # 6.1) First the temporal filter is applied to the assets in the cell
            self.groomer_state = "1.6:GROOM_COMPUTE_OUTAGE"
            t_cell = self.temporal_filter(cell)
            self.save_cell_in_shared_mem(this_cell_guid, t_cell)
            # 6.2) Second the spatial filter is applied to each assets in the cell
            s_cell = self.spatial_filter(t_cell)
            self.save_cell_in_shared_mem(this_cell_guid, s_cell)
            # 6.3) Once the filtered data is ready then the vote is applied to each ONT and the final vote is computed.
            v_cell = self.vote_on_assets(s_cell,
                                         top_of_q_data['payload']['temporal'],
                                         top_of_q_data['payload']['spatial'],
                                         top_of_q_data['payload']['votes'])
            self.save_cell_in_shared_mem(this_cell_guid, v_cell)
            # and the results is written back to the outage API.
            self.my_local_logger.info("Calling post_outage_on_asset.")
            self.my_local_logger.info("Posting this payload: %s" % json.dumps(top_of_q_data["payload"]))
            self.post_outage_on_asset(v_cell, top_of_q_data["payload"])
        self.end_time = time.time()
        # Clamp to 1 ms so downstream rate math never divides by zero.
        elapsed_process_time = fmax(self.end_time - self.start_time, .001)
        self.groomer_state = "0:IDLE"
        self.groom_run_state = "0:IDLE"
        self.my_local_logger.info("Done. Elapsed time %f sec." % elapsed_process_time)
@staticmethod
def build_groom_payload(this_date, company=None, trigger_time=0, lat=0, lon=0, ttl=0):
"""
:param this_date: The date that the groom operation is to be examined
:param company: The utility company name associated with this alarm (if any)
:param trigger_time: The time of the alarm.
:param lat: Latitude of the alarm
:param lon: Longitude of the alarm
:param ttl: Time to live (set to 2 to limit the range of area to examine)
:return: The payload for the groom queue (or None if there is no utility)
Note that the first company is returned. There may be cases where the utilities may overlap. There are better
test methods for determining whether a point is in a region or not.
"""
this_payload = None
if company is None:
for this_company in g_config.UTILITY_REGION:
#:TODO: Replace with a better test method. See http://alienryderflex.com/polygon/
if g_config.UTILITY_REGION[this_company]['min_latitude'] < lat < \
g_config.UTILITY_REGION[this_company]['max_latitude'] and \
g_config.UTILITY_REGION[this_company]['min_longitude'] < lon < \
g_config.UTILITY_REGION[this_company]['max_longitude']:
company = this_company
break
if company is not None:
this_payload = {"dateTime": this_date,
"payload": {"company": company,
"outageTime": trigger_time,
"longitude": lon,
"latitude": lat,
"circuitID": "",
"assetID": "",
"votes": 0,
"spatial": '{"r":[1,1]}',
"temporal": "[1,0; .8,24; .3, 60]",
"reputationEnabled": True,
"zoomT": 1,
"zoomR": 1,
"radius": 0.12,
"units": "MI"
},
"messageType": "Save",
"ttl": ttl
}
return this_payload
@staticmethod
def build_payload(this_date, this_company, this_trigger_time, this_lat, this_lon, ttl):
this_payload = {"dateTime": this_date,
"payload": {"company": this_company,
"outageTime": this_trigger_time,
"longitude": this_lon,
"latitude": this_lat,
"circuitID": "",
"assetID": "",
"votes": 0,
"spatial": '{"r":[1,1]}',
"temporal": "[1,0; .8,24; .3, 60]",
"reputationEnabled": True,
"zoomT": 1,
"zoomR": 1,
"radius": 0.12,
"units": "MI"
},
"messageType": "Save",
"ttl": ttl
}
return this_payload
def utility_groom(self, utility_name="ALL", location=None, ttl=g_config.TTL_MAX):
"""
Triggers a Utility wide grooming process by setting up a ttl of -1 and injecting it into the Rabbit MQ bus.
When called, the outage test location is calculated by starting in the center of the geographic location
using the current time for outage detection.
All utilities in the utility dictionary will be groomed when this method is called.
:param: utility_name: the utility to groom or ALL for all
:param: location: is the groom location which will be the starting point of the groom process. If the value is
passed in and is not none then the groom will occur within a TTL_MAX region of this location
:return:
"""
# TODO: The best approach here is to trigger the outage groom at the center of the last alarm.
# trigger_time = arrow.get("2015-01-09T19:42:33.689-0400").timestamp*1000
trigger_date = arrow.utcnow().to('US/Eastern').format('YYYY-MM-DDTHH:mm:ss.SSSZ')
trigger_time = arrow.get(trigger_date).timestamp*1000
if location is None:
ttl = g_config.TTL_RANDOM_GROOM
if utility_name in self.utility_region.keys():
r = random.random()
this_lat = r * (self.utility_region[utility_name]['max_latitude'] -
self.utility_region[utility_name]['min_latitude']) + \
self.utility_region[utility_name]['min_latitude']
r = random.random()
this_lon = r * (self.utility_region[utility_name]['max_longitude'] -
self.utility_region[utility_name]['min_longitude']) + \
self.utility_region[utility_name]['min_longitude']
this_payload = self.build_groom_payload(trigger_date, utility_name, trigger_time, this_lat, this_lon, ttl)
self.my_local_logger.info("SEEDED %s" % this_payload)
if this_payload is not None:
self.queue_to_publish(this_payload)
else:
for company in self.utility_region.keys():
r = random.random()
this_lat = r * (self.utility_region[company]['max_latitude'] -
self.utility_region[company]['min_latitude']) + \
self.utility_region[company]['min_latitude']
r = random.random()
this_lon = r * (self.utility_region[company]['max_longitude'] -
self.utility_region[company]['min_longitude']) + \
self.utility_region[company]['min_longitude']
this_payload = self.build_groom_payload(trigger_date, company, trigger_time, this_lat, this_lon, ttl)
self.my_local_logger.info("SEEDED %s" % this_payload)
if this_payload is not None:
self.queue_to_publish(this_payload)
else:
if utility_name in self.utility_region.keys():
this_lat = location["lat"]
this_lon = location["lon"]
this_payload = self.build_groom_payload(trigger_date, utility_name, trigger_time, this_lat, this_lon,
ttl)
self.my_local_logger.info("SEEDED %s" % this_payload)
if this_payload is not None:
self.queue_to_publish(this_payload)
else:
for company in self.utility_region.keys():
this_lat = location["lat"]
this_lon = location["lon"]
this_payload = self.build_groom_payload(trigger_date, company, trigger_time, this_lat, this_lon,
ttl)
self.my_local_logger.info("SEEDED %s" % this_payload)
if this_payload is not None:
self.queue_to_publish(this_payload)
    def run(self):
        """Thread main loop: periodically report health, trigger grooming when
        the local queue is full or stale, and drain the incoming Rabbit MQ."""
        # self.my_local_logger.push
        self.run_start_time = time.time()
        report_time = self.run_start_time + g_config.KEEP_ALIVE_INTERVAL
        self.my_local_logger.debug("Started at %f" % self.run_start_time)  # "backend_msg_handler.run"
        while self.run_enable:
            # Also add a timeout so that if the queue isn't full it processes alarms anyway.
            # elapsed_time is measured since the last queued message (see reset below).
            elapsed_time = time.time() - self.run_start_time
            if time.time() > report_time:
                # Keep-alive heartbeat: last groom duration plus current states.
                self.my_local_logger.info("|OK dT|%10.3f|(s)|%10.3f|e|%10.3f|elp|%10.3f|state|%s|groomer state|%s" %
                                          (self.end_time - self.start_time,
                                           self.start_time,
                                           self.end_time,
                                           elapsed_time,
                                           self.groom_run_state,
                                           self.groomer_state)
                                          )
                report_time = time.time() + g_config.KEEP_ALIVE_INTERVAL
                self.idle_count += 1
                self.groom_run_state = "1:REPORT"
            queue_depth = len(self.local_q)
            groom_now = False
            # Groom when the queue is deep enough, or when anything has waited too long.
            if queue_depth > g_config.QUEUE_SIZE_BLOCK:
                groom_now = True
                self.my_local_logger.info("Analyzing after %f sec because queue size is %d" %
                                          (elapsed_time, queue_depth))  # "backend_msg_handler.run"
            elif queue_depth > 0 and (elapsed_time > g_config.MESSAGE_EXPIRATION_SEC):
                groom_now = True
                self.my_local_logger.info("Analyzing after %f sec because time expired." %
                                          elapsed_time)  # "backend_msg_handler.run"
            # when the backend message queue is QUEUE_SIZE_BLOCK then block this thread and process the queue
            if groom_now:
                self.groom_run_state = "2:GROOMING"
                self.groom_outages()
            # need to acquire a lock when pulling from the queue
            # NOTE(review): when both queues stay empty this loop spins without
            # sleeping, pinning a core -- consider a short sleep. TODO confirm.
            if not self.incoming_rabbit_mq.empty():
                self.idle_count = 0
                self.my_local_logger.debug("Message received, calling the process_incoming_queue now: %f" %
                                           elapsed_time)
                self.groom_run_state = "3:PROCESS_QUEUE"
                self.process_incoming_rabbit_mq()
                # set the run_start_time to begin timing at the time that the last message was queued
                self.run_start_time = time.time()
def join(self, timeout=None):
self.run_enable = False
self.my_local_logger.info("Stopping at %f" % (time.time()))
if __name__ == "__main__":
from g_pika_rabbit_bridge import MqConsumer, MqPublisher
import logging.handlers
import datetime
BASE_DIR = 'C:\\repo\\personal\\myDocs\\Aptect\\Verizon\\Workproduct\\EON-IOT\\groomer'
LOG_FORMAT = '%(asctime)s %(name)-12s %(levelname)-8s %(funcName)s %(lineno)5d :%(message)s'
########################
# LOG FILE SETUP
########################
unique_str = datetime.datetime.now().isoformat().replace(':', '_').replace('.', '_').replace('-', '_')
try:
os.mkdir(BASE_DIR + os.sep + g_config.LOG_DIR)
except OSError or WindowsError:
print "Log directory exists"
try:
os.mkdir(BASE_DIR + os.sep + g_config.PICKLES)
except OSError or WindowsError:
print "Pickles directory exists"
LOG_FILENAME = BASE_DIR + os.sep + g_config.LOG_DIR + os.sep + 'top_' + unique_str + '.log'
# Add the log message handler to the logger
handler = logging.handlers.RotatingFileHandler(LOG_FILENAME, maxBytes=5000000, backupCount=50)
handler.setFormatter(logging.Formatter(LOG_FORMAT, datefmt='%m-%d %H:%M:%S'))
handler.setLevel(logging.DEBUG)
# Add this handler to the root logger
logging.getLogger('').addHandler(handler)
shared_data_top = {'thread_name_pool': set(), # This contains a set of thread names that are sharing this data
'master_ont_set': set(), # This contains all the ONTs that have been seen for this groom cycle
'master_ont_dict': {}, # This contains the dictionary of ONTs that have been seen
"cell_collection_set": set(), # This contains all the cell guids that have been seen so far
"cell_collection_dict": {}} # This is a dictionary of the cell quids that have been seen
# and have been filled in with cell data
shared_data_lock_top = threading.Lock()
rabbit_message_queue = Queue.Queue()
rabbit_queue_lock = threading.Lock()
# EON_MQ_IP = '10.123.0.20'
EON_MQ_IP = 'localhost'
EON_MQ_UN = 'manager' # 'manager' #
EON_MQ_PW = 'e0n36o' # 'manager' #
EON_MQ_PORT = 5672
EON_MQ_BASE = '/#/queues'
EON_MQ_VHOST = 'eon360'
EON_MQ_QUEUE = 'collection-notification'
EON_GROOM_QUEUE = 'grooming-notification'
connection_string = 'amqp://' + EON_MQ_UN + ':' + EON_MQ_PW + '@' + EON_MQ_IP + ':' + \
('%d' % EON_MQ_PORT) + '/' + EON_MQ_VHOST
consumer = MqConsumer(connection_string, rabbit_message_queue, rabbit_queue_lock, EON_GROOM_QUEUE)
# # Can probably use the next line to look for a failed pika bridge.
# It will be None if the connection is not available.
# consumer.__dict__['_connection']
publish_message_queue = Queue.Queue()
publish_queue_lock = threading.Lock()
publisher = MqPublisher(connection_string, publish_message_queue, publish_queue_lock, EON_GROOM_QUEUE)
groomer = GroomingMessageHandler(incoming_q=rabbit_message_queue,
incoming_queue_lock=rabbit_queue_lock,
outgoing_q=publish_message_queue,
outgoing_queue_lock=publish_queue_lock,
module_instance_name='Handler01',
shared_data=shared_data_top,
shared_data_lock=shared_data_lock_top)
groomer.run_enable = True
groomer.start()
consumer.start()
publisher.start()
run_mode = True
try:
# This is Corlandt NY
# This is what a groom payload should look like:
# The spec version 1.1 shows this format
# {
# “queryGuid": "dffdd6e5-79df-4da7-9a6d-84a8d3ead772", A unique ID that is created
# when the query button is clicked.
# “type”: "Query", Message type that is to be processed
# Type of Action can be one of:
# Save: Save button clicked on the GUI
# Test: Query button clicked when the mode selection is Test
# Query: Query button clicked when the mode selection is Query (default)
# Clear: User browses away from page
#
# "payload": { The payload of the data from the web page form
# "company": "CEDRAFT", The company name being used on this web page
# "outageTime": 1414011303715, The datetime from the web page form
# "latitude": 41.07597, Latitude (optional)
# "longitude": -74.011081, Longitude (optional)
# "circuitID",: "", Circuit ID (optional), as known by the utility
# "assetID": "", Asset ID (optional), as known by the utility (transformer)
# "votes": 3, Votes (optional) to use for outage 1 to 10
# "spatial": "[1,0; .2,.2; .3,.01]", A spatial vector string (optional) consisting of weight,
# distance pairs
# "temporal":"[1,0; .8,24; .3, 60]", A temporal vector string (optional) consisting of weight,
# time pairs
# "reputationEnabled": true, The state of the reputation check box. If checked then
# this value is true otherwise false
# "zoomT": 1, The current zoom level of the time in the display plot
# "zoomR": 1, The current zoom level of the radius in the display plot
# “radius”: 1 The radius to use for the starting zoom level
# “units” : "MI" The units of the radius. (MI or KM)
# }
# }
# This will be the outage time of the test (January 9th, 2015)
# The types of messages implemented are Query, Save
lat = 41.2693778
lon = -73.8773389
radius = 1.0 # config.START_RADIUS # = 0.12
outage_time = arrow.get("2015-01-09T19:42:33.689-0400").timestamp*1000
today = arrow.utcnow().to('US/Eastern').format('YYYY-MM-DDTHH:mm:ss.SSSZ')
groom_payload = {"queryGuid": "4a1b34bc-9739-4b40-85e1-8f464fe98211",
"dateTime": today,
"payload": {
"company": "CEDRAFT",
"outageTime": outage_time,
"longitude": lon,
"latitude": lat,
"circuitID": "",
"assetID": "",
"votes": 3,
"spatial": '{"r":[1,1]}',
"temporal": "[1,0; .8,24; .3, 60]",
"reputationEnabled": True,
"zoomT": 1,
"zoomR": 1,
"radius": 0.12,
"units": "MI"
},
"messageType": "Save"
}
publisher.message = groom_payload
while True:
pass
groomer.join()
consumer.join()
publisher.join()
except KeyboardInterrupt:
groomer.join()
# consumer.join()
|
14,972 | cfc013980821c6b4a7f81bac475b8993b5b44a65 | admin_credentials = {'userName': 'Administrator', 'password': 'wpsthpvse1'}
# Enclosure under test and one of its drive-enclosure bays.
enclosure_name = "CN75120D7B"
drive_enclosure_name = "CN75120D7B, bay 3"
# Expected inventory for this enclosure: drive enclosures (DE) and drives.
expected_number_of_DE = 1
expected_number_of_drives = 8
|
14,973 | 53139af1063dd8c2a5595ac727704a7b8d8665b8 | from src.SegmenterWindow import SegmenterWindow
from PySide2.QtWidgets import QApplication
if __name__ == '__main__':
    import sys
    # Build the Qt application, place the segmenter window, and hand control
    # to the Qt event loop; exec_() blocks until the window closes and its
    # return code becomes the process exit status.
    app = QApplication(sys.argv)
    window = SegmenterWindow()
    window.setGeometry(100, 50, 1000, 1000)  # x, y, width, height (pixels)
    window.show()
    sys.exit(app.exec_())
|
14,974 | cee5867565e33d825914487f7502e64305745b82 | # Copyright (c) 2018 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import time
from pprint import pprint
import numpy as np
from neat.reporting import BaseReporter
class LogReporter(BaseReporter):
    """NEAT reporter that appends one JSON line of run statistics per generation.

    Metrics are accumulated into ``self.log_dict`` across the reporter
    callbacks (``start_generation`` -> ``post_evaluate`` ->
    ``end_generation``) and flushed to *filename* once per generation.
    """
    def __init__(self, filename, evaluator, eval_with_debug=False, log_macs=False):
        # Append mode: repeated runs keep extending the same log file.
        self.log = open(filename, "a")
        self.generation = None
        self.generation_start_time = None
        self.generation_times = []
        self.num_extinctions = 0
        self.eval_with_debug = eval_with_debug
        self.log_dict = {}
        self.evaluator = evaluator
        # Used to re-evaluate the champion genome in post_evaluate().
        self.eval_best = self.evaluator.eval_genome
        self.log_macs = bool(log_macs)
        # MAC logging requires the evaluator to have been counting MACs.
        assert not (self.log_macs and not self.evaluator.track_macs), \
            f"If you want to log MAC operations, your fitness evaluator also has to track them"
    def start_generation(self, generation):
        # Record the generation index and start the wall-clock timer.
        self.log_dict["generation"] = generation
        self.generation_start_time = time.time()
    def end_generation(self, config, population, species_set):
        """Collect population/timing stats and write one JSON log line."""
        ng = len(population)
        self.log_dict["pop_size"] = ng
        ns = len(species_set.species)
        self.log_dict["n_species"] = ns
        elapsed = time.time() - self.generation_start_time
        self.log_dict["time_elapsed"] = elapsed
        # Rolling average over the last 10 generations only.
        self.generation_times.append(elapsed)
        self.generation_times = self.generation_times[-10:]
        average = np.mean(self.generation_times)
        self.log_dict["time_elapsed_avg"] = average
        if self.log_macs:
            try:
                self.log_dict["cumulative_macs"] = self.evaluator.CUMMACS
                self.evaluator.CUMMACS = 0  # NOTE only track per generation; TODO add as option
            except AttributeError as AE:
                raise Exception(f"{AE}; if this logger is initialized with log_macs=True, its evaluator must also be intitialized with track_macs=True")
        self.log_dict["n_extinctions"] = self.num_extinctions
        pprint(self.log_dict)
        self.log.write(json.dumps(self.log_dict) + "\n")
    def post_evaluate(self, config, population, species, best_genome):
        """Log fitness statistics and independently re-evaluate the champion."""
        # pylint: disable=no-self-use
        fitnesses = [c.fitness for c in population.values()]
        fit_mean = np.mean(fitnesses)
        fit_std = np.std(fitnesses)
        self.log_dict["fitness_avg"] = fit_mean
        self.log_dict["fitness_std"] = fit_std
        self.log_dict["fitness_best"] = best_genome.fitness
        print("=" * 50 + " Best Genome: " + "=" * 50)
        if self.eval_with_debug:
            print(best_genome)
        # Validation pass on the best genome (with debug output if enabled).
        best_fitness_val = self.eval_best(
            best_genome, config, debug=self.eval_with_debug
        )
        self.log_dict["fitness_best_val"] = best_fitness_val
        n_neurons_best, n_conns_best = best_genome.size()
        self.log_dict["n_neurons_best"] = n_neurons_best
        self.log_dict["n_conns_best"] = n_conns_best
    def complete_extinction(self):
        # Counted here; reported in the next end_generation() log line.
        self.num_extinctions += 1
    def found_solution(self, config, generation, best):
        pass
    def species_stagnant(self, sid, species):
        pass
|
14,975 | 37bc0011ca12da8995ad97176de3002e52aa3f40 | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import argparse
import logging
import os
import json
import pickle
from pathlib import Path
from utils import Tokenizer, Embedding
from dataset import Seq2SeqDataset
from tqdm import tqdm
import torch
from torch import nn
from torch.autograd import Variable as V
import numpy as np
import torch.nn.functional as F
from argparse import ArgumentParser
# In[2]:
# Paths of the trained encoder/decoder checkpoints to load for inference.
encode_path='model3_encoder.pkl'
decode_path='model3_decoder.pkl'
parser = ArgumentParser()
parser.add_argument('--output_path')
args = parser.parse_args()
# In[3]:
# Pickled validation dataset and pretrained embedding from preprocessing.
# NOTE(review): 'file' shadows the builtin name inside the with-blocks.
with open('./data.pkl', 'rb') as file:
    valid =pickle.load(file)
with open('./embedding.pkl', 'rb') as file:
    embed = pickle.load(file)
# In[4]:
# Model hyperparameters -- must match the values used at training time.
BATCH_SIZE = 10
WORD_VECTOR_LENGTH = 300
hidden_size = 150
num_layers=2
vocab_size = embed.vectors.shape[0]
b_dir = 2  # number of GRU directions (bidirectional)
# In[5]:
valid_loader = torch.utils.data.DataLoader(dataset=valid, batch_size=BATCH_SIZE, shuffle=False,collate_fn=valid.collate_fn)
# In[6]:
class ENCODER(nn.Module):
    """Bidirectional GRU encoder over frozen pretrained word embeddings."""
    def __init__(self):
        super(ENCODER, self).__init__()
        # Lookup table built directly from the pickled embedding matrix.
        self.embedding = nn.Embedding.from_pretrained(embed.vectors)
        self.rnn = nn.GRU(
            WORD_VECTOR_LENGTH,
            hidden_size,
            num_layers,
            batch_first=True,
            bidirectional=True,
        )
    def forward(self, x):
        # Map token ids to vectors, then run the recurrent encoder.
        vectors = self.embedding(x)
        outputs, state = self.rnn(vectors)
        return outputs, state
# In[13]:
class AttnDecoderRNN(nn.Module):
    """Attention-augmented GRU decoder sharing the encoder's embeddings.

    Each forward call decodes ONE time step: attention scores are computed
    from the input embedding and the flattened hidden state, applied over
    the encoder outputs, and the attended context concatenated with the
    embedding drives the GRU one step forward.
    """
    def __init__(self):
        super(AttnDecoderRNN, self).__init__()
        embedding_weight = embed.vectors
        self.embedding = nn.Embedding.from_pretrained(embedding_weight)
        # Scores come from [embedding ; flattened hidden state] -> 300 dims.
        self.attn = nn.Linear((WORD_VECTOR_LENGTH+hidden_size*num_layers*b_dir), 300)
        self.rnn = nn.GRU(600, hidden_size, 2, batch_first=True, bidirectional=True)
        self.fc = nn.Linear(300, vocab_size)
    def forward(self, input, hidden, encoder_output,batch_num):
        # encode_output.shape = [9, 299, 300] = [batch , input_len , hidden*dir]
        # encode_hidden = [4, 9, 150] = [num_layer , batch,hidden ]
        # encode_output.shape = [9, 299, 150]
        embed_vector = self.embedding(input) #embed_vector=[9, 1, 300]
        # Flatten layers/directions of the hidden state into one vector.
        # NOTE(review): the .view(1,1,-1) assumes batch size 1 here (the
        # caller decodes one sequence at a time) -- confirm.
        tmp_hidden = hidden
        tmp_hidden = tmp_hidden.transpose(0, 1) # hidden = [9, 6, 50]
        tmp_hidden = tmp_hidden.contiguous().view(1,1,-1) # hidden = [9, 1, 600]
        tt = torch.cat( (embed_vector,tmp_hidden),2 )
        tt = self.attn(tt)
        attn_weights = F.softmax(tt, dim=2) #attn_weights = ([9, 1, 300])
        # Truncate the fixed-size score vector to the actual source length.
        attn_weights = attn_weights.narrow(2, 0, encoder_output.shape[1]) #attn_weights = ([9, 1, 299])
        attn_applied = torch.bmm(attn_weights , encoder_output ) # attn_applied =[9, 1, 300]
        attn_embed = torch.cat((attn_applied,embed_vector),2 )
        output,hidden = self.rnn(attn_embed,hidden)
        output = self.fc(output)
        return output,hidden
# In[14]:
# Load the trained models and move them to the GPU for inference.
encoder = torch.load(encode_path)
decoder = torch.load(decode_path)
encoder.cuda()
decoder.cuda()
print(encoder)
print(decoder)
# In[15]:
# Greedy decoding: encode each batch, then decode every sequence one token
# at a time (max 80 steps) and write one JSON line per sample.
f = open(args.output_path,mode='w')
for i,text in enumerate(tqdm(valid_loader)):
    # Stack the per-sample tensors into one (batch, seq_len) tensor.
    tmp = text['text'][0]
    batch_num = text['text'].shape[0]
    btach_num = batch_num  # NOTE(review): typo of batch_num; never read
    for j in range(1,batch_num):
        b = text['text'][j]
        tmp=torch.cat( (tmp,b) ,dim = 0)
    tmp = tmp.view(batch_num,-1) # tmp (batch , seq_len)
    tmp = V(tmp).cuda()
    encode_output,encode_hidden = encoder(tmp)
    # encode_output.shape = [10, 300, 300]
    # encode_hidden.shape = [4, 10, 150]
    # Seed the decoder with the first token of each sequence (presumably
    # the <sos> marker -- confirm against the preprocessing).
    start_out = torch.zeros([batch_num,1],dtype=torch.long).cuda()
    for m in range(batch_num):
        start_out[m] = text['text'][m][0]
    start_out = torch.unsqueeze(start_out,-1)
    # Decode one sequence of the batch at a time.
    for m in range(batch_num):
        e_hidden = encode_hidden[:,m,:]
        e_hidden = torch.unsqueeze(e_hidden,1).contiguous()
        ec = encode_output[m,::]
        ec = torch.unsqueeze(ec,0).contiguous()
        out_words=[]
        #print(start_out[m].shape) = [1, 1]
        #print(hidden.shape) = [4, 1, 150]
        #print(ec.shape) = [1, 300, 300]
        for j in range(80):
            if j==0:
                decoder_output,decoder_hidden = decoder(start_out[m],e_hidden,ec,batch_num)
                #print(e_hidden)
                #print(decoder_hidden)
                #print(decoder_output.shape) [1, 1, 113378]
                #print(decoder_hidden.shape) [4, 1, 150]
                #decoder_output = F.softmax(decoder_output,2)
                max_prob,max_index = torch.max(decoder_output,2)
                output_words = embed.vocab[max_index.item()]
                output_index = max_index.item()
            else:
                # Feed the previous prediction back in (greedy search).
                input_vocab = torch.tensor([output_index])
                input_vocab = input_vocab.unsqueeze(-1)
                input_vocab = input_vocab.cuda()
                decoder_output,decoder_hidden = decoder(input_vocab ,decoder_hidden,ec,batch_num)
                max_prob,max_index = torch.max(decoder_output,2)
                output_words = embed.vocab[max_index.item()]
                output_index = max_index.item()
            if output_index==2: #<eos>
                break
            if output_index==3 or output_index==1 or output_index==0: #<unk>
                continue
            out_words.append(output_words)
        out_str = " ".join(out_words)
        ans_list={}
        ans_list['id'] = text['id'][m]
        ans_list['predict'] = out_str
        f.write(json.dumps(ans_list) + "\n")
f.close()
# In[ ]:
# In[ ]:
|
14,976 | 42a9657c4e46be69a1ad152dc38c241864421a85 | '''
Created on 27. nov. 2014
@author: JohnArne
'''
from hmac import trans_36
import requests
import os
from sentiwordnet import SentiWordNetCorpusReader, SentiSynset
import nltk
from pos_mappings import TYPECRAFT_SENTIWORDNET
import gettext
import codecs
import subprocess
import pickle
class Lexicon():
    """Facade combining a translater with a sentiment lexicon.

    Words/sentences are first translated (the lexicon is English-only) and
    the translation is then looked up in the underlying sentiment resource.
    """
    def __init__(self, translater, sentiment_lexicon):
        # Both collaborators are injected so different backends can be mixed.
        self.translater = translater
        self.sentiment_lexicon = sentiment_lexicon
    def translate_and_get_lexicon_sentiment(self, word, context=None, pos_tag=None):
        """Translate a single word and return its lexicon sentiment values."""
        translated = self.translater.translate(word)
        return self.sentiment_lexicon.get_values(translated, context, pos_tag)
    def translate_sentence_and_get_lexicon_sentiment(self, sentence):
        """Translate a whole sentence and collect sentiment values per token.

        Tokens with no lexicon entry (lookup returns None) are skipped.
        """
        tokens = tokenizer(self.translater.translate(sentence))
        lookups = (self.sentiment_lexicon.get_values(token) for token in tokens)
        return [values for values in lookups if values is not None]
class SentiWordNetLexicon():
def __init__(self):
SWN_FILENAME = "lexicon\SentiWordNet_3.0.0_20130122.txt"
self.swn= SentiWordNetCorpusReader(SWN_FILENAME)
def get_values(self, word, context=None, pos_tag=None):
"""
Perform lookup in SentiWordNet
"""
# entry = swn.senti_synset("breakdown.n.03")
entries = None
for w in word.split(' '):
entries = self.swn.senti_synsets(w)
if entries != None: break
if entries is None or len(entries)==0:
return None
if len(entries)==1 or pos_tag is None:
return [entries[0].pos_score, entries[0].neg_score, entries[0].obj_score]
elif len(entries)>1:
#Find out which word to chose, if there are several classes
print "Several entries ",entries
for entry in entries:
if entry.synset.pos()==TYPECRAFT_SENTIWORDNET[pos_tag]:
print "Found matching entry: ", entry
return [entry.pos_score, entry.neg_score, entry.obj_score]
return [entries[0].pos_score, entries[0].neg_score, entries[0].obj_score]
return None
class BingTranslater():
def __init__(self, words):
self.original_words = words
file = codecs.open("bing_words.txt", "w", "utf8")
for word in words:
file.write(word+"\n")
file.close()
print "Bing translating ",len(words)," words..."
subprocess.call("lexicon/bingtranslater.exe")
file = codecs.open("translated_words.txt", "r", "utf8")
translated_words = file.readlines()
file.close()
self.translation_mapping = dict(zip(self.original_words, translated_words))
print "Bing done..."
def translate(self, word):
try:
return self.translation_mapping[word]
except KeyError:
return None
class GoogleTranslater():
    """Translates words by scraping the Google Translate web interface."""
    def __init__(self):
        self.translation_url = "https://translate.google.com/#no/en/"
        #The lines of words contain the original word first, then subsequent translations in english
        self.words = codecs.open("bing_words.txt", "r", "utf8").read().splitlines()
    def translate(self, word, context=None, pos_tag=None):
        """
        Translate *word* (Norwegian -> English) via the Google Translate page.

        NOTE(review): 'context' and 'pos_tag' are currently unused. Per the
        original comments, the intent was to cross-check the Google result
        against the Bing alternatives and disambiguate by POS tag, but only
        the plain web lookup is implemented.
        """
        #Get contextual translation from google translate
        par = {"text": word, "raw": "raw"}
        r = requests.post(self.translation_url, data=par)
        results = r.text
        translated_word = get_from_html_text(results, 'TRANSLATED_TEXT')
        #Perform lookup in the text file from the C# translator
        #if there is no match, take the best match from the bing file
        # print "Translated: ", word, " ->", translated_word
        return translated_word
def get_from_html_text(resultset, target):
    """Extract the value assigned to *target* in an HTML/JS response body.

    Looks for ``target`` followed by ``='`` and returns the lowercased text
    up to the closing single quote, scanning at most 140 characters.
    """
    start = resultset.find(target) + len(target) + 2
    snippet = resultset[start:start + 140]
    value = snippet.split("'")[0]
    return value.lower()
def perform_bing_sentiment_lexicon_lookup(tweets):
    """
    Performs sentiment lexicon lookup on the tweets, and stores it in the objects.

    Returns one dict per tweet mapping each sentiment-bearing word (those
    whose POS tag maps into SentiWordNet) to its [pos, neg, obj] scores.
    All words are batch-translated up-front via Bing to avoid per-word
    translator invocations.
    """
    # First pass: collect every word with a SentiWordNet-mappable POS tag
    # so the Bing translator can process them in a single batch.
    words = []
    for t in tweets:
        for phrase in t.tagged_words:
            for word in phrase:
                try:
                    if word["pos"] in TYPECRAFT_SENTIWORDNET:
                        words.append(word['word'])
                except KeyError:
                    continue
    lex = Lexicon(BingTranslater(words), SentiWordNetLexicon())
    words_with_sentimentvalues=[]#list of dicts
    print "Getting sentiment values"
    # Second pass: look up each tweet's sentiment words in the lexicon.
    for t in tweets:
        sentiwords =[]
        sentiwords_with_values={}
        for phrase in t.tagged_words:
            for word in phrase:
                try:
                    if word["pos"] in TYPECRAFT_SENTIWORDNET:
                        sentiwords.append(word['word'])
                except KeyError:
                    continue
        for sentiword in sentiwords:
            sentivalues = lex.translate_and_get_lexicon_sentiment(sentiword)
            if sentivalues!=None:
                print "Adding sentivalues: ",sentivalues
                sentiwords_with_values[sentiword] = sentivalues
        words_with_sentimentvalues.append(sentiwords_with_values)
    return words_with_sentimentvalues
def perform_google_sentiment_lexicon_lookup(tweets):
    """
    Performs sentiment lexicon lookup on the tweets, and stores it in the objects.

    Returns one (pos_sum, neg_sum, obj_sum) triple per tweet, obtained by
    translating the tweet text with Google Translate and summing the
    SentiWordNet scores of its tokens.
    """
    lex = Lexicon(GoogleTranslater(), SentiWordNetLexicon())
    print "Getting sentiment values"
    tweet_sentiments = []
    for t in tweets:
        tweet_sentiments.append(lex.translate_sentence_and_get_lexicon_sentiment(t.text))
    print tweet_sentiments
    # Reduce per-word [pos, neg, obj] triples to per-tweet sums.
    reduced_tweet_sentiments = []
    for sentiments in tweet_sentiments:
        polar_sum = sum([s[0] for s in sentiments])
        negative_sum = sum([s[1] for s in sentiments])
        objective_sum = sum([s[2] for s in sentiments])
        reduced_tweet_sentiments.append((polar_sum, negative_sum, objective_sum))
    print reduced_tweet_sentiments
    return reduced_tweet_sentiments
def tokenizer(sentence):
    """Split an English sentence on '.', ',' and ' ' into a flat token list.

    Consecutive separators produce empty-string tokens, exactly as the
    successive str.split() calls of the original implementation did.
    """
    return [token
            for phrase in sentence.split('.')
            for piece in phrase.split(',')
            for token in piece.split(' ')]
if __name__ == '__main__':
#Insert all words to be translated into the googlebing translator in order to augment with Bing...
lex = Lexicon(BingTranslater(), SentiWordNetLexicon())
print lex.translate_and_get_lexicon_sentiment("good")
# swn = SentiWordNetCorpusReader('SentiWordNet_3.0.0_20130122.txt')
# for senti_synset in swn.all_senti_synsets():
# print senti_synset.synset.name, senti_synset.pos_score, senti_synset.neg_score |
14,977 | ed7a0fd7ef4383817a3a350ff3a78c7ddafd0419 | """
http://memosisland.blogspot.com/2013/04/reading-binary-data-files-written-in-c.html
"""
import numpy as np
import matplotlib.pylab as plt
def load_file(fileHead):
    """
    Load the 8 binary particle files "<fileHead>0" .. "<fileHead>7".

    Each record holds 12 float32 fields plus one int64 id. Judging from the
    field-to-name unpacking below, the per-record layout is
        (x, vx, qx, sx, y, vy, qy, sy, z, vz, qz, sz, id)
    i.e. position, velocity, Lagrangian coordinate q and displacement s
    interleaved per axis -- TODO confirm against the C writer.
    Returns (idx, x, y, z, vx, vy, vz, sx, sy, sz, qx, qy, qz) as 1-D
    arrays concatenated over all 8 files.
    """
    dt = np.dtype("f4, f4, f4, f4, f4, f4, f4, f4, f4, f4, f4, f4, i8")
    # dt = np.dtype([('x', f4), ('vx', f4), ('y', f4), ('vy', f4), ('z', f4), ('vz', f4), ('phi', f4), ('idx', i8)])
    num_part = 128 ** 3  # NOTE(review): unused; expected particles per run?
    tot_entry = 0
    # xyz = np.zeros(shape=(3))
    # vxvyvz = np.zeros(shape=(3))
    x = []
    y = []
    z = []
    vx = []
    vy = []
    vz = []
    qx = []
    qy = []
    qz = []
    sx = []
    sy = []
    sz = []
    # phi = []
    idx = []
    for fidx in range(8):
        np_file = np.fromfile(fileHead + str(fidx), dtype=dt)
        tot_entry = tot_entry + np.shape(np_file)[0]
        x = np.append(x, np.array(np_file['f0']))
        y = np.append(y, np.array(np_file['f4']))
        z = np.append(z, np.array(np_file['f8']))
        vx = np.append(vx, np.array(np_file['f1']))
        vy = np.append(vy, np.array(np_file['f5']))
        vz = np.append(vz, np.array(np_file['f9']))
        qx = np.append(qx, np.array(np_file['f2']))
        qy = np.append(qy, np.array(np_file['f6']))
        qz = np.append(qz, np.array(np_file['f10']))
        sx = np.append(sx, np.array(np_file['f3']))
        sy = np.append(sy, np.array(np_file['f7']))
        sz = np.append(sz, np.array(np_file['f11']))
        idx = np.append(idx, np.array(np_file['f12']))
        # phi = np.append( phi, np.array(np_file['f6']) )
    print tot_entry
    # print np_file[0]
    print(10 * '-')
    # print(np.shape(np_file))
    # print((128**3)*8)
    return idx, x, y, z, vx, vy, vz, sx, sy, sz, qx, qy, qz
# ==============================================================
if __name__ == "__main__":
z_arr = [1, 5, 159]
growth_arr = [0.624913, 0.219980, 0.008534]
#z_arr = [0.01, 1, 5, 10]
#growth_arr = [0.995227, 0.624913, 0.219980, 0.120347]
z0_ind = 0
z1_ind = 1
z0 = z_arr[z0_ind]
dirIn = "../sims/sim_z"+str(z0)+"/output/"
DataDir = "../HACC/" # "../"
# dirIn = DataDir + "sims/simi1Gpc_z" + str(z0) + "/output/"
# fileIn = "m000.full.hcosmo.IC."
# _, x, y, z, _, _, _, sx, sy, sz, qx, qy, qz = load_file(dirIn + fileIn)
# rho3d = dtfe3d(x, y, z, ngr=128)
L = 20.
ngr = 128
# rho3d = cic3d(x, y, z, L, size_fact= ngr)
rho_z0 = np.load(DataDir + 'Data/cic_'+str(int(L))+'_'+str(ngr)+'_z_' +str(z0) + '.npy')
dsxdqx = np.load(DataDir + 'Data/dsxdqx_' + str(int(L)) + '_' + str(ngr) + '_z_' + str(z0) + '.npy')
dsxdqy = np.load(DataDir + 'Data/dsxdqy_' + str(int(L)) + '_' + str(ngr) + '_z_' + str(z0) + '.npy')
dsxdqz = np.load(DataDir + 'Data/dsxdqz_' + str(int(L)) + '_' + str(ngr) + '_z_' + str(z0) + '.npy')
dsydqx = np.load(DataDir + 'Data/dsydqx_' + str(int(L)) + '_' + str(ngr) + '_z_' + str(z0) + '.npy')
dsydqy = np.load(DataDir + 'Data/dsydqy_' + str(int(L)) + '_' + str(ngr) + '_z_' + str(z0) + '.npy')
dsydqz = np.load(DataDir + 'Data/dsydqz_' + str(int(L)) + '_' + str(ngr) + '_z_' + str(z0) + '.npy')
dszdqx = np.load(DataDir + 'Data/dszdqx_' + str(int(L)) + '_' + str(ngr) + '_z_' + str(z0) + '.npy')
dszdqy = np.load(DataDir + 'Data/dszdqy_' + str(int(L)) + '_' + str(ngr) + '_z_' + str(z0) + '.npy')
dszdqz = np.load(DataDir + 'Data/dszdqz_' + str(int(L)) + '_' + str(ngr) + '_z_' + str(z0) + '.npy')
rho_z1 = np.zeros( (ngr, ngr, ngr) )
delta_ij = np.zeros((3, 3), int)
np.fill_diagonal(delta_ij, 1.0)
for xi in range(ngr):
for yi in range(ngr):
for zi in range(ngr):
DsDq = np.array([ [dsxdqx[xi, yi, zi], dsydqx[xi, yi, zi], dszdqx[xi, yi, zi]],
[dsydqx[xi, yi, zi], dsydqy[xi, yi, zi], dsydqz[xi, yi, zi]],
[dszdqx[xi, yi, zi], dszdqy[xi, yi, zi], dszdqz[xi, yi, zi]] ] )
zel_scaling1 = np.linalg.det( delta_ij + ((growth_arr[z0_ind]/growth_arr[
z1_ind])*(DsDq.T) ))
numer = np.linalg.det( delta_ij + (growth_arr[z0_ind]*(DsDq) ) )
denom = np.linalg.det( delta_ij + (growth_arr[z1_ind]*(DsDq) ) )
zel_scaling = numer/denom
rho_z1[xi, yi, zi] = zel_scaling1*rho_z0[xi, yi, zi]
######## PLOTTING ############
# import matplotlib as mpl
# mpl.pyplot.viridis()
# mpl.pyplot.magma()
# mpl.pyplot.inferno()
# mpl.pyplot.plasma()
# mpl.pyplot.summer()
z1 = z_arr[z1_ind]
rho_z1_real = np.load(DataDir + 'Data/cic_'+str(int(L))+'_'+str(ngr)+'_z_' +str(z1) + '.npy')
cmap = plt.get_cmap('afmhot')
fig = plt.figure(132, figsize=(8,8))
ax1 = fig.add_subplot(221)
ax2 = fig.add_subplot(222)
ax4 = fig.add_subplot(224)
ax1.imshow( rho_z0[:,:,53], vmin= 0, vmax= 5, cmap = cmap, alpha = 0.8)
ax2.imshow( rho_z1_real[:,:,53], vmin= 0, vmax= 5, cmap = cmap, alpha = 0.8)
# ax4.imshow( rho_z1[:,:,53], vmin= 0, vmax= 5, cmap = cmap , alpha = 0.8)
ax4.imshow( rho_z1[:,:,53], vmin= 0, vmax= 5, cmap = cmap , alpha = 0.8)
ax1.set_title(r'$\rho$(z =' + str(z0) + ')')
ax2.set_title(r'$\rho$(z =' + str(z1) + ')')
ax4.set_title(r'$\rho$ (ZA mapped)')
# plt.colorbar(ax1)
plt.show()
# from matplotlib.colors import ListedColormap
#
# viridis = ListedColormap(_viridis_data, name='viridis')
#
# plt.register_cmap(name='viridis', cmap=viridis)
# plt.set_cmap(viridis)
|
14,978 | 264fa5a9cf3f125ce8ad5afa43e1710626fde4e9 | #-*- coding:utf-8 -*-
import jieba
import sys
import codecs
import ipdb
def cut_and_write(input_file, output):
    """
    Segment a tab-separated sentence-pair file with jieba and write it out.

    Each input line is "linenum<TAB>sentence0<TAB>sentence1<TAB>label"; both
    sentences are word-segmented, re-joined with spaces, and the line is
    written back in the same 4-column format, UTF-8 encoded.
    """
    with open(output,'w+') as fo:
        with open(input_file, 'r') as fi:
            for line in fi:
                # Python 2: decode bytes (ignoring errors) before splitting.
                stripped_line = line.strip().decode("utf-8", 'ignore').split('\t')
                linenum, sentence0, sentence1, label = stripped_line
                #ipdb.set_trace()
                seg0, seg1 = jieba.cut(sentence0), jieba.cut(sentence1)
                cut_sentence = linenum + "\t" + ' '.join(seg0) + '\t' + ' '.join(seg1) + "\t" + label + "\n"
                fo.write(cut_sentence.encode('utf-8'))
                print cut_sentence
if __name__ == "__main__":
#input = "atec_nlp_sim_train.csv"
#output = "atec_nlp_sim_train_split.csv"
input_file = sys.argv[1]
output = sys.argv[2]
#ipdb.set_trace()
cut_and_write(input_file, output)
|
14,979 | fb76a8997cd76275ae63b65f6e19ccb8ad5b33e9 | str_me = "abcd"
str_arr = ["a", "b", "c", "d"]
# Iterate a full-slice copy of the list, printing one element per call.
for ch in str_arr[::]:
    print(ch, end="")
print("")
print("<< This is same >>")
# A string iterates character-by-character exactly like the list above.
for ch in str_me[::]:
    print(ch, end="")
print("") |
14,980 | 75f3bb9661f0f327a4764d2618264813ddea4ab1 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for google3.third_party.explainable_ai_sdk.sdk.metadata.parameters."""
import tensorflow as tf
from explainable_ai_sdk.metadata import parameters
class ParametersTest(tf.test.TestCase):
  """Unit tests for the explanation-metadata parameter dataclasses."""
  def test_visualization_params_asdict_no_values_set(self):
    # With no fields set, the serialized dict must be empty.
    vis_params = parameters.VisualizationParameters()
    self.assertEmpty(vis_params.asdict())
  def test_visualization_params_asdict_some_values_set(self):
    vis_params = parameters.VisualizationParameters(
        type=parameters.VisualizationType.OUTLINES, clip_above_percentile=0.25)
    result = vis_params.asdict()
    # Only explicitly-set fields appear; enums serialize as text.
    self.assertIn("type", result)
    self.assertIn("outlines", result["type"])
    self.assertIn("clip_above_percentile", result)
    self.assertAlmostEqual(0.25, result["clip_above_percentile"])
    self.assertNotIn("overlay_type", result)
  def test_visualization_params_asdict_enums_to_text(self):
    vis_params = parameters.VisualizationParameters(
        overlay_type=parameters.OverlayType.NONE,
        color_map=parameters.ColorMap.VIRIDIS)
    result = vis_params.asdict()
    self.assertEqual("viridis", result["color_map"])
    self.assertEqual("none", result["overlay_type"])
  def test_domain_info_asdict(self):
    # Unset optional fields (original_stddev) are omitted from the dict.
    domain_info = parameters.DomainInfo(min=0.1, max=0.9, original_mean=6)
    result = domain_info.asdict()
    self.assertEqual(0.1, result["min"])
    self.assertEqual(6, result["original_mean"])
    self.assertNotIn("original_stddev", result)
if __name__ == "__main__":
tf.test.main()
|
14,981 | 50eb131d8b7c4f0c08276a7b8648514d39f23937 | # © Copyright Databand.ai, an IBM Company 2022
import pytest
from more_itertools import last
from dbnd import task
from dbnd._core.current import try_get_current_task_run
from dbnd._core.tracking.commands import set_external_resource_urls
from dbnd.testing.helpers_mocks import set_tracking_context
from test_dbnd.tracking.tracking_helpers import get_save_external_links
@pytest.mark.usefixtures(set_tracking_context.__name__)
class TestSetExternalResourceURLS(object):
    """Tests for the set_external_resource_urls() tracking command."""
    def test_set_external_resource_urls(self, mock_channel_tracker):
        """URLs set inside a task are reported with that run's attempt uid."""
        @task()
        def task_with_set_external_resource_urls():
            set_external_resource_urls(
                {
                    "my_resource": "http://some_resource_name.com/path/to/resource/123456789"
                }
            )
            # Return the attempt uid so the test can match it to the report.
            task_run = try_get_current_task_run()
            return task_run.task_run_attempt_uid
        task_run_attempt_uid = task_with_set_external_resource_urls()
        save_external_links_call = last(get_save_external_links(mock_channel_tracker))
        assert save_external_links_call["external_links_dict"] == {
            "my_resource": "http://some_resource_name.com/path/to/resource/123456789"
        }
        assert save_external_links_call["task_run_attempt_uid"] == str(
            task_run_attempt_uid
        )
    def test_set_external_resource_urls_without_links_values(
        self, mock_channel_tracker
    ):
        """Links whose values are None should produce no report at all."""
        @task()
        def task_with_set_external_resource_urls():
            set_external_resource_urls({"resource": None})
        task_with_set_external_resource_urls()
        call = next(get_save_external_links(mock_channel_tracker), None)
        assert call is None
    def test_set_external_resource_urls_without_links(self, mock_channel_tracker):
        """An empty dict of links should produce no report."""
        @task()
        def task_with_set_external_resource_urls():
            set_external_resource_urls({})
        task_with_set_external_resource_urls()
        call = next(get_save_external_links(mock_channel_tracker), None)
        assert call is None
    def test_set_external_resource_urls_without_running_task(
        self, mock_channel_tracker
    ):
        """Calling outside of any running task is a silent no-op."""
        set_external_resource_urls(
            {"my_resource": "http://some_resource_name.com/path/to/resource/123456789"}
        )
        call = next(get_save_external_links(mock_channel_tracker), None)
        assert call is None
|
14,982 | 0329e00fc0ce4235c409072027c148218d004f5e | # -*- coding:utf-8 -*-
# __author__ = 'gupan'
"""
数据库操作接口:
在conf.settings模块中设置了DATABASE
"""
from conf import settings
import os
import sys
import json
class Db:
    """Database access layer dispatching on conf.settings.DATABASE["engine"].

    Supported engines:
        file_storage -- one JSON file per account under <BASE_DIR>\db\accounts\
        mysql/oracle -- placeholders, not implemented yet

    Improvement note (carried over from the original docstring): the SQL
    execution methods could be split into a dedicated Db_Handle class, with
    Db only responsible for constructing the handler, e.g.::

        db = Db()
        db(sql)  # execute a SQL statement
    """
    def __init__(self):
        pass
    def db_handle(self):
        """Return the execute function matching the configured engine."""
        db_dic = settings.DATABASE
        db_type = db_dic["engine"]
        if db_type == "file_storage":
            return self.file_db_handle()
        elif db_type == "mysql":
            return self.mysql_db_handle()
        elif db_type == "oracle":
            return self.oracle_db_handle()
        else:
            exit("\033[31;1m this function is under develping, please wait\033[0m")
    def file_execute(self, sql):
        '''
        Execute a limited pseudo-SQL statement against the JSON file store.

        Supported statement shapes (the table is hard-coded to "accounts"):
            "SELECT * FROM accounts WHERE name = %s AND passwd = %s"
            "SELECT * FROM accounts WHERE name = %s"
            "INSERT INTO accounts (...) VALUES (...)"
            "UPDATE accounts SET k = v, ... WHERE name = ..."

        TODO (from the original notes): parameterize the table name and use
        os.path.isdir() to verify the table directory exists.
        '''
        if "SELECT" in sql:
            # Split on WHERE; a passwd in the clause means a login check,
            # otherwise it is a registration-availability check.
            head, body = sql.split("WHERE")
            head = head.strip()
            body = body.strip()
            name, passwd = None, None
            if "AND" in body:
                name, passwd = body.split("AND")
                name = name.strip().split("=")[1].strip()
                passwd = passwd.strip().split("=")[1].strip()
            else:
                name = body.split("=")[1].strip()
            account_path = settings.BASE_DIR + "\\db\\accounts\\{name}.json".format(name = name)
            # Login check: account file must exist and passwords must match.
            if passwd:
                if not os.path.exists(account_path):
                    exit("\033[31;1m您还未注册,请先注册\033[0m")
                with open(account_path, "r") as f_account:
                    b_data = f_account.read()
                data = json.loads(b_data)
                if data["passwd"] != passwd:
                    return False
                return data
            # Registration check: True means the name is still available.
            else:
                if not os.path.exists(account_path):
                    return True
                else:
                    return False
        if "INSERT" in sql:
            # Parse the column list and the VALUES tuple; refuse to register
            # a name whose account file already exists, otherwise create it.
            head, body = sql.split("VALUES")
            head = head.strip()
            body = (body.strip().lstrip("(").rstrip(")")).split(",")
            for index in range(len(body)):
                body[index] = body[index].strip()
            head = head.split("(")[1].strip().split(")")[0].strip().split(",")
            for index in range(len(head)):
                head[index] = head[index].strip()
            index = head.index("name")
            name = body[index]
            account_path = settings.BASE_DIR + "\\db\\accounts\\{name}.json".format(name = name)
            if os.path.exists(account_path):
                return False
            data = {}
            with open(account_path, "w") as f_new:
                for item in head:
                    index = head.index(item)
                    data[item] = body[index]
                f_new.write(json.dumps(data))
            return data
        if "UPDATE" in sql:
            # Apply the "k = v" assignments from the SET clause to the
            # account file selected by the WHERE name = ... clause.
            head, body = sql.split("SET")
            head = head.strip()
            body = body.strip()
            updates, where = body.split("WHERE")
            updates = updates.strip()
            where = where.strip()
            name = where.split("=")[1].strip()
            account_path = settings.BASE_DIR + "\\db\\accounts\\{name}.json".format(name = name)
            # BUGFIX: the original check was inverted -- it returned False
            # when the account DID exist and then tried to read a missing
            # file. An UPDATE requires the account file to exist.
            if not os.path.exists(account_path):
                return False
            with open(account_path, "r") as f_account:
                b_data = f_account.read()
            data = json.loads(b_data)
            items = updates.split(",")
            for item in items:
                key, value = item.split("=")
                key = key.strip()
                value = value.strip()
                data[key] = value
            with open(account_path, "w") as f_update:
                f_update.write(json.dumps(data))
            return data
        exit("\033[31;0m Error SQL\033[0m")
    def mysql_execute(self, sql):
        # Placeholder: not implemented.
        pass
    def oracle_execute(self, sql):
        # Placeholder: not implemented.
        pass
    def file_db_handle(self):
        """Return the file-storage execute function."""
        return self.file_execute
    def mysql_db_handle(self):
        print("\033[31;1m mysql \033[0m this function is under develping, please wait")
        return self.mysql_execute
    def oracle_db_handle(self):
        print("\033[31;1m oracle \033[0m this function is under develping, please wait")
        return self.oracle_execute
14,983 | 84c770269f7c9c67646654d8f3fffafa7c3f6e1b | # -*- coding: utf-8 -*-
"""
Created on Mon Oct 7 11:44:30 2013
@author: jinpeng.li@cea.fr
"""
import unittest
import numpy as np
from sklearn import datasets
from epac import Methods
from epac.workflow.splitters import WarmStartMethods
from epac.tests.utils import comp_2wf_reduce_res
from epac.tests.utils import compare_two_node
class TOY_CLF:
    """Toy regressor: random search for the beta minimizing squared error."""
    def __init__(self, v_lambda):
        self.v_lambda = v_lambda
        self.v_beta = None  # best coefficients found so far (None = unfitted)
    def _get_error(self, v_beta, v_lambda, X, y):
        """Sum of squared errors of the linear prediction X @ v_beta."""
        residual = y - np.dot(X, v_beta)
        return np.sum(residual ** 2, axis=0)
    def transform(self, X, y):
        """Refine self.v_beta with 10 random draws, then predict on X.

        Warm start: a beta carried over from a previous call is scored
        first and only replaced by candidates with strictly lower error.
        """
        n_features = X.shape[1]
        best_err = 0
        if self.v_beta is not None:
            best_err = self._get_error(self.v_beta, self.v_lambda, X, y)
        for _ in range(10):
            candidate = np.random.random(n_features)
            err = self._get_error(candidate, self.v_lambda, X, y)
            if self.v_beta is None or err < best_err:
                self.v_beta = candidate
                best_err = err
        return {"y/pred": np.dot(X, self.v_beta), "y/true": y, "best_beta": self.v_beta}
class TestWorkFlow(unittest.TestCase):
    """Checks WarmStartMethods against plain Methods on the TOY_CLF grid."""
    def test_prev_state_methods(self):
        """Both splitters must build identical trees and reduced results."""
        ## 1) Build dataset
        ## ================================================
        X, y = datasets.make_classification(n_samples=5,
                                            n_features=20,
                                            n_informative=2)
        Xy = {"X": X, "y": y}
        methods = Methods(*[TOY_CLF(v_lambda=v_lambda)
                            for v_lambda in [2, 1]])
        methods.run(**Xy)
        ps_methods = WarmStartMethods(*[TOY_CLF(v_lambda=v_lambda)
                                        for v_lambda in [2, 1]])
        ps_methods.run(**Xy)
        # Structural and result-level equivalence of the two workflows.
        self.assertTrue(compare_two_node(methods, ps_methods))
        self.assertTrue(comp_2wf_reduce_res(methods, ps_methods))
if __name__ == '__main__':
unittest.main()
|
14,984 | e7a51ae5c4e28a953255ab196c67b8513d039515 | #!C:\Python38\Python
import datetime
import tkinter as tk
from PIL import Image,ImageTk
window=tk.Tk()
window.geometry("300x400")
window.title(" Age Calculator App ")
name = tk.Label(text = "Name")
name.grid(column=0,row=1)
year = tk.Label(text = "Year")
year.grid(column=0,row=2)
month = tk.Label(text = "Month")
month.grid(column=0,row=3)
date = tk.Label(text = "Day")
date.grid(column=0,row=4)
nameEntry = tk.Entry()
nameEntry.grid(column=1,row=1)
yearEntry = tk.Entry()
yearEntry.grid(column=1,row=2)
monthEntry = tk.Entry()
monthEntry.grid(column=1,row=3)
dateEntry = tk.Entry()
dateEntry.grid(column=1,row=4)
def getInput():
    """Read the form fields, compute the user's age, and display a greeting.

    BUGFIX: the age was computed against a hard-coded year (2022), going
    stale every new year; use today's date instead. The month/day fields
    were collected but ignored -- when they parse as a valid date, subtract
    one year if the birthday has not yet occurred this year.
    """
    name = nameEntry.get()
    year = int(yearEntry.get())
    today = datetime.date.today()
    age = today.year - year
    try:
        birthday = datetime.date(year, int(monthEntry.get()), int(dateEntry.get()))
        if (today.month, today.day) < (birthday.month, birthday.day):
            age -= 1
    except ValueError:
        pass  # missing/invalid month or day: fall back to the year difference
    textArea = tk.Text(master=window, height=10, width=25)
    textArea.grid(column=1, row=6)
    answer = " Hi {}!!!. You are {} years old!!! ".format(name, age)
    textArea.insert(tk.END, answer)
button=tk.Button(window,text="Calculate Age",command=getInput)
button.grid(column=1,row=5)
image=Image.open('mypage.jpg')
image.thumbnail((200,500),Image.ANTIALIAS)
photo=ImageTk.PhotoImage(image)
label_image=tk.Label(image=photo)
label_image.grid(column=1,row=0)
window.mainloop()
|
14,985 | 423b378a83eef4af3fe31f9ebfef411cdfde6a32 | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import httplib
import uuid
from absl.testing import absltest
import mock
import webtest
from google.appengine.api import users
from google.appengine.ext import deferred
from google.appengine.ext import testbed
from cauliflowervest.server import main as gae_main
from cauliflowervest.server import settings
from cauliflowervest.server.handlers import maintenance
from cauliflowervest.server.handlers import test_util
from cauliflowervest.server.models import volumes as models
class MaintenanceModuleTest(test_util.BaseTest):
  """Tests for the maintenance module's volume schema-update endpoint."""

  def setUp(self):
    super(MaintenanceModuleTest, self).setUp()
    # WSGI test client wrapping the real application under test.
    self.testapp = webtest.TestApp(gae_main.app)

  @mock.patch.dict(
      settings.__dict__, {'XSRF_PROTECTION_ENABLED': False})
  @mock.patch.object(
      maintenance.users, 'is_current_user_admin', return_value=True)
  def testWalkthrough(self, _):
    # Seed one volume, hit the schema-update endpoint, run the deferred
    # tasks it enqueues, and verify the entity's tag survives the rewrite.
    models.ProvisioningVolume(
        owner='stub', created_by=users.get_current_user(),
        hdd_serial='stub', passphrase=str(uuid.uuid4()),
        created=datetime.datetime.now(), platform_uuid='stub',
        serial='stub', volume_uuid=str(uuid.uuid4()).upper(), tag='v1'
    ).put()
    resp = self.testapp.get('/api/internal/maintenance/update_volumes_schema')
    self.assertEqual(200, resp.status_int)
    # One deferred task per volume type/batch is expected on the queue.
    taskqueue = self.testbed.get_stub(testbed.TASKQUEUE_SERVICE_NAME)
    tasks = taskqueue.get_filtered_tasks()
    self.assertEqual(8, len(tasks))
    for task in tasks:
      deferred.run(task.payload)
    self.assertEqual('v1', models.ProvisioningVolume.all().fetch(1)[0].tag)

  @mock.patch.dict(
      settings.__dict__, {'XSRF_PROTECTION_ENABLED': False})
  @mock.patch.object(
      maintenance.users, 'is_current_user_admin', return_value=False)
  def testAccessDenied(self, _):
    # Non-admin callers must be rejected with 403.
    self.testapp.get(
        '/api/internal/maintenance/update_volumes_schema',
        status=httplib.FORBIDDEN)
# Run the absl test runner when executed directly.
if __name__ == '__main__':
  absltest.main()
|
14,986 | bf879a3d67e8ab433c03519b21ce21a116f90692 | from datetime import datetime
from concesionario import *
if __name__ == "__main__":
coches1 = [ Coche("V40", 2.0, "Diesel", 120, "", datetime.date(2019,2,2), 0),
Coche("S90", 2.0, "Diesel", 150, "", datetime.date(2017,11,15), 23435) ]
coches2 = [ Coche("A3", 1.0, "Gasolina", 115, "", datetime.date(2018,8,5), 1453),
Coche("A4", 2.0, "Gasolina", 125, "", datetime.date(2019,4,4), 0) ]
coches3 = [ Coche("Corolla", 1.5, "Híbrido", 115, "", datetime.date(2018,7,5), 7564),
Coche("C-HR", 1.8, "Híbrido", 125, "", datetime.date(2019,2,3), 0) ]
marcas = [ Marca("Volvo", "Suecia", coches1),
Marca("Audi", "Alemania", coches2),
Marca("Toyota", "Japón", coches3) ]
concesionario = Concesionario("Rafael Multimotor", marcas)
busquedaMarca = input("Marca: ")
busquedaModelo = input("Modelo: ")
busquedaCombustible = input("Combustible: ")
busquedaPotencia = int(input("Potencia: "))
busquedaMaxKm = int(input("Kilometros máximos: "))
busquedaMinAno = datetime.datetime.strptime(input("Año mínimo: "), '%d-%m-%Y')
resultado = concesionario.buscar(Busqueda(busquedaMarca, busquedaModelo, busquedaCombustible, busquedaPotencia, busquedaMaxKm, busquedaMinAno))
if len(resultado) == 0:
print("\nNo se han encontrado coincidencias")
else:
for coche in resultado:
print(coche) |
14,987 | caef589b135da0b341888fe8efb9b21574873ca5 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add optional 'target' and 'zone' foreign keys to the Exercise model."""

    dependencies = [
        ('athletes', '0006_auto_20150726_0203'),
        ('trainings', '0008_auto_20150803_2141'),
    ]

    operations = [
        # Both FKs are nullable/blank so existing Exercise rows migrate
        # without needing a default value.
        migrations.AddField(
            model_name='exercise',
            name='target',
            field=models.ForeignKey(verbose_name='Тип', blank=True, to='athletes.Target', null=True),
        ),
        migrations.AddField(
            model_name='exercise',
            name='zone',
            field=models.ForeignKey(verbose_name='Группа мышц', blank=True, to='athletes.Zone', null=True),
        ),
    ]
|
14,988 | d77e1ab60c32dbe46a40f895a5fba8cce8b5786f | from suds import Client
class TimeFilter:
    """Represents a Deep Security TimeFilter Transport.

    Builds a suds 'TimeFilterTransport' from an explicit time_type, or,
    when the type is not a recognised enum name, infers it from the
    supplied range / specific-time values.
    """

    def __init__(self, suds_client, rangeFrom=None, rangeTo=None, specificTime=None, time_type="LAST_HOUR"):
        self.rangeFrom = rangeFrom
        self.rangeTo = rangeTo
        self.specificTime = specificTime
        self.client = suds_client
        self.time_type = time_type

    def get_transport(self):
        """Return a populated 'TimeFilterTransport' suds object."""
        tft = self.client.factory.create('TimeFilterTransport')
        tft.rangeFrom = self.rangeFrom
        tft.rangeTo = self.rangeTo
        tft.specificTime = self.specificTime
        etft = self.client.factory.create('EnumTimeFilterType')
        types = {"LAST_HOUR": etft.LAST_HOUR,
                 "LAST_24_HOURS": etft.LAST_24_HOURS,
                 "LAST_7_DAYS": etft.LAST_7_DAYS,
                 "CUSTOM_RANGE": etft.CUSTOM_RANGE,
                 "SPECIFIC_TIME": etft.SPECIFIC_TIME}
        # BUG FIX: the original wrote 'if types[self.time_type]:', which
        # raised KeyError for any unrecognised time_type and made the
        # fallback branches below unreachable. A membership test keeps the
        # explicit type when known and otherwise infers it from the values.
        if self.time_type in types:
            tft.type = types[self.time_type]
        elif self.rangeFrom is not None and self.rangeTo is not None:
            tft.type = etft.CUSTOM_RANGE
        elif self.specificTime is not None:
            tft.type = etft.SPECIFIC_TIME
        else:
            tft.type = etft.LAST_HOUR
        return tft
|
14,989 | 1d533c1e874d172d413ef20fe147434f193cb910 | import requests
from bs4 import BeautifulSoup
import pandas as pd
from selenium import webdriver
import time
import pickle
# Jolly Turns trail-listing pages, one per resort (green-runs tab).
LL_URL = 'https://jollyturns.com/resort/united-states-of-america/loveland-ski-area/skiruns-green'
AB_URL = 'https://jollyturns.com/resort/united-states-of-america/arapahoe-basin/skiruns-green'
C_URL = 'https://jollyturns.com/resort/united-states-of-america/copper-mountain-resort/skiruns-green'
E_URL = 'https://jollyturns.com/resort/united-states-of-america/eldora-mountain-resort/skiruns-green'
AM_URL = 'https://jollyturns.com/resort/united-states-of-america/alpine-meadows/skiruns-green'
V_URL = 'https://jollyturns.com/resort/united-states-of-america/vail-ski-resort/skiruns-green'
M_URL = 'https://jollyturns.com/resort/united-states-of-america/monarch-ski-area/skiruns-green'
CB_URL = 'https://jollyturns.com/resort/united-states-of-america/crested-butte-mountain-resort/skiruns-green'
T_URL = 'https://jollyturns.com/resort/united-states-of-america/taos-ski-valley/skiruns-green'
DP_URL = 'https://jollyturns.com/resort/united-states-of-america/diamond-peak/skiruns-green'
WP_URL = 'https://jollyturns.com/resort/united-states-of-america/winter-park-resort/skiruns-green'
BC_URL = 'https://jollyturns.com/resort/united-states-of-america/beaver-creek-resort/skiruns-green'
URLs = [LL_URL,AB_URL,C_URL,E_URL,AM_URL,V_URL,M_URL,CB_URL,T_URL,DP_URL,WP_URL,BC_URL]
# Hand-counted section sizes for each resort page, in the order the rows
# appear and are sliced by make_tables():
# [lifts, greens, blues, blacks, bb, tp] (remaining rows are restaurants).
LL_nums = [10,17,38,23,12,1]
AB_nums = [7,6,33,37,36,2] # A basin
C_nums = [22,27,30,50,25,9]
E_nums = [9,6,26,9,8,0]
AM_nums = [13,2,39,49,0,1]
V_nums = [29,29,59,104,7,1]
M_nums = [5,13,16,25,7,3]
CB_nums = [13,24,41,18,44,0]
T_nums = [13,13,20,37,50,0]
DP_nums = [6,2,14,14,0,1]
WP_nums = [25,32,38,78,9,7] # WP
BC_nums = [24,42,47,39,12,2]
nums = [LL_nums,AB_nums,C_nums,E_nums,AM_nums,V_nums,M_nums,CB_nums,T_nums,DP_nums,WP_nums,BC_nums]
# Shared headless browser used by make_tables() for every page fetch.
browser = webdriver.PhantomJS()
def make_tables(URL, nums):
    '''
    Inputs:
        URL from URLs (str)
        nums of trails of each color from nums (list)
    Outputs:
        4 tables of trails by color (tuple of tables)
    '''
    browser.get(URL)
    time.sleep(3)
    soup = BeautifulSoup(browser.page_source, 'html.parser')
    rows = soup.select('table.table.table-striped tbody tr')

    # Extract the text of every non-blank cell, one list per table row.
    parsed = []
    for row in rows:
        parsed.append([cell.text for cell in row if cell != ' '])

    # Cumulative offsets of each section boundary derived from the counts.
    offsets = [0]
    for count in nums:
        offsets.append(offsets[-1] + count)

    sections = [parsed[offsets[k]:offsets[k + 1]] for k in range(len(nums))]
    lifts, greens, blues, blacks, bb, tp = sections
    restaurants = parsed[offsets[-1]:]
    return greens, blues, blacks, bb
lift_cols = ['Name', 'Bottom', 'Top', 'Vertical Rise']
def make_run_df(lst):
    '''
    Build a DataFrame of ski runs from a parsed table.

    Inputs:
        lst: rows of [name, bottom, top, vertical drop, length] strings;
             numeric cells carry a two-character unit suffix (e.g. '123ft',
             '1.5mi').
    Outputs:
        DataFrame with float columns plus an 'Average Steepness' column
        (vertical drop in feet divided by length in feet).
    '''
    runs_cols = ['Name', 'Bottom (ft)', 'Top (ft)', 'Vertical Drop (ft)', 'Length (mi)']
    df = pd.DataFrame(lst)
    df.columns = runs_cols

    def _to_miles(raw):
        # Lengths are reported in feet or miles; normalise everything to miles.
        if raw[-2:] == 'ft':
            return round(float(raw[:-2]) / 5280, 2)
        return float(raw[:-2])

    # BUG FIX: the original assigned through chained indexing
    # (df['Length (mi)'][i] = ...) inside a range(len(...)) loop; pandas may
    # apply such writes to a temporary copy (SettingWithCopy), silently
    # leaving the column unconverted. Column-level .apply() is reliable.
    df['Length (mi)'] = df['Length (mi)'].apply(_to_miles).astype(float)
    for col in runs_cols[1:-1]:
        df[col] = df[col].apply(lambda x: float(x[:-2]))
    df['Average Steepness'] = (df['Vertical Drop (ft)'] / (5280 * df['Length (mi)'])).astype(float)
    return df
# WP_runs = make_tables(WP_URL,WP_nums)
# AB_runs = make_tables(AB_URL,AB_nums)
#
# WP_greens, WP_blues, WP_blacks, WP_bb = WP_runs
# AB_greens, AB_blues, AB_blacks, AB_bb = AB_runs
#
# WP_green_df = make_run_df(WP_greens)
# WP_blue_df = make_run_df(WP_blues)
# WP_black_df = make_run_df(WP_blacks)
# WP_bb_df = make_run_df(WP_bb)
#
# AB_green_df = make_run_df(AB_greens)
# AB_blue_df = make_run_df(AB_blues)
# AB_black_df = make_run_df(AB_blacks)
# AB_bb_df = make_run_df(AB_bb)
def make_df_dicts(URL, nums):
    '''
    Inputs:
        URL from URLs (str)
        nums from nums (list)
    Outputs:
        dictionary of {level: level_df} (dict)
    '''
    tables = make_tables(URL, nums)
    resort = {}
    # Empty sections map to None so callers can tell "no runs" apart from data.
    for level, table in zip(('green', 'blue', 'black', 'bb'), tables):
        resort[level] = make_run_df(table) if len(table) else None
    return resort
# Resort display names, grouped by the script batches they were scraped in;
# order must match URLs/nums above.
loveland_script = ['Loveland', 'Arapahoe Basin', 'Copper', 'Eldora', 'Alpine Meadows']
vail_script = ['Vail']
monarch_script = ['Monarch', 'Crested Butte', 'Taos']
DP_script = ['Diamond Peak']
WP_script = ['Winter Park']
BC_script = ['Beaver Creek']
resorts = loveland_script + vail_script + monarch_script + DP_script + WP_script + BC_script
dct = {} # {resort: {level: level_df}}
# NOTE: the loop variable 'nums' shadows the module-level 'nums' list after
# the first iteration (zip has already captured the original object).
for resort,URL,nums in zip(resorts,URLs,nums):
    dct[resort] = make_df_dicts(URL,nums)
# Persist the scraped tables for downstream analysis.
output = open('../data/resort_dict.pkl', 'wb')
pickle.dump(dct, output)
output.close()
# loveland_greens = [word.encode('ascii','ignore').strip().decode('utf-8') for word in d['Loveland']['green']['Name']]
# loveland_blues = [word.encode('ascii','ignore').strip().decode('utf-8') for word in d['Loveland']['blue']['Name']]
# loveland_blacks = [word.encode('ascii','ignore').strip().decode('utf-8') for word in d['Loveland']['black']['Name']]
# loveland_bbs = [word.encode('ascii','ignore').strip().decode('utf-8') for word in d['Loveland']['bb']['Name']]
#
# def get_trails_list(resort,level):
# if d[resort][level] is None:
# return []
# else:
# return [word.encode('ascii','ignore').strip().decode('utf-8') for word in d[resort][level]['Name']]
#
# def get_table(URL):
# content = requests.get(URL).content
#
# soup = BeautifulSoup(content, "html.parser")
#
# rows = soup.select('tr')
#
# table_lst = []
# for row in rows:
# cell_lst = [cell for cell in row if cell != '\n']
# cell_lst = [cell.text for cell in cell_lst]
# table_lst.append(cell_lst)
#
# ranking = pd.DataFrame(table_lst)
# column_names = [x.strip('\n') for x in table_lst[0]]
# ranking.columns = column_names
# ranking = ranking.drop(0)
# if len(ranking['Resort Name'][1]) == 1:
# ranking = ranking.drop(1)
# ranking['Last Updated'] = ranking['Resort Name'].apply(lambda x: x.split('\n')[3])
# ranking['Resort Location'] = ranking['Resort Name'].apply(lambda x: x.split('\n')[2])
# ranking['Resort Name'] = ranking['Resort Name'].apply(lambda x: x.split('\n')[1])
# ranking['User Rating'] = ranking['User Rating'].apply(lambda x: x.split('\n')[1:3])
# return ranking
#
# terrain = get_table(URL_RM_terrain)
# mtn_stats = get_table(URL_RM_stats)
#
# terrain['Runs'] = terrain['Runs'].apply(lambda x: int(x.strip('\n').replace('/','')))
# levels = ['Beginner', 'Intermediate', 'Advanced', 'Expert']
# level_columns = dict()
# for level in levels:
# terrain[level] = terrain[level].apply(lambda x: int(x[:-1]) if len(x) > 2 else 0)
# level_columns[level] = '% '+level
# terrain = terrain.rename(columns = level_columns)
#
# num_fields = ['Base','Summit','Vertical Drop','Longest Run','Snow Making']
# field_columns = dict()
# for field in num_fields:
# field_columns[field] = field+' ({})'.format(mtn_stats[field][1][-2:])
# mtn_stats[field] = mtn_stats[field].apply(lambda x: float(x[:-2]) if x != 'N/A' else 0)
# mtn_stats = mtn_stats.rename(columns=field_columns) |
14,990 | 0144c497b2dc446ba7241ab37479b2bbaf8fcea1 |
from time import sleep
import xlwt
from xlwt import Workbook
from bs4 import BeautifulSoup
import requests
from datetime import date
def write_xls_header(filename):
    """Create a workbook with the header row and save it as <filename>.xls."""
    workbook = Workbook()
    sheet = workbook.add_sheet('Sheet 1')
    for col, title in enumerate(('Surname/Name', 'Language Link Name', 'Link example')):
        sheet.write(0, col, title)
    workbook.save(filename + '.xls')
    return workbook, sheet
def write_xlsx(wb, sheet1, data, index, file):
    """Write one three-column row at the given row index and save the file."""
    for col in range(3):
        sheet1.write(index, col, data[col])
    wb.save(file + '.xls')
def main(url):
    """Scrape each language-category link on the root page into the sheet."""
    row = 1
    wb, sheet1 = write_xls_header("surnames")
    response = requests.get(url, timeout=5)
    soup = BeautifulSoup(response.text, 'lxml')
    for anchor in soup.find_all('a', {'class', 'CategoryTreeLabelNs14'}):
        category_url = "https://en.wikipedia.org" + anchor['href']
        category_name = anchor.text
        print("***********************" + category_name)
        print(row)
        row = recurrent(category_url, category_name, row, wb, sheet1)
def recurrent(url, name, index, wb, sheet1):
    """Walk one category page, writing short surname entries to the sheet.

    Links whose text mentions (sub)surname categories are followed one level
    deeper via sub_getdata; plain entries of at most two words are written
    directly. Returns the next free row index.
    """
    response = requests.get(url, timeout=5)
    soup = BeautifulSoup(response.text, 'lxml')
    keywords = ("surnames", "Surnames", "names")

    for group in soup.find_all('div', {'class', "mw-category-group"}):
        for item in group.find_all('li'):
            link = item.find('a')
            if any(k in link.text for k in keywords):
                index = sub_getdata("https://en.wikipedia.org" + link['href'], link.text, index, wb, sheet1)
            elif len(item.text.split(" ")) <= 2 and len(item.text) > 2:
                write_xlsx(wb, sheet1, [name, item.text, response.url], index, "surnames")
                index = index + 1

    for section in soup.find_all('div', {'class': 'mw-content-ltr'}):
        for item in section.find_all('li'):
            if any(k in item.text for k in keywords):
                continue
            if len(item.text.split(" ")) <= 2 and len(item.text) > 2:
                write_xlsx(wb, sheet1, [name, item.text, response.url], index, "surnames")
                index = index + 1
                print("-----------------" + item.text)
    return index
def sub_getdata(url, name,index, wb, sheet1):
    """Second-level category walk: record surnames and recurse one level deeper.

    Unlike recurrent(), rows written from the category groups use the link
    text (a.text) rather than the list-item text. Returns the next free row
    index.
    """
    val = ''  # unused; kept as-is
    print("*************************"+name)
    response = requests.get(url, timeout = 5)
    soup = BeautifulSoup(response.text, 'lxml')
    divs = soup.find_all('div',{'class', "mw-category-group"})
    for div in divs:
        lis = div.find_all('li')
        for li in lis:
            a = li.find('a')
            # Category-like links are followed one level deeper.
            if a.text.find("surnames") > -1 or a.text.find("Surnames") > -1 or a.text.find("names") > -1:
                index = sub_getdatas("https://en.wikipedia.org"+a['href'],a.text, index ,wb, sheet1)
            else:
                # Plain entries of at most two words are written directly.
                if len(li.text.split(" ")) <= 2 and len(li.text) > 2:
                    write_xlsx(wb, sheet1, [name,a.text,response.url], index,"surnames")
                    index = index + 1
    sub_divs = soup.find_all('div',{'class':'mw-content-ltr'})
    for sub_div in sub_divs:
        lis = sub_div.find_all('li')
        for li in lis:
            # Category-like entries are skipped in the content section.
            if li.text.find("surnames") > -1 or li.text.find("Surnames") > -1 or li.text.find("names") > -1:
                pass
            else:
                if len(li.text.split(" ")) <= 2 and len(li.text) > 2:
                    write_xlsx(wb, sheet1, [name,li.text,response.url], index,"surnames")
                    index = index + 1
    return index
def sub_getdatas(url, name,index, wb, sheet1):
    """Third (deepest) category walk: record surnames without recursing further.

    Category-like links are ignored here (recursion was deliberately
    disabled, see the commented call). Returns the next free row index.
    """
    val = ''  # unused; kept as-is
    print(name)
    response = requests.get(url, timeout = 5)
    soup = BeautifulSoup(response.text, 'lxml')
    divs = soup.find_all('div',{'class', "mw-category-group"})
    for div in divs:
        lis = div.find_all('li')
        for li in lis:
            a = li.find('a')
            if a.text.find("surnames") > -1 or a.text.find("Surnames") > -1 or a.text.find("names") > -1:
                # sub_getdata("https://en.wikipedia.org"+a['href'],index, a.text,wb, sheet1)
                pass
            else:
                # Plain entries of at most two words: write the link text.
                if len(li.text.split(" ")) <= 2 and len(li.text) > 2:
                    write_xlsx(wb, sheet1, [name,a.text,response.url], index,"surnames")
                    index = index + 1
    sub_divs = soup.find_all('div',{'class':'mw-content-ltr'})
    for sub_div in sub_divs:
        lis = sub_div.find_all('li')
        for li in lis:
            if li.text.find("surnames") > -1 or li.text.find("Surnames") > -1 or li.text.find("names") > -1:
                pass
            else:
                if len(li.text.split(" ")) <= 2 and len(li.text) > 2:
                    write_xlsx(wb, sheet1, [name,li.text,response.url], index,"surnames")
                    index = index + 1
    return index
# As = soup.find_all("a",{"class":"CategoryTreeLabelNs14"})
# if len(As) == 0:
# for a in As:
# print("***********************************"+a.text)
# get_data("https://en.wikipedia.org"+a['href'])
# get_data(divs)
# except Exception as e:
# divs = soup.find_all('div',{'class', 'CategoryTreeItem'})
# for div in divs:
# sub_url = "https://en.wikipedia.org"+div.find('a')['href']
# recurrent(sub_url)
if __name__ == "__main__":
    # Root category: all surname categories grouped by language.
    url = "https://en.wikipedia.org/wiki/Category:Surnames_by_language"
    main(url)
14,991 | fd15ea4873fbec7c4a4f270e61832810015aa945 | from django.db import models
# Create your models here.
# Dataset model will contain all the 3,33,333 records.
class Dataset(models.Model):
    """One word/count record of the imported dataset."""
    # NOTE(review): max_length=100000 exceeds practical VARCHAR limits on
    # several database backends -- confirm the intended column size.
    word = models.CharField(max_length=100000)
    # NOTE(review): count is stored as text with an implausible max_length;
    # an IntegerField seems intended -- confirm before changing the schema.
    count = models.CharField(max_length=100000000000000)
    def __str__(self):
        return self.word
|
14,992 | 3bcb3ab455ed80c8c639da19024c347345613a1d | def underline(text):
return '\u0332'.join([c for c in text]) + '\u0332'
|
14,993 | 0508e675d07a13f48510fdbc62896c938270144c | def myfunc():
print('Hi, I am a func inside mymodule.py')
|
14,994 | e1b60d0d96e088f9b964a1d8f818ecbe83639ed4 | # Retrieve functions
from datetime import datetime as dt
import math
import re
import time
import dateutil.parser
import requests
import json
import config as cfg
import numpy as np
def get_filter_object(filterTerm, fieldName, filterValue=None, fieldValue=None):
    """
    Returns a filter object that can be included in a OpenSearch query (https://www.elastic.co/guide/en/elasticsearch/reference/current/query-filter-context.html)
    filterTerm options: None, =, >, >=, <, <=. Not None filterTerm should be accompanied with values.
    """
    # No field or no (usable) filter term: nothing to filter on.
    if fieldName is None or filterTerm is None or filterTerm == 'None':
        return None
    if filterTerm == '=' and fieldValue is not None:
        return {'term': {fieldName: fieldValue}}
    range_ops = {'>': 'gt', '>=': 'gte', '<': 'lt', '<=': 'lte'}
    if filterValue is not None and filterTerm in range_ops:
        return {'range': {fieldName: {range_ops[filterTerm]: filterValue}}}
    return None
def getVector(text):
    """
    Calls an external service to get the 512 dimensional vector representation of a piece of text.
    """
    payload = {'text': text, 'access_key': cfg.vectoriser_access_key}
    response = requests.post(cfg.use_vectoriser, json=payload)
    return response.json()['vectors']
def getVectorSemanticSBERT(text):
    """
    Calls an external service to get the 768 dimensional vector representation of a piece of text.
    """
    payload = {'text': text, 'access_key': cfg.vectoriser_access_key}
    response = requests.post(cfg.sbert_vectoriser, json=payload)
    return response.json()['vectors']
def getVectorSemanticSBERTArray(text):
    """
    Calls an external service to get the 768 dimensional vector representation of each element of a piece of text.
    """
    return [getVectorSemanticSBERT(element) for element in text]
def checkOntoSimilarity(ontology_id):
    """
    Calls an external service to check if an ontology based similarity measures exist.
    Returns the service response with 'statusCode' added
    (200 when the ontology exists, 404 otherwise).
    """
    response = requests.post(cfg.ontology_sim + '/status', json={'ontologyId': ontology_id})
    result = response.json()
    result['statusCode'] = response.status_code
    return result
def getOntoSimilarity(ontology_id, key):
    """
    Calls an external service to get ontology based similarity values for concept comparisons.
    Returns the similarity map for 'key', or {} when the service has none.
    """
    response = requests.post(cfg.ontology_sim + '/query',
                             json={'ontologyId': ontology_id, 'key': key})
    return response.json().get('map', {})
def setOntoSimilarity(ontology_id, ontology_sources, relation_type=None, root_node=None, similarity_method="wup"):
    """
    Calls an external service to create ontology based similarity values for concept comparisons.
    Optional relation_type / root_node are only sent when non-empty.
    """
    request_body = {'ontologyId': ontology_id, 'sources': ontology_sources}
    if relation_type is not None and len(relation_type) > 0:
        request_body['relation_type'] = relation_type
    if root_node is not None and len(root_node) > 0:
        request_body['root_node'] = root_node
    request_body['similarity_method'] = similarity_method
    response = requests.post(cfg.ontology_sim + '/preload', json=request_body)
    return response.json()
def removeOntoIndex(ontology_id):
    """
    Calls an external service to remove an ontology index of similarity measures.

    Returns the service's JSON response, or False if the request (or JSON
    decoding) fails.
    """
    url = cfg.ontology_sim + '/delete'
    body = {
        "ontologyId": ontology_id
    }
    try:
        res = requests.post(url, json=body)
        return res.json()
    # BUG FIX: was a bare 'except:', which also swallowed SystemExit and
    # KeyboardInterrupt; keep the best-effort behaviour but only for
    # ordinary exceptions.
    except Exception:
        print("Could not remove details for ontology with id " + ontology_id)
        return False
def add_vector_fields(attributes, data):
    """
    Expand data values to include vector fields.
    Transforms "x: val" to "x: {name: val, rep: vector(val)}"
    """
    for attrib in attributes:
        metric = attrib['similarity']
        if metric not in ('Semantic USE', 'Semantic SBERT', 'Array SBERT'):
            continue
        value = data.get(attrib['name'])
        if value is None:
            continue
        if metric == 'Semantic USE':
            data[attrib['name']] = {'name': value, 'rep': getVector(value)}
        elif metric == 'Semantic SBERT':
            data[attrib['name']] = {'name': value, 'rep': getVectorSemanticSBERT(value)}
        else:
            # Array SBERT: one {'rep': vector} entry per array element.
            reps = [{'rep': vec} for vec in getVectorSemanticSBERTArray(value)]
            data[attrib['name']] = {'name': value, 'rep': reps}
    return data
def remove_vector_fields(attributes, data):
    """
    Flatten data values to remove vector fields.
    Transforms "x: {name: val, rep: vector(val)}" to "x: val"
    """
    vector_metrics = ('Semantic USE', 'Semantic SBERT', 'Array SBERT')
    for attrib in attributes:
        if attrib['similarity'] not in vector_metrics:
            continue
        wrapped = data.get(attrib['name'])
        if wrapped is not None:
            data[attrib['name']] = wrapped['name']
    return data
def add_lowercase_fields(attributes, data):
    """
    Change values for fields of EqualIgnoreCase to lowercase.
    Transforms "x: Val" to "x: val"
    """
    for attrib in attributes:
        if attrib['similarity'] != 'EqualIgnoreCase':
            continue
        current = data.get(attrib['name'])
        if current is not None:
            data[attrib['name']] = current.lower()
    return data
def get_attribute_by_name(attributes, attributeName):
    """
    Retrieves an attribute by name from list of attributes (None if absent).
    """
    return next((a for a in attributes if a['name'] == attributeName), None)
def explain_retrieval(es, index_name, query, doc_id, matched_queries):
    """
    End-point: Explain the scoring for a retrieved case.

    Returns one {matched_query_name: score} entry per matched query, in the
    order the explanation details are reported.
    """
    query.pop("size", None)  # the explain request does not support [size]
    res = es.explain(index=index_name, body=query, id=doc_id, stored_fields="true")
    details = res["explanation"]["details"]
    return [{name: details[pos]['value']} for pos, name in enumerate(matched_queries)]
def get_explain_details2(match_explanation):
    """
    Extracts the field names and local similarity values from explanations.
    Note: Could fail if the format of explanations change as it uses regex to extract field names. Skips explanation for
    a field if it cannot find the field name.
    """
    attrib_pattern = re.compile(r"attrib=([a-zA-Z0-9_\-\s]+)")
    results = []
    matchers = match_explanation["details"]  # at times the explanation is not in 'details' list!!
    if len(matchers) <= 1:
        # A single (or no) sub-explanation: scan the whole explanation text
        # and report the top-level score.
        found = attrib_pattern.search(str(match_explanation))
        if found:
            results.append({"field": found.group(1), "similarity": match_explanation['value']})
    else:
        for matcher in matchers:
            found = attrib_pattern.search(str(matcher))
            if found:
                results.append({"field": found.group(1), "similarity": matcher['value']})
    return results
def get_explain_details(match_explanation):
    """
    Extracts the field names and local similarity values from explanations,
    recursing into nested explanation details.

    Note: Could fail if the format of explanations change as it uses regex to
    extract field names. Skips explanation for a field if it cannot find the
    field name.
    """
    expl = []
    for detail in match_explanation["details"]:
        # Improvement: the original stringified each detail and ran the same
        # regex up to three times per item (two findall + one search);
        # evaluate it once and reuse the result.
        fields = re.findall(r"attrib=([a-zA-Z0-9_\-\s]+)", str(detail))
        if len(fields) > 1:
            # Several attributes inside one detail: recurse into it.
            expl.extend(get_explain_details(detail))
        elif len(fields) == 1:
            expl.append({"field": fields[0], "similarity": detail['value']})
    return expl
def cosine_similarity(v1, v2):
    """
    Computes cosine similarity between two vectors.
    """
    a = np.asarray(v1)
    b = np.asarray(v2)
    return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
def get_feedback_details(queryFeatures, pId, cId, es):
    """
    Manually compares arrays and makes note of low scoring elements.

    For each 'Array SBERT' query feature, fetches case cId from index pId,
    vectorises the query values, and reports every case element whose best
    cosine similarity against any query element falls below 0.7.
    Returns a list of {field, value, similarity} dicts.
    """
    values = {}
    feedback = []
    # get case
    query = {}
    query['query'] = {}
    query['query']['terms'] = {}
    query['query']['terms']["_id"] = []
    query['query']['terms']["_id"].append(cId)
    res = es.search(index=pId, body=query)
    # find attributes that are array sbert and store their values, also convert query array to vector array
    for attrib in queryFeatures:
        if attrib['similarity'] == 'Array SBERT':
            temp = {"values": res['hits']['hits'][0]['_source'][attrib['name']]['name'], "vec": res['hits']['hits'][0]['_source'][attrib['name']]['rep'], "queryRep": getVectorSemanticSBERTArray(attrib['value'])}
            values[attrib['name']] = temp
    # perform a cosine similarity between query elements and case elements
    for key, value in values.items():
        for idx, vec in enumerate(value['vec']):
            # Track the best match of this case element across all query elements.
            maxsim = 0
            for idx2, vec2 in enumerate(value['queryRep']):
                sim = cosine_similarity(vec['rep'], vec2)
                if sim > maxsim:
                    maxsim = sim
            # 0.7 is the "poor match" threshold that triggers feedback.
            if maxsim < 0.7:
                feedback.append({"field": key, "value": value['values'][idx], "similarity": maxsim})
    #print("feedback: ", feedback)
    return feedback
def get_min_max_values(es, casebase, attribute):
    """
    Query the index for the min and max of a numeric attribute in a casebase.

    Returns {'max', 'min', 'interval'}; falls back to max=1, min=0 when the
    index holds no values for the attribute.
    """
    aggs_query = {
        "aggs": {
            "max": {"max": {"field": attribute}},
            "min": {"min": {"field": attribute}}
        }
    }
    res = es.search(index=casebase, body=aggs_query, explain=False)
    highest = res['aggregations']['max']['value']
    lowest = res['aggregations']['min']['value']
    if highest is None or lowest is None:
        # Empty index (or attribute absent): use a unit range as a default.
        highest, lowest = 1, 0
    return {"max": highest, "min": lowest, "interval": highest - lowest}
def update_attribute_options(es, proj, attrNames=None):
    """
    Refresh min/max-derived options of numeric/date attributes in a project.

    Re-queries the casebase for each attribute's min and max, updates the
    attribute's option block in place (min, max, interval, nscale/ndecay for
    numbers, dscale/ddecay for dates) and persists the modified project
    document back to the 'projects' index.

    Args:
        es: search client with .search()/.update().
        proj: project document (mutated in place).
        attrNames: attribute names to refresh; all attributes when falsy.
    Returns:
        The index-update response.
    """
    # BUG FIX: the default was a mutable list ([]); None avoids the shared
    # mutable-default pitfall while 'not attrNames' keeps the old behaviour.
    if not attrNames:  # if no attributes specified, update all attributes
        attrNames = [attr['name'] for attr in proj['attributes']]
    for attr in attrNames:
        for elem in proj['attributes']:
            if elem['name'] == attr:
                if elem['type'] == "Integer" or elem['type'] == "Float" or elem['type'] == "Date":
                    res = get_min_max_values(es, proj['casebase'], attr)
                    if 'options' in elem:
                        if 'min' in elem['options']:
                            elem['options']['min'] = res['min']
                        if 'max' in elem['options']:
                            elem['options']['max'] = res['max']
                            if elem['options']['max'] == elem['options']['min']:  # if min and max are the same, set max to min + 0.001
                                elem['options']['max'] += 0.001
                        if 'interval' in elem['options']:
                            elem['options']['interval'] = res['interval']
                        if 'nscale' in elem['options']:  # set scale to 10% of the interval (floats and integers)
                            elem['options']['nscale'] = res['interval'] / 10
                            elem['options']['nscale'] = 1 if elem['options']['nscale'] < 1 else elem['options']['nscale']  # if nscale is less than 1, set it to 1
                            elem['options']['ndecay'] = 0.9
                        if 'dscale' in elem['options']:  # set scale to 10% of the interval (dates)
                            elem['options']['dscale'] = str(math.ceil((dt.fromtimestamp(res['max'] / 1000) - dt.fromtimestamp(res['min'] / 1000)).days / 10)) + "d"
                            # BUG FIX: the original used .replace("0d", "1d"),
                            # which also corrupted scales like "10d" -> "11d"
                            # and "20d" -> "21d". Only a literal zero scale
                            # should become one day.
                            if elem['options']['dscale'] == "0d":
                                elem['options']['dscale'] = "1d"
                            elem['options']['ddecay'] = 0.9
    result = es.update(index='projects', id=proj['id__'], body={'doc': proj}, filter_path="-_seq_no,-_shards,-_primary_term,-_version,-_type", refresh=True)
    return result
def getQueryFunction(projId, caseAttrib, queryValue, type, weight, simMetric, options):
    """
    Determine query function to use base on attribute specification and retrieval features.
    Add new query functions in the if..else statement as elif.

    Args:
        projId: project id (used to locate per-project ontology indices).
        caseAttrib: case attribute (field) name.
        queryValue: the query's value for this attribute.
        type: attribute data type name (e.g. "String", "Float").
        weight: attribute weight applied to the similarity score.
        simMetric: similarity metric name selecting the builder below.
        options: per-metric option dict (may be None); defaults are applied
                 per branch.
    Returns:
        A query clause built by the matching similarity-function builder;
        falls back to MostSimilar when no metric matches.
    """
    # print("all info: ", projId, caseAttrib, queryValue, weight, simMetric, options)
    # minVal = kwargs.get('minVal', None) # optional parameter, minVal (name 'minVal' in function params when calling function e.g. minVal=5)
    if simMetric == "Equal":
        if type == "String" or type == "Text" or type == "Keyword" or type == "Integer":
            return Exact(caseAttrib, queryValue, weight)
        elif type == "Float":
            return ExactFloat(caseAttrib, queryValue, weight)
    elif simMetric == "EqualIgnoreCase":
        # Case-insensitive match: values are stored lowercased elsewhere.
        queryValue = queryValue.lower()
        return Exact(caseAttrib, queryValue, weight)
    elif simMetric == "McSherry More": # does not use the query value
        maxValue = options.get('max', 100.0) if options is not None else 100.0 # use 100 if no supplied max
        minValue = options.get('min', 0.0) if options is not None else 0.0 # use 0 if no supplied min
        return McSherryMoreIsBetter(caseAttrib, queryValue, maxValue, minValue, weight)
    elif simMetric == "McSherry Less": # does not use the query value
        maxValue = options.get('max', 100.0) if options is not None else 100.0 # use 100 if no supplied max
        minValue = options.get('min', 0.0) if options is not None else 0.0 # use 0 if no supplied min
        return McSherryLessIsBetter(caseAttrib, queryValue, maxValue, minValue, weight)
    elif simMetric == "INRECA More":
        jump = options.get('jump', 1.0) if options is not None else 1.0 # use 1 if no supplied jump
        return InrecaMoreIsBetter(caseAttrib, queryValue, jump, weight)
    elif simMetric == "INRECA Less":
        jump = options.get('jump', 1.0) if options is not None else 1.0 # use 1 if no supplied jump
        maxValue = options.get('max', 100.0) if options is not None else 100.0 # use 100 if no supplied max
        return InrecaLessIsBetter(caseAttrib, queryValue, maxValue, jump, weight)
    elif simMetric == "Interval":
        maxValue = options.get('max', 100.0) if options is not None else 100.0 # use 100 if no supplied max
        minValue = options.get('min', 100.0) if options is not None else 100.0 # use 100 if no supplied min
        return Interval(caseAttrib, queryValue, maxValue, minValue, weight)
    # Vector metrics are only available when the external vectoriser services
    # are configured.
    elif simMetric == "Semantic USE" and cfg.use_vectoriser is not None:
        return USE(caseAttrib, getVector(queryValue), weight)
    elif simMetric == "Semantic SBERT" and cfg.sbert_vectoriser is not None:
        return Semantic_SBERT(caseAttrib, getVectorSemanticSBERT(queryValue), weight)
    elif simMetric == "Nearest Date":
        scale = options.get('dscale', '365d') if options is not None else '365d'
        decay = options.get('ddecay', 0.999) if options is not None else 0.999
        return ClosestDate(caseAttrib, queryValue, weight, scale, decay)
    elif simMetric == "Nearest Number":
        scale = int(options.get('nscale', 1)) if options is not None else 1
        decay = options.get('ndecay', 0.999) if options is not None else 0.999
        return ClosestNumber(caseAttrib, queryValue, weight, scale, decay)
    elif simMetric == "Nearest Location":
        scale = options.get('lscale', '10km') if options is not None else '10km'
        decay = options.get('ldecay', 0.999) if options is not None else 0.999
        return ClosestLocation(caseAttrib, queryValue, weight, scale, decay)
    elif simMetric == "Table":
        return TableSimilarity(caseAttrib, queryValue, weight, options)
    elif simMetric == "EnumDistance":
        return EnumDistance(caseAttrib, queryValue, weight, options)
    elif simMetric == "Query Intersection":
        return QueryIntersection(caseAttrib, queryValue, weight)
    # Ontology-based metrics look up a precomputed per-project similarity grid.
    elif simMetric == "Path-based":
        sim_grid = getOntoSimilarity(projId + "_ontology_" + options['name'], queryValue)
        return OntologySimilarity(caseAttrib, queryValue, weight, sim_grid)
    elif simMetric == "Feature-based":
        sim_grid = getOntoSimilarity(projId + "_ontology_" + options['name'], queryValue)
        return OntologySimilarity(caseAttrib, queryValue, weight, sim_grid)
    elif simMetric == "Jaccard" or simMetric == "Array": # Array was renamed to Jaccard. "Array" kept on until adequate notice is given to update existing applications.
        return Jaccard(caseAttrib, queryValue, weight)
    elif simMetric == "Array SBERT":
        return ArraySBERT(caseAttrib, getVectorSemanticSBERTArray(queryValue), weight)
    else:
        return MostSimilar(caseAttrib, queryValue, weight)
# Similarity measure functions for retrieve phase of CBR cycle.
# Each similarity function returns a Painless script for Elasticsearch.
# Each function requires field name and set of functions-specific parameters.
def Jaccard(caseAttrib, queryValue, weight):
    """
    Build an Elasticsearch script_score query scoring the Jaccard overlap
    |A ∩ Q| / |A ∪ Q| between the stored array in `caseAttrib` and the
    query array `queryValue`, multiplied by the attribute weight.
    """
    try:
        script_params = {
            "attrib": caseAttrib,
            "queryValue": queryValue,
            "weight": weight
        }
        painless = "float sum = 0; for (int i = 0; i < doc[params.attrib].length; i++) { for (int j = 0; j < params.queryValue.length; j++) { if (doc[params.attrib][i] == params.queryValue[j]) { sum += 1; } } } return sum*params.weight/(doc[params.attrib].length+params.queryValue.length-sum);"
        return {
            "script_score": {
                "query": {"exists": {"field": caseAttrib}},
                "script": {"source": painless, "params": script_params}
            }
        }
    except ValueError:
        print("Error")
def ArraySBERT(caseAttrib, queryValue, weight):
    """
    Score nested SBERT embeddings: for each stored vector under
    `<attrib>.rep.rep`, take the best cosine match against the query
    vectors, average across nested docs (score_mode "avg") and scale by
    the attribute weight.
    """
    try:
        painless = "float max = 0; for (int i = 0; i < params.queryValue.length; i++) { float cosine = cosineSimilarity(params.queryValue[i], doc[params.attrib_vector]); if(cosine > max) { max = cosine; } } return max*params.weight;"
        inner_query = {
            "function_score": {
                "script_score": {
                    "script": {
                        "source": painless,
                        "params": {
                            "attrib": caseAttrib,
                            "queryValue": queryValue,
                            "weight": weight,
                            "attrib_vector": caseAttrib + ".rep.rep"
                        }
                    }
                }
            }
        }
        return {
            "nested": {
                "path": caseAttrib + ".rep",
                "score_mode": "avg",
                "query": inner_query
            }
        }
    except ValueError:
        print("Error")
def McSherryLessIsBetter(caseAttrib, queryValue, maxValue, minValue, weight):
    """
    McSherry "less is better": sim = (max' - v) / (max' - min') with
    max' = max(maxValue, queryValue) and min' = min(minValue, queryValue),
    scaled by weight. queryValue only widens the range; it is not compared
    directly against case values.
    """
    try:
        script = {
            "source": "((float)(Math.max(params.max,params.queryValue) - doc[params.attrib].value) / (float)(Math.max(params.max,params.queryValue) - Math.min(params.min,params.queryValue))) * params.weight",
            "params": {
                "attrib": caseAttrib,
                "queryValue": queryValue,
                "max": maxValue,
                "min": minValue,
                "weight": weight
            }
        }
        return {
            "script_score": {
                "query": {"exists": {"field": caseAttrib}},
                "script": script,
                "_name": caseAttrib
            }
        }
    except ValueError:
        print("McSherryLessIsBetter() is only applicable to numbers")
def McSherryMoreIsBetter(caseAttrib, queryValue, maxValue, minValue, weight):
    """
    McSherry "more is better": sim = 1 - (max' - v) / (max' - min') with
    max' = max(maxValue, queryValue) and min' = min(minValue, queryValue),
    scaled by weight.
    """
    try:
        script = {
            "source": "(1 - ((float)(Math.max(params.max,params.queryValue) - doc[params.attrib].value) / (float)(Math.max(params.max,params.queryValue) - Math.min(params.min,params.queryValue)) )) * params.weight",
            "params": {
                "attrib": caseAttrib,
                "queryValue": queryValue,
                "max": maxValue,
                "min": minValue,
                "weight": weight
            }
        }
        return {
            "script_score": {
                "query": {"exists": {"field": caseAttrib}},
                "script": script,
                "_name": caseAttrib
            }
        }
    except ValueError:
        print("McSherryMoreIsBetter() is only applicable to numbers")
def InrecaLessIsBetter(caseAttrib, queryValue, maxValue, jump, weight):
    """
    INRECA "less is better" similarity for numeric attributes.

    Scoring: value <= query -> full weight; value >= max(maxValue, query)
    -> 0; otherwise jump * linear interpolation between the two bounds,
    scaled by weight. Returns an Elasticsearch script_score query, or None
    (after printing a message) when queryValue is not numeric.
    """
    try:
        queryValue = float(queryValue)
        # build query string
        queryFnc = {
            "script_score": {
                "query": {
                    "exists": {
                        "field": caseAttrib
                    }
                },
                "script": {
                    # Fix: the original Painless source had two unclosed '('
                    # (after ">=" and in the interpolation term), so the
                    # script could not compile on the Elasticsearch side.
                    "source": "if (doc[params.attrib].value <= params.queryValue) { return (1.0 * params.weight) } if (doc[params.attrib].value >= Math.max(params.max,params.queryValue)) { return 0 } return (params.jump * (float)(Math.max(params.max,params.queryValue) - doc[params.attrib].value) / (float)(Math.max(params.max,params.queryValue) - params.queryValue)) * params.weight",
                    "params": {
                        "attrib": caseAttrib,
                        "queryValue": queryValue,
                        "jump": jump,
                        "max": maxValue,
                        "weight": weight
                    }
                },
                "_name": caseAttrib
            }
        }
        return queryFnc
    except ValueError:
        print("InrecaLessIsBetter() is only applicable to numbers")
def InrecaMoreIsBetter(caseAttrib, queryValue, jump, weight):
    """
    INRECA "more is better": full weight when the case value reaches the
    query value, otherwise jump * (1 - (q - v)/q), scaled by weight.
    A non-numeric queryValue prints a message and yields None.
    """
    try:
        numeric_query = float(queryValue)
        script = {
            "source": "if (doc[params.attrib].value >= params.queryValue) { return (1.0 * params.weight) } return (params.jump * (1 - ((float)(params.queryValue - doc[params.attrib].value) / (float)params.queryValue))) * params.weight",
            "params": {
                "attrib": caseAttrib,
                "queryValue": numeric_query,
                "jump": jump,
                "weight": weight
            }
        }
        return {
            "script_score": {
                "query": {"exists": {"field": caseAttrib}},
                "script": script,
                "_name": caseAttrib
            }
        }
    except ValueError:
        print("InrecaMoreIsBetter() is only applicable to numbers")
def Interval(caseAttrib, queryValue, max, min, weight):
    """
    Interval similarity: 1 - |q - v| / (interval width), where the interval
    is widened to include q; scaled by weight.

    NOTE: parameter names `max`/`min` shadow builtins, but they are part of
    the public signature (callers may pass them by keyword), so kept.
    """
    try:
        numeric_query = float(queryValue)
        script = {
            "params": {
                "attrib": caseAttrib,
                "queryValue": numeric_query,
                "max": max,
                "min": min,
                "weight": weight
            },
            "source": "(1 - (float)( Math.abs(params.queryValue - doc[params.attrib].value) / ((float)Math.max(params.max,params.queryValue) - (float)Math.min(params.min,params.queryValue)) )) * params.weight"
        }
        return {
            "script_score": {
                "query": {"exists": {"field": caseAttrib}},
                "script": script,
                "_name": caseAttrib
            }
        }
    except ValueError:
        print("Interval() is only applicable to numbers")
def EnumDistance(caseAttrib, queryValue, weight, options):  # stores enum as array
    """
    EnumDistance local similarity function.

    sim(x, y) = 1 - |ord(x) - ord(y)| / N over the ordered value list in
    options['values'], scaled by weight. Case values absent from the list
    receive no score from this script.
    """
    try:
        # build query string
        queryFnc = {
            "script_score": {
                "query": {
                    "exists": {
                        "field": caseAttrib
                    }
                },
                "script": {
                    "params": {
                        "attrib": caseAttrib,
                        "queryValue": queryValue,
                        "lst": options.get('values'),
                        "weight": weight
                    },
                    "source": "if (params.lst.contains(doc[params.attrib].value)) { return (1 - ( (float) Math.abs(params.lst.indexOf(params.queryValue) - params.lst.indexOf(doc[params.attrib].value)) / (float)params.lst.length )) * params.weight }"
                },
                "_name": caseAttrib
            }
        }
        return queryFnc
    except ValueError:
        # Fix: the message used to say "Interval() is only applicable to
        # numbers" (copy-paste slip). NOTE(review): nothing above raises
        # ValueError, so this handler appears vestigial; kept for parity
        # with the sibling builders.
        print("EnumDistance() could not build the query")
# def TermQuery(caseAttrib, queryValue, weight):
# """
# Matches query to equal field values using in-built method.
# """
# # build query string
# query = {
# "term": {
# caseAttrib: {
# "value": queryValue,
# "boost": weight,
# "_name": caseAttrib
# }
# }
# }
# return query
def Exact(caseAttrib, queryValue, weight):
    """
    Exact-match similarity for keyword-indexed attributes: full weight when
    the stored value equals the query value, nothing otherwise.
    """
    script = {
        "params": {
            "attrib": caseAttrib,
            "queryValue": queryValue,
            "weight": weight
        },
        "source": "if (params.queryValue == doc[params.attrib].value) { return (1.0 * params.weight) }"
    }
    return {
        "script_score": {
            "query": {"match": {caseAttrib: queryValue}},
            "script": script,
            "_name": caseAttrib
        }
    }
def ExactFloat(caseAttrib, queryValue, weight):
    """
    Near-equality match for float attributes: full weight when the stored
    value is within 1e-4 of the query value (a direct == on floats would be
    unreliable).
    """
    script = {
        "params": {
            "attrib": caseAttrib,
            "queryValue": queryValue,
            "weight": weight
        },
        "source": "if (Math.abs(params.queryValue - doc[params.attrib].value) < 0.0001) {return (1.0 * params.weight) }"
    }
    return {
        "script_score": {
            "query": {"match": {caseAttrib: queryValue}},
            "script": script,
            "_name": caseAttrib
        }
    }
# def MostSimilar_2(caseAttrib, queryValue, weight):
# """
# Most similar matches using ES default (works for all attribute types). Default similarity for strings and exact match for other types.
# """
# # build query string
# query = {
# "match": {
# caseAttrib: {
# "query": queryValue,
# "boost": weight,
# "_name": caseAttrib
# }
# }
# }
# return query
def MostSimilar(caseAttrib, queryValue, weight):
    """
    Fallback similarity: Elasticsearch's default relevance (BM25 for text,
    exact match otherwise), rescaled by the attribute weight. The script
    wrapper keeps the attribute visible to explanations via `_name`.
    """
    script = {
        "params": {"attrib": caseAttrib, "weight": weight},
        # `_score` is the relevance score of the inner match query.
        "source": "params.weight * _score"
    }
    return {
        "script_score": {
            "query": {"match": {caseAttrib: queryValue}},
            "script": script,
            "_name": caseAttrib
        }
    }
# def ClosestDate_2(caseAttrib, queryValue, weight, maxDate, minDate): # format 'dd-MM-yyyy' e.g. '01-02-2020'
# """
# Find the documents whose attribute values have the closest date to the query date. The date field is indexed as 'keyword' to enable use of this similarity metric.
# """
# # build query string
# queryFnc = {
# "script_score": {
# "query": {
# "match_all": {}
# },
# "script": {
# "params": {
# "attrib": caseAttrib,
# "weight": weight,
# "queryValue": queryValue,
# "oldestDate": minDate,
# "newestDate": maxDate
# },
# "source": "SimpleDateFormat sdf = new SimpleDateFormat('dd-MM-yyyy', Locale.ENGLISH); doc[params.attrib].size()==0 ? 0 : (1 - Math.abs(sdf.parse(doc[params.attrib].value).getTime() - sdf.parse(params.queryValue).getTime()) / ((sdf.parse(params.newestDate).getTime() - sdf.parse(params.oldestDate).getTime()) + 1)) * params.weight"
# },
# "_name": caseAttrib
# }
# }
# return queryFnc
def ClosestDate(caseAttrib, queryValue, weight, scale, decay):  # expects 'dd-MM-yyyy', e.g. '01-02-2020'
    """
    Date-proximity similarity via exponential decay: the score drops to
    `decay` for dates `scale` away from the query date, scaled by weight.
    """
    script = {
        "params": {
            "attrib": caseAttrib,
            "origin": queryValue,
            "scale": scale,
            "offset": "0",  # no flat zone around the query date
            "decay": decay,
            "weight": weight
        },
        "source": "decayDateExp(params.origin, params.scale, params.offset, params.decay, doc[params.attrib].value) * params.weight"
    }
    return {
        "script_score": {
            "query": {"exists": {"field": caseAttrib}},
            "script": script,
            "_name": caseAttrib
        }
    }
def USE(caseAttrib, queryValue, weight):
    """
    Semantic similarity with Universal Sentence Encoder vectors: cosine
    similarity between the query vector and the stored `<attrib>.rep`
    vector, shifted from [-1, 1] into [0, 1], scaled by weight.
    """
    script = {
        "params": {
            "attrib": caseAttrib,
            "query_vector": queryValue,
            "attrib_vector": caseAttrib + '.rep',
            "weight": weight
        },
        "source": "(cosineSimilarity(params.query_vector, doc[params.attrib_vector])+1)/2 * params.weight"
    }
    return {
        "script_score": {
            "query": {"exists": {"field": caseAttrib}},
            "script": script,
            "_name": caseAttrib
        }
    }
def Semantic_SBERT(caseAttrib, queryValue, weight):
    """
    Semantic similarity with SBERT sentence embeddings: cosine similarity
    between the query vector and the stored `<attrib>.rep` vector, shifted
    from [-1, 1] into [0, 1], scaled by weight.
    """
    script = {
        "params": {
            "attrib": caseAttrib,
            "query_vector": queryValue,
            "attrib_vector": caseAttrib + '.rep',
            "weight": weight
        },
        "source": "(cosineSimilarity(params.query_vector, doc[params.attrib_vector])+1)/2 * params.weight"
    }
    return {
        "script_score": {
            "query": {"exists": {"field": caseAttrib}},
            "script": script,
            "_name": caseAttrib
        }
    }
def ClosestNumber(caseAttrib, queryValue, weight, scale, decay):
    """
    Numeric-proximity similarity via exponential decay: the score drops to
    `decay` for values `scale` away from the query value, scaled by weight.
    """
    script = {
        "params": {
            "attrib": caseAttrib,
            "origin": queryValue,
            "scale": scale,
            "offset": 0,  # no flat zone around the query value
            "decay": decay,
            "weight": weight
        },
        "source": "decayNumericExp(params.origin, params.scale, params.offset, params.decay, doc[params.attrib].value) * params.weight"
    }
    return {
        "script_score": {
            "query": {"exists": {"field": caseAttrib}},
            "script": script,
            "_name": caseAttrib
        }
    }
def ClosestLocation(caseAttrib, queryValue, weight, scale, decay):
    """
    Geo-proximity similarity via exponential decay over a geo_point field:
    the score drops to `decay` for points `scale` away from the query
    location, scaled by weight.
    """
    script = {
        "params": {
            "attrib": caseAttrib,
            "origin": queryValue,
            "scale": scale,
            "offset": "0km",  # no flat zone around the query point
            "decay": decay,
            "weight": weight
        },
        "source": "decayGeoExp(params.origin, params.scale, params.offset, params.decay, doc[params.attrib].value) * params.weight"
    }
    return {
        "script_score": {
            "query": {"exists": {"field": caseAttrib}},
            "script": script,
            "_name": caseAttrib
        }
    }
def TableSimilarity(caseAttrib, queryValue, weight, options):  # stores enum as array
    """
    Table-driven similarity for categorical values: looks up
    sim_grid[query][case] from options['sim_grid'] and scales by weight.
    Case values absent from the grid get no score from this script.
    """
    script = {
        "params": {
            "attrib": caseAttrib,
            "queryValue": queryValue,
            "sim_grid": options.get('sim_grid'),
            "grid_values": list(options.get('sim_grid', {})),
            "weight": weight
        },
        "source": "if (params.grid_values.contains(doc[params.attrib].value)) { return (params.sim_grid[params.queryValue][doc[params.attrib].value]) * params.weight }"
    }
    return {
        "script_score": {
            "query": {"exists": {"field": caseAttrib}},
            "script": script,
            "_name": caseAttrib
        }
    }
def QueryIntersection(caseAttrib, queryValue, weight):
    """
    Query-intersection similarity: |a ∩ q| / |q| where q is the query set
    and a the stored array, scaled by weight. Empty or blank case values
    score nothing.
    """
    painless = (
        "double intersect = 0.0; "
        "if (doc[params.attrib].size() > 0 && doc[params.attrib].value.length() > 0) { "
        "for (item in doc[params.attrib]) { if (params.queryValue.contains(item)) { intersect = intersect + 1; } }"
        "return (intersect/params.queryValue.length) * params.weight;} "
    )
    return {
        "script_score": {
            "query": {"exists": {"field": caseAttrib}},
            "script": {
                "params": {
                    "attrib": caseAttrib,
                    "queryValue": list(queryValue),
                    "weight": weight
                },
                "source": painless
            },
            "_name": caseAttrib
        }
    }
def OntologySimilarity(caseAttrib, queryValue, weight, sim_grid):
    """
    Ontology-based similarity (e.g. Wu &amp; Palmer): reads the precomputed
    similarity of each case concept to the query concept from `sim_grid`
    and scales by weight. Concepts missing from the grid score nothing.
    """
    script = {
        "params": {
            "attrib": caseAttrib,
            "queryValue": queryValue,
            "sim_grid": sim_grid,
            "grid_concepts": list(sim_grid),
            "weight": weight
        },
        "source": "if (params.grid_concepts.contains(doc[params.attrib].value)) { return (params.sim_grid[doc[params.attrib].value]) * params.weight }"
    }
    return {
        "script_score": {
            "query": {"exists": {"field": caseAttrib}},
            "script": script,
            "_name": caseAttrib
        }
    }
def MatchAll():
    """
    Query matching every document. Plain searches return at most 10,000
    hits; page with the Scroll API to go beyond that limit.
    """
    return dict(match_all={})
|
14,995 | 06ef9c88c3719c02a2919b6dfe0d7a11ae5813c5 | # !/usr/bin/python
# -*- coding:utf-8 -*-
# 定义api
"""
栈的定义:
一个后进先出的集合。
class Stack<Item>:
Stack() 实例化栈对象
void push(Item item) 将元素入栈
Item pop() 弹出栈顶元素
bool is_empty() 判断栈是否为空
int size() 返回栈的当前容量
__iter__ 返回栈的迭代器对象
__next__ 返回迭代器中下一个元素
"""
from abc import ABCMeta, abstractmethod
class IteratorInterface(object):
    """Informal interface: subclasses must implement the iterator protocol."""
    def __iter__(self):
        # Must return an iterator object (normally `self`).
        raise NotImplementedError
    def __next__(self):
        # Must return the next item, or raise StopIteration when exhausted.
        raise NotImplementedError
class AbstractStack(IteratorInterface, metaclass=ABCMeta):
    """Abstract LIFO stack API; see the module docstring for the full contract."""
    @abstractmethod
    def push(self, item):
        """Push `item` onto the top of the stack."""
        pass
    @abstractmethod
    def pop(self):
        """Remove and return the top item."""
        pass
    @abstractmethod
    def is_empty(self):
        """Return True when the stack holds no items."""
        pass
    @abstractmethod
    def size(self):
        """Return the number of items currently stored."""
        pass
class Error(Exception):
    """Base class for exceptions raised by this module."""
    pass
class StackIsEmptyError(Error):
    # Per its name, intended for popping/peeking an empty stack; no raiser
    # is visible in this chunk — presumably concrete stacks raise it.
    pass
|
14,996 | dfbcb0ee59248865e0365b4d510e60c396ef4829 | import random
class robo:
    # Robot sprite state held as class attributes.
    # x/y are drawn ONCE, at import time, so the start position is random
    # per program run but shared by every reference to robo.x / robo.y.
    x = random.randint(300, 600)
    y = random.randint(500, 600)
    color = (153, 51, 0)  # RGB triple (brownish)
class bola:
    # Ball ("bola") state held as class attributes: fixed start position,
    # yellow colour. `thickness` presumably feeds the draw call's
    # radius/width parameter — confirm against the renderer.
    x = 400
    y = 300
    thickness = 10
    color = (255, 255, 0)  # RGB yellow
class line:
    # Line segment from the robot to the ball.
    # NOTE(review): endpoints are computed once at import time from the
    # current robo/bola attributes, so they will not track later movement
    # — confirm this is intended.
    xi = robo.x+0.5  # +0.5 x offset; purpose unclear (centring?) — TODO confirm
    yi = robo.y
    xf = bola.x
    yf = bola.y
    thickness = 1
    color = (0, 0, 0)  # RGB black
|
14,997 | b846b87a45d9f74b36e95c150e599cdcbb27818f | # Programmers Coding Test Practice
# 2018 KAKAO BLIND RECRUITMENT [1차] 뉴스 클러스터링
#
# https://programmers.co.kr/learn/courses/30/lessons/17677
#
# ==============================================================================
from collections import Counter
from copy import deepcopy
from string import ascii_uppercase
def get_multiple_set(string):
    """
    Return the multiset (list, order preserved) of two-letter bigrams of
    `string`, upper-cased; pairs containing any non-letter are dropped.
    """
    upper = string.upper()
    return [a + b
            for a, b in zip(upper, upper[1:])
            if a in ascii_uppercase and b in ascii_uppercase]
def get_intersection(multiset1, multiset2):
    """
    Multiset intersection: each element appears min(count in multiset1,
    count in multiset2) times, in multiset1's order.

    Improvement: the original deep-copied multiset2 (needless for a list of
    strings) and did O(n*m) `in`/`remove` scans; a Counter of remaining
    occurrences gives the same result in O(n + m).
    """
    remaining = Counter(multiset2)
    intersection = []
    for element in multiset1:
        if remaining[element] > 0:
            intersection.append(element)
            remaining[element] -= 1
    return intersection
def solution(str1, str2):
    """
    Jaccard similarity of the two strings' letter-bigram multisets, scaled
    to an integer by 65536 (problem convention). Two empty multisets are
    defined as maximal similarity.
    """
    multiplier = 65536
    multiset1 = get_multiple_set(str1)
    multiset2 = get_multiple_set(str2)
    intersection = get_intersection(multiset1, multiset2)
    # Multiset union size: |A| + |B| - |A ∩ B|.
    divider = len(multiset1) + len(multiset2) - len(intersection)
    if divider == 0:
        return multiplier
    # Fix: reuse the precomputed union size instead of recomputing the same
    # expression inline (the original duplicated the denominator).
    return int(multiplier * len(intersection) / divider)
if __name__ == '__main__':
    # Sample cases from the problem statement: similarity is scaled by
    # 65536, so identical multisets give 65536 and disjoint ones 0.
    assert solution('FRANCE', 'french') == 16384
    assert solution('handshake', 'shake hands') == 65536
    assert solution('aa1+aa2', 'AAAA12') == 43690
    assert solution('E=M*C^2', 'e=m*c^2') == 65536
|
14,998 | c107be358ea37ac32543dabd96b4df3a0048a01b | from io import BytesIO
from subprocess import Popen, PIPE
import pytest
def lzo_stream(*, length: int = 4096):
    """
    Return a rewound BytesIO holding `length` null bytes compressed with
    the external `lzop` tool; raises if lzop wrote anything to stderr.
    """
    proc = Popen(["lzop", "-c"], stdin=PIPE, stdout=PIPE, stderr=PIPE)
    compressed, errors = proc.communicate(input=b"\x00" * length)
    if errors:
        raise Exception(f"Failed to compress with error {errors!r}")
    buffer = BytesIO(compressed)
    buffer.seek(0)
    return buffer
@pytest.fixture
def small_lzo():
    # Smallest meaningful payload: a single null byte, compressed.
    return lzo_stream(length=1)
@pytest.fixture
def big_lzo():
    # Large payload (1 MB of null bytes) to exercise multi-block streams.
    return lzo_stream(length=10 ** 6)
|
14,999 | e3fe40178140cc9dedffd1e1918d8ccb528d4913 | class Solution:
def swimInWater(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
n=len(grid)
count=-1
grid2=[[n**2 for i in range(n)] for j in range(n)]
grid2[0][0]=grid[0][0]
while count!=0:
count=0
for i in range(n):
for j in range(n):
neighbor=[]
if i+1<n and grid:
neighbor.append(grid[i+1][j])
if i-1>=0:
neighbor.append(grid[i-1][j])
if j+1<n:
neighbor.append(grid[i][j+1])
if j-1>=0:
neighbor.append(grid[i][j-1])
minvalue=min(neighbor)
if minvalue>grid[i][j]:
count+=1
grid[i][j]=minvalue
return grid[n-1][n-1]
# Ad-hoc smoke run on the LeetCode 778 sample grid (true answer: 16).
# NOTE(review): the result is computed but never checked or printed.
grid=[[0,1,2,3,4],[24,23,22,21,5],[12,13,14,15,16],[11,17,18,19,20],[10,9,8,7,6]]
c=Solution().swimInWater(grid)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.