text stringlengths 38 1.54M |
|---|
import random
import json
from pico2d import *
class Astronaut:
    """Player character: handles walking, jumping, shooting and drawing."""

    # Sprite sheets shared by every instance; loaded lazily on first init.
    image = None
    life_image = None
    # Distance conversion: 1 pixel is 3 cm.
    PIXEL_PER_METER = (1 / 0.03)
    # Walking speed.
    RUN_SPEED_KMPH = 25  # Km / Hour
    RUN_SPEED_MPM = (RUN_SPEED_KMPH * 1000.0 / 60.0)
    RUN_SPEED_MPS = (RUN_SPEED_MPM / 60.0)
    RUN_SPEED_PPS = (RUN_SPEED_MPS * PIXEL_PER_METER)
    # Speed while being pulled (sucked) in.
    PULL_SPEED_KMPH = 18  # Km / Hour
    PULL_SPEED_MPM = (PULL_SPEED_KMPH * 1000.0 / 60.0)
    PULL_SPEED_MPS = (PULL_SPEED_MPM / 60.0)
    PULL_SPEED_PPS = (PULL_SPEED_MPS * PIXEL_PER_METER)
    # Weapon identifiers (select the sprite-sheet column group).
    BLUE, PURPLE, YELLOW, GREEN, RED = "0", "1", "2", "3", "4"
    # Per-weapon animation data, loaded once at class-definition time.
    # BUGFIX: use a context manager instead of a bare open()/close() pair.
    with open('json/astronaut/weapon_property.json', 'r') as weapon_property_file:
        weapon_property = json.load(weapon_property_file)
    # Facing direction (selects the sprite-sheet row group).
    LEFT_DIRECT, RIGHT_DIRECT, FRONT_DIRECT = "6", "3", "0"
    # Walking animation timing.
    TIME_PER_MOVE = 3.8
    MOVE_PER_TIME = 1.0 / TIME_PER_MOVE
    FRAME_PER_MOVE = 3
    # Jump states and physics constants.
    NOT_JUMP, JUMP, FALLING = 0, 1, 2
    GRAVITY_ACCELERATION_MPH = 98
    JUMP_SPEED_KMPH = 20  # Km / Hour
    JUMP_SPEED_MPM = (JUMP_SPEED_KMPH * 1000.0 / 60.0)
    JUMP_SPEED_MPS = (JUMP_SPEED_MPM / 60.0)
    JUMP_SPEED_PPS = (JUMP_SPEED_MPS * PIXEL_PER_METER)
    JUMP_MOVE_SPEED_KMPH = 15  # Km / Hour
    JUMP_MOVE_SPEED_MPM = (JUMP_MOVE_SPEED_KMPH * 1000.0 / 60.0)
    JUMP_MOVE_SPEED_MPS = (JUMP_MOVE_SPEED_MPM / 60.0)
    JUMP_MOVE_SPEED_PPS = (JUMP_MOVE_SPEED_MPS * PIXEL_PER_METER)

    def __init__(self, x, y, last_plate):
        """Create the astronaut at (x, y); *last_plate* is the floor height."""
        # BUGFIX: cache sprites on the class -- the old code assigned to the
        # instance, so the class-level cache never hit and every Astronaut
        # reloaded both images.
        if Astronaut.image is None:
            Astronaut.image = load_image('image/astronaut/astronaut.png')
        if Astronaut.life_image is None:
            Astronaut.life_image = load_image('image/astronaut/life.png')
        self.x, self.y = x, y
        # lowest platform (the floor)
        self.last_plate = last_plate
        # draw the bounding box when True (debug, toggled with F12)
        self.draw_bb_bool = False
        # remaining lives
        self.life = 3
        # shooting state (shot_frame alternates rows 1/2 while shooting)
        self.is_shot = False
        self.shot_frame = 0
        self.shot_total_frame = 0
        self.weapon = Astronaut.BLUE
        self.make_bullet = True
        # facing direction
        self.direction = Astronaut.RIGHT_DIRECT
        # walking animation state (columns 0,1,2 cycle while moving;
        # is_move counts currently held movement keys)
        self.is_move = 0
        self.move_frame = 0
        self.move_total_frame = 0
        # jump state
        self.jump_state = Astronaut.FALLING
        self.now_jump_speed = 0
        self.jump_gap = 0
        self.down_key = False

    def reset(self, x, y):
        """Respawn at (x, y) with movement, jump and shot state cleared."""
        self.x, self.y = x, y
        self.direction = Astronaut.RIGHT_DIRECT
        self.is_move = 0
        self.move_frame = 0
        self.move_total_frame = 0
        self.jump_state = Astronaut.FALLING
        self.now_jump_speed = 0
        self.jump_gap = 0
        self.is_shot = False
        self.shot_frame = 0
        self.shot_total_frame = 0

    def update(self, frame_time):
        """Advance animation, horizontal movement and jump physics by *frame_time* seconds."""
        # shooting animation
        if self.is_shot:
            self.shot_frame = int(self.shot_total_frame) % 2 + 1
            self.shot_total_frame += self.weapon_property[self.weapon]['frame_per_shot'] * self.weapon_property[self.weapon]['time_per_shot'] * frame_time
        # horizontal movement (slower while airborne)
        if self.is_move == 0:
            self.move_frame = 1
        else:
            self.move_total_frame += Astronaut.FRAME_PER_MOVE * self.TIME_PER_MOVE * frame_time
            if self.direction == self.LEFT_DIRECT:
                self.move_frame = 2 - int(self.move_total_frame) % 3
                if self.jump_state == Astronaut.JUMP or self.jump_state == Astronaut.FALLING:
                    self.x -= self.JUMP_MOVE_SPEED_PPS * frame_time
                else:
                    self.x -= self.RUN_SPEED_PPS * frame_time
                self.x = max(self.x, 16)    # clamp to the left screen edge
            else:
                self.move_frame = int(self.move_total_frame) % 3
                if self.jump_state == Astronaut.JUMP or self.jump_state == Astronaut.FALLING:
                    self.x += self.JUMP_MOVE_SPEED_PPS * frame_time
                else:
                    self.x += self.RUN_SPEED_PPS * frame_time
                self.x = min(self.x, 1006)  # clamp to the right screen edge
        # jump physics
        if self.jump_state == Astronaut.JUMP or self.jump_state == Astronaut.FALLING:
            self.move_frame = 2  # airborne pose
        if self.jump_state == Astronaut.JUMP:
            # rising: gravity slows the jump until it turns into a fall
            self.now_jump_speed = self.now_jump_speed - Astronaut.GRAVITY_ACCELERATION_MPH * frame_time
            self.y += self.now_jump_speed * frame_time
            if self.now_jump_speed <= 0:
                self.jump_state = Astronaut.FALLING
        elif self.jump_state == Astronaut.FALLING:
            self.now_jump_speed = self.now_jump_speed + Astronaut.GRAVITY_ACCELERATION_MPH * frame_time
            self.y -= self.now_jump_speed * frame_time
        elif self.jump_gap > 0 and not(self.jump_state == self.JUMP):
            # carried upward by a platform while grounded
            self.y += self.jump_gap
            self.jump_gap = 0
        if self.y <= self.last_plate:
            # landed on the floor
            self.y = self.last_plate
            self.jump_state = Astronaut.NOT_JUMP

    def handle_event(self, event):
        """React to keyboard input (arrows, 1-5 weapon select, C shoot, X jump)."""
        # toggle bounding-box drawing (debug)
        if event.type == SDL_KEYDOWN and event.key == SDLK_F12:
            self.draw_bb_bool = not self.draw_bb_bool
        # movement keys; is_move counts how many are currently held
        elif event.type == SDL_KEYDOWN and event.key == SDLK_LEFT:
            self.direction = self.LEFT_DIRECT
            self.is_move += 1
        elif event.type == SDL_KEYUP and event.key == SDLK_LEFT:
            self.is_move -= 1
        elif event.type == SDL_KEYDOWN and event.key == SDLK_RIGHT:
            self.direction = self.RIGHT_DIRECT
            self.is_move += 1
        elif event.type == SDL_KEYUP and event.key == SDLK_RIGHT:
            self.is_move -= 1
        elif event.type == SDL_KEYDOWN and event.key == SDLK_DOWN:
            self.down_key = True
        elif event.type == SDL_KEYUP and event.key == SDLK_DOWN:
            self.down_key = False
        # weapon switching
        elif event.type == SDL_KEYDOWN and event.key == SDLK_1:
            self.weapon = Astronaut.BLUE
        elif event.type == SDL_KEYDOWN and event.key == SDLK_2:
            self.weapon = Astronaut.PURPLE
        elif event.type == SDL_KEYDOWN and event.key == SDLK_3:
            self.weapon = Astronaut.YELLOW
        elif event.type == SDL_KEYDOWN and event.key == SDLK_4:
            self.weapon = Astronaut.GREEN
        elif event.type == SDL_KEYDOWN and event.key == SDLK_5:
            self.weapon = Astronaut.RED
        # shooting
        elif event.type == SDL_KEYDOWN and event.key == SDLK_c:
            self.is_shot = True
        elif event.type == SDL_KEYUP and event.key == SDLK_c:
            self.is_shot = False
            self.shot_total_frame = 0
            self.shot_frame = 0
        # jump (X); with Down held, drop through the platform instead
        elif event.type == SDL_KEYDOWN and event.key == SDLK_x and self.jump_state == Astronaut.NOT_JUMP:
            if not self.down_key:
                self.now_jump_speed = Astronaut.JUMP_SPEED_PPS
                self.jump_state = Astronaut.JUMP
            else:
                self.y -= 0.5
                self.jump_state = Astronaut.FALLING

    def get_bb(self):
        """Return the bounding box as (left, bottom, right, top)."""
        return self.x - 18, self.y - 42, self.x + 18, self.y + 27

    def draw_bb(self):
        """Draw the bounding box (debug)."""
        draw_rectangle(*self.get_bb())

    def draw(self, frame_time):
        """Draw the sprite for the current direction/weapon/frame, plus the life bar."""
        if self.direction == Astronaut.LEFT_DIRECT:
            self.image.clip_draw((self.weapon_property[self.weapon]['draw_frame_num'] * 72) + self.move_frame * 72, (6 - self.shot_frame) * 96, 72, 96, self.x, self.y)
        else:
            self.image.clip_draw((self.weapon_property[self.weapon]['draw_frame_num'] * 72) + self.move_frame * 72, (3 - self.shot_frame) * 96, 72, 96, self.x, self.y)
        if self.draw_bb_bool:
            self.draw_bb()
            draw_rectangle(0, self.last_plate - 42, 1024, 0)
        self.life_image.clip_draw(0, self.life * 18, 69, 18, self.x, self.y + 43)
|
# -*- coding:utf-8 -*-
import sys, socket, json, threading, random
from PyQt5.QtWidgets import (QApplication, QWidget, QDesktopWidget, QMessageBox,
QPushButton, QTextEdit, QLineEdit, QToolTip, QLabel,
QTextEdit, QMessageBox, QProgressDialog, QComboBox,
QMainWindow, QFrame, QDesktopWidget)
from PyQt5.QtGui import QIcon, QFont, QPainter, QColor
from PyQt5.QtCore import QCoreApplication, Qt, QBasicTimer, pyqtSignal
HOST = "127.0.0.1" # server IP
PORT = 80 # server port number
ADDR = (HOST, PORT) # server address tuple
# Client login window
class Login(QWidget):
    """Login window: asks for a username and an optional server address."""

    def __init__(self):
        # build the UI
        super().__init__()
        self.initUI()

    def initUI(self):
        """Lay out labels, input fields and the login/quit buttons."""
        # window size, title, icon
        self.setGeometry(500, 500, 300, 300)
        self.center()
        self.setWindowTitle("三人俄罗斯方块")
        self.setWindowIcon(QIcon("眼镜蛇.png"))
        # labels
        self.lab1 = QLabel("用户名", self)
        self.lab1.setGeometry(20, 50, 100, 30)
        self.lab2 = QLabel("服务器地址", self)
        self.lab2.setGeometry(20, 100, 100, 30)
        # input fields
        self.current_username = QLineEdit("请输入用户名", self)
        self.current_username.setGeometry(100, 50, 150, 30)
        self.current_username.selectAll()
        self.addr_input = QLineEdit("", self)
        self.addr_input.setGeometry(100, 100, 150, 30)
        # tooltip font and size
        QToolTip.setFont(QFont("SansSerif", 10))
        # tooltip text
        self.addr_input.setToolTip("公网连接无需填写,局域网内填写地址")
        # default the username to the local hostname
        self.hostname = socket.gethostname()
        if len(self.hostname) != 0:
            self.current_username.setText(self.hostname)
            self.current_username.selectAll()
        # focus the username field
        self.current_username.setFocus()
        # quit button
        self.quit_btn = QPushButton("退出", self)
        self.quit_btn.clicked.connect(QCoreApplication.instance().quit)
        self.quit_btn.resize(100, 30)
        self.quit_btn.move(170, 200)
        # login button
        self.login_btn = QPushButton("登录", self)
        self.login_btn.resize(100, 30)
        self.login_btn.move(30, 200)
        self.login_btn.clicked.connect(self.login)
        self.show()

    def login(self):
        """Validate the username, connect to the server and open the lobby."""
        current_name = self.current_username.text()
        # An explicit "host:port" entry overrides the default server address.
        # BUGFIX: the old code parsed the input into unused locals HOST/PORT
        # and still connected to the default ADDR, ignoring the user's entry.
        addr = ADDR
        if len(self.addr_input.text()) != 0:
            host, port = self.addr_input.text().split(":")
            addr = (host, int(port))
        if len(current_name) == 0:
            current_name = "用户名为空"
            QMessageBox.about(self, "警报", current_name)
            # BUGFIX: was self.username.clear()/setFocus() -- no such
            # attribute exists; the field is self.current_username.
            self.current_username.clear()
            self.current_username.setFocus()
        else:
            QMessageBox.about(self, "用户名", "用户名:" + current_name)
            self.client = socket.socket()
            self.client.connect(addr)
            self.menu = Menu()
            self.menu.getconnect(current_name, self.client)
            self.menu.show()
            self.close()

    def showprogress(self):
        """Show a modal progress dialog while connecting."""
        self.progress = QProgressDialog(self)
        self.progress.setWindowTitle("登录")
        self.progress.setLabelText("正在连接中")
        self.progress.setCancelButtonText("取消")
        self.progress.setMinimumDuration(5)
        self.progress.setWindowModality(Qt.WindowModal)
        self.progress.setRange(0, 100000)
        for i in range(100000):
            self.progress.setValue(i)
            if self.progress.wasCanceled():
                QMessageBox.warning(self, "提示", "操作失败")
                break
        else:
            self.progress.setValue(100000)

    def center(self):
        """Center the window on the available screen."""
        self.qr = self.frameGeometry()
        self.cp = QDesktopWidget().availableGeometry().center()
        self.qr.moveCenter(self.cp)
        self.move(self.qr.topLeft())

    def closeEvent(self, event):
        """Ask for confirmation before closing the window."""
        # BUGFIX: the dialog title said "三人贪吃蛇" (three-player snake),
        # a copy-paste leftover; this is the Tetris client.
        reply = QMessageBox.question(self, '三人俄罗斯方块', "你确定要离开吗?",
                                     QMessageBox.Yes | QMessageBox.No,
                                     QMessageBox.No)
        if reply == QMessageBox.Yes:
            event.accept()
        else:
            event.ignore()
# Main menu (lobby) window
# shows the room's message list
class Menu(QWidget):
    """Lobby window: shows room members and system messages, lets players
    pick difficulty/speed and lets the room owner start the game."""
    def __init__(self):
        super().__init__()
        self.initUI()
    def initUI(self):
        """Build all lobby widgets."""
        # menu page geometry, title, icon
        self.setGeometry(300, 300, 300, 300)
        self.center()
        self.setWindowTitle("三人俄罗斯方块")
        self.setWindowIcon(QIcon("眼镜蛇.png"))
        self.title = QLabel(" 系统消息", self)
        self.title.setGeometry(0, 0, 300, 20)
        self.user_lab1 = QLabel("房主:", self)
        self.user_lab1.setGeometry(20, 180, 50, 20)
        self.user_lab2 = QLabel("玩家一:", self)
        self.user_lab2.setGeometry(120, 180, 50, 20)
        self.user_lab3 = QLabel("玩家二:", self)
        self.user_lab3.setGeometry(210, 180, 50, 20)
        self.username_lab1 = QLabel("", self)
        self.username_lab1.setGeometry(50, 180, 50, 20)
        self.username_lab2 = QLabel("", self)
        self.username_lab2.setGeometry(160, 180, 50, 20)
        self.username_lab3 = QLabel("", self)
        self.username_lab3.setGeometry(250, 180, 50, 20)
        # message list
        self.msg_list = QTextEdit("",self)
        self.msg_list.setGeometry(10, 20, 280, 150)
        self.lab1 = QLabel("难度", self)
        self.lab1.setGeometry(10, 220, 30, 20)
        self.lab2 = QLabel("速度", self)
        self.lab2.setGeometry(10, 250, 30, 20)
        self.lab3 = QLabel("", self)
        self.lab3.setGeometry(150 , 250, 30, 30)
        # difficulty selector
        self.double_box1 = QComboBox(self)
        self.double_box1.setGeometry(50, 220, 60, 20)
        self.double_box1.addItem("1")
        self.double_box1.addItem("2")
        self.double_box1.addItem("3")
        self.double_box1.addItem("4")
        # drop-speed selector
        self.double_box2 = QComboBox(self)
        self.double_box2.setGeometry(50, 250, 60, 20)
        self.double_box2.addItem("1")
        self.double_box2.addItem("2")
        self.double_box2.addItem("3")
        self.double_box2.addItem("4")
        self.double_box1.currentTextChanged.connect(self.get_difficulty)
        self.double_box2.currentTextChanged.connect(self.get_speed)
        # wait-for-game button (icon switches when the room fills up)
        self.wait_btn = QPushButton("", self)
        self.wait_btn.setGeometry(200, 220, 50, 50)
        self.wait_btn.setIcon(QIcon("暂停.png"))
        # # show scores and levels (disabled)
        # self.player_1__name = QLabel:
        # self.player_1_grade = QLabel("", self)
        # self.player_1_grade.setGeometry()
        # self.player_2_grade = QLabel("", self)
        # self.player_2_grade.setGeometry()
        # self.player_3_grade = QLabel("", self)
        # self.player_3_grade.setGeometry()
        # self.player_1_level = QLabel("", self)
        # self.player_1_level.setGeometry()
        # self.player_2_level = QLabel("", self)
        # self.player_2_level.setGeometry()
        # self.player_3_level = QLabel("", self)
        # self.player_3_level.setGeometry()
        # make the message box read-only
        self.msg_list.setReadOnly(True)
        self.actor = None
    def get_difficulty(self):
        """Slot: record the selected difficulty level."""
        self.difficulty = self.double_box1.currentText()
        print("难度等级" + self.difficulty)
    def get_speed(self):
        """Slot: record the selected block drop speed."""
        self.speed = self.double_box2.currentText()
        print("下降速度" + self.speed)
    def getconnect(self, username, client):
        """Register this player with the server and populate the room labels.

        Sends an "enter" message, applies the server's reply to the UI, then
        starts the background listener thread (manage).
        """
        # store the username and connection
        self.username = username
        self.client = client
        self.data = {}
        self.data["username"] = username
        self.data["behavior"] = "enter"
        self.client.send((json.dumps(self.data)).encode("utf-8"))
        self.msg = json.loads((self.client.recv(50960)).decode("utf-8"))
        print(self.msg)
        if self.msg["result"] == "OK":
            self.showprogress()
            self.msg_list.append("服务器连接成功")
            self.msg_list.append("server: " + self.msg["reason"])
            # number == 0 means this player is the room owner
            if self.msg["number"] == 0:
                self.msg_list.append("system: " + username + " 成为房主")
                self.username_lab1.setText(username)
                # room_content is the number of players already in the room
                if self.msg["room_content"] == 0:
                    pass
                elif self.msg["room_content"] == 1:
                    pass
                elif self.msg["room_content"] == 2:
                    self.username_lab2.setText(self.msg["new_user2"])
                    self.msg_list.append("system: " + self.msg["new_user2"] + "成为玩家2")
                elif self.msg["room_content"] == 3:
                    self.username_lab2.setText(self.msg["new_user2"])
                    self.msg_list.append("system: " + self.msg["new_user2"] + "成为玩家2")
                    self.username_lab3.setText(self.msg["new_user3"])
                    self.msg_list.append("system: " + self.msg["new_user3"] + "成为玩家3")
            else:
                self.msg_list.append("system: " + username + " 成为玩家" +
                                     str(self.msg["number"] + 1))
                if self.msg["room_content"] == 0:
                    pass
                elif self.msg["room_content"] == 1:
                    pass
                elif self.msg["room_content"] == 2:
                    self.username_lab1.setText(self.msg["new_roomer"])
                    self.msg_list.append("system: " + self.msg["new_roomer"] + " 是房主")
                    self.username_lab2.setText(self.msg["new_user2"])
                    self.username_lab3.setText("")
                elif self.msg["room_content"] == 3:
                    self.username_lab1.setText(self.msg["new_roomer"])
                    self.msg_list.append("system: " + self.msg["new_roomer"] + " 是房主")
                    self.username_lab2.setText(self.msg["new_user2"])
                    self.msg_list.append("system: " + self.msg["new_user2"] + " 是玩家2")
                    self.username_lab3.setText(self.msg["new_user3"])
        elif self.msg["result"] == "refuse":
            self.showprogress()
            self.msg_list.append("服务器连接失败")
            self.msg_list.append("error: " + self.msg["reason"])
        # background thread that keeps listening for server messages
        chat = threading.Thread(target=self.manage, args=())
        chat.start()
    def manage(self):
        """Background loop: apply server room/game messages to the UI.

        Runs until a "game start" message arrives, then opens the game.
        """
        # listen for messages
        while True:
            self.rec = self.client.recv(40960)
            print(self.rec)
            self.rec = json.loads((self.rec).decode("utf-8"))
            print(self.rec)
            if self.rec["result"] == "OK":
                if self.rec["behavior"] == "add new user":
                    # a new player joined the room
                    print("加入新的玩家")
                    self.msg_list.append("system: " + "玩家" +
                                         str(self.rec["new_user_number"] + 1) +
                                         " " + str(self.rec["new_username"]) + " 加入房间")
                    print("*"*100)
                    if self.rec["room_content"] == 0:
                        self.username_lab1.setText("")
                        self.username_lab2.setText("")
                        self.username_lab3.setText("")
                    elif self.rec["room_content"] == 1:
                        self.username_lab1.setText(self.rec["new_roomer"])
                        self.username_lab2.setText("")
                        self.username_lab3.setText("")
                    elif self.rec["room_content"] == 2:
                        self.username_lab1.setText(self.rec["new_roomer"])
                        self.username_lab2.setText(self.rec["new_user2"])
                        self.username_lab3.setText("")
                    elif self.rec["room_content"] == 3:
                        self.username_lab1.setText(self.rec["new_roomer"])
                        self.username_lab2.setText(self.rec["new_user2"])
                        print("hello world")
                        self.username_lab3.setText(self.rec["new_user3"])
                        print("goodbye world")
                elif self.rec["behavior"] == "user leave":
                    # a non-owner player left the room
                    if self.rec["leave_username"] == self.username:
                        self.msg_list.append("system: 本机离开房间")
                    else:
                        self.msg_list.append("system: 玩家"+
                                             str(self.rec["leave_user_number"] + 1)+
                                             " " + str(self.rec["leave_username"]) +
                                             " 离开房间")
                    print(self.rec["leave_user_number"])
                    if (self.rec["room_content"]) == 0:
                        self.username_lab1.setText("")
                        self.username_lab2.setText("")
                        self.username_lab3.setText("")
                    elif (self.rec["room_content"]) == 1:
                        self.username_lab1.setText(self.rec["new_roomer"])
                        self.username_lab2.setText("")
                        self.username_lab3.setText("")
                        self.msg_list.setText("system: 房间中剩余玩家为 房主 " + self.rec["new_roomer"] + " 其余玩家已离开")
                    elif (self.rec["room_content"]) == 2:
                        self.username_lab1.setText(self.rec["new_roomer"])
                        self.username_lab2.setText(self.rec["new_user2"])
                        self.username_lab3.setText("")
                    elif (self.rec["room_content"]) == 3:
                        self.username_lab1.setText(self.rec["new_roomer"])
                        self.username_lab2.setText(self.rec["new_user2"])
                        self.username_lab3.setText(self.rec["new_user3"])
                elif self.rec["behavior"] == "roomer leave":
                    # the room owner left; ownership is reassigned
                    self.msg_list.append("system: 房主 " + self.rec["leave_username"] + " 离开房间")
                    if (self.rec["room_content"]) == 0:
                        self.username_lab1.setText("")
                        self.username_lab2.setText("")
                        self.username_lab3.setText("")
                    elif (self.rec["room_content"]) == 1:
                        self.username_lab1.setText(self.rec["new_roomer"])
                        self.username_lab2.setText("")
                        self.username_lab3.setText("")
                        self.msg_list.append("system: " + self.rec["new_roomer"] + " 成为房主")
                    elif (self.rec["room_content"]) == 2:
                        self.username_lab1.setText(self.rec["new_roomer"])
                        self.username_lab2.setText(self.rec["new_user2"])
                        self.username_lab3.setText("")
                        self.msg_list.append("system: " + self.rec["new_roomer"] + " 成为房主")
                        self.msg_list.append("system: " + self.rec["new_user2"] + " 成为玩家2")
                    elif (self.rec["room_content"]) == 3:
                        self.username_lab1.setText(self.rec["new_roomer"])
                        self.username_lab2.setText(self.rec["new_user2"])
                        self.username_lab3.setText(self.rec["new_user3"])
                        self.msg_list.append("system: " + self.rec["new_roomer"] + " 成为房主")
                        self.msg_list.append("system: " + self.rec["new_user2"] + " 成为玩家2")
                        self.msg_list.append("system: " + self.rec["new_user3"] + " 成为玩家3")
                elif self.rec["behavior"] == "ready to begain":
                    # room is full and this client is the owner
                    self.msg_list.append("system: 房间已满,可以开始游戏了")
                    # set the start icon
                    self.wait_btn.setIcon(QIcon("开始.png"))
                    # hook up the start-game action
                    self.wait_btn.clicked.connect(self.start_game)
                elif self.rec["behavior"] == "please wait":
                    self.msg_list.append("system: 房间已满,等待房主开始游戏")
                    self.wait_btn.setIcon(QIcon("等待.png"))
                elif self.rec["behavior"] == "not full":
                    self.msg_list.append("system: 房间未满,等待玩家加入")
                elif self.rec["behavior"] == "game start":
                    self.play_game()
                    break
    def play_game(self):
        """Open the Tetris window (non-owner players, on "game start")."""
        self.game = Tetris()
        print(type(self.client))
        self.game.get_connect(self.client, self.username)
        self.game.show()
        self.close()
    def start_game(self):
        """Room owner: tell the server to start, then open the game window."""
        msg = {
            "behavior": "start game",
            "username": self.username
        }
        msg = (json.dumps(msg)).encode("utf-8")
        self.client.send(msg)
        self.game = Tetris()
        print(type(self.client))
        self.game.get_connect(self.client, self.username)
        self.game.show()
        self.close()
    def showprogress(self):
        """Show a modal progress dialog while connecting."""
        self.progress = QProgressDialog(self)
        self.progress.setWindowTitle("登录")
        self.progress.setLabelText("正在连接中")
        self.progress.setCancelButtonText("取消")
        self.progress.setMinimumDuration(5)
        self.progress.setWindowModality(Qt.WindowModal)
        self.progress.setRange(0, 50000)
        for i in range(50000):
            self.progress.setValue(i)
            if self.progress.wasCanceled():
                QMessageBox.warning(self, "提示", "操作失败")
                break
        else:
            self.progress.setValue(50000)
    def center(self):
        """Center the window on the available screen."""
        self.qr = self.frameGeometry()
        self.cp = QDesktopWidget().availableGeometry().center()
        self.qr.moveCenter(self.cp)
        self.move(self.qr.topLeft())
    # def closeEvent(self, event):
    #     # close confirmation
    #     reply = QMessageBox.question(self, '三人俄罗斯方块',"你确定要离开吗?",
    #                                  QMessageBox.Yes | QMessageBox.No,
    #                                  QMessageBox.No)
    #     if reply == QMessageBox.Yes:
    #         event.accept()
    #     else:
    #         event.ignore()
# Game module
class Tetris(QMainWindow):
    """Main window for one player's game; hosts the Board widget."""
    # game window
    def __init__(self):
        super().__init__()
        self.initUI()
    def initUI(self):
        """Create the board and status bar, wire them up, and start the game."""
        self.tboard = Board(self)
        self.setCentralWidget(self.tboard)
        self.statusbar = self.statusBar()
        # the board publishes its score/status text through this signal
        self.tboard.msg2Statusbar[str].connect(self.statusbar.showMessage)
        self.tboard.start()
        self.resize(180, 380)
        self.center()
        self.setWindowTitle('三人俄罗斯方块')
        self.setWindowIcon(QIcon("眼镜蛇.png"))
    def get_connect(self, client, username):
        """Pass the server socket and player name down to the board."""
        self.tboard.get_connect(client, username)
        self.setWindowTitle(username + ' 三人俄罗斯方块')
    def closeEvent(self, event):
        # close confirmation
        reply = QMessageBox.question(self, '三人俄罗斯方块',"你确定要离开吗?",
                                     QMessageBox.Yes | QMessageBox.No,
                                     QMessageBox.No)
        if reply == QMessageBox.Yes:
            # self.data["behavior"] = "quit"
            # self.client.sendall((json.dumps(self.data)).encode("utf-8"))
            event.accept()
        else:
            event.ignore()
    def center(self):
        # center the window on the available screen
        self.qr = self.frameGeometry()
        self.cp = QDesktopWidget().availableGeometry().center()
        self.qr.moveCenter(self.cp)
        self.move(self.qr.topLeft())
    def showprogress(self):
        # progress bar shown while the game loads
        self.progress = QProgressDialog(self)
        self.progress.setWindowTitle("加载中")
        self.progress.setLabelText("正在加载游戏")
        self.progress.setCancelButtonText("取消")
        self.progress.setMinimumDuration(5)
        self.progress.setWindowModality(Qt.WindowModal)
        self.progress.setRange(0, 50000)
        for i in range(50000):
            self.progress.setValue(i)
            if self.progress.wasCanceled():
                QMessageBox.warning(self, "提示", "操作失败")
                break
        else:
            self.progress.setValue(50000)
class Board(QFrame):
    """Tetris playing field: board state, game timer, input, painting and
    score reporting to the server."""

    # emits the cleared-line count / status text for the status bar
    msg2Statusbar = pyqtSignal(str)
    BoardWidth = 10
    BoardHeight = 22
    Speed = 300  # ms between automatic one-line drops

    def __init__(self, parent):
        super().__init__(parent)
        self.initBoard()

    def get_connect(self, client, username):
        """Store the server socket and player name used to report scores."""
        self.client = client
        print(type(self.client))
        self.username = username

    def initBoard(self):
        """Reset all game state; the game itself starts via start()."""
        self.timer = QBasicTimer()
        self.isWaitingAfterLine = False
        self.curX = 0
        self.curY = 0
        self.numLinesRemoved = 0
        self.board = []
        self.setFocusPolicy(Qt.StrongFocus)
        self.isStarted = False
        self.isPaused = False
        self.clearBoard()

    def shapeAt(self, x, y):
        """Return the Tetrominoe code stored at board cell (x, y)."""
        return self.board[(y * Board.BoardWidth) + x]

    def setShapeAt(self, x, y, shape):
        """Store *shape* at board cell (x, y)."""
        self.board[(y * Board.BoardWidth) + x] = shape

    def squareWidth(self):
        """Pixel width of one board cell."""
        return self.contentsRect().width() // Board.BoardWidth

    def squareHeight(self):
        """Pixel height of one board cell."""
        return self.contentsRect().height() // Board.BoardHeight

    def start(self):
        """Begin a new game (no-op while paused)."""
        if self.isPaused:
            return
        self.isStarted = True
        self.isWaitingAfterLine = False
        self.numLinesRemoved = 0
        self.clearBoard()
        self.msg2Statusbar.emit(str(self.numLinesRemoved))
        self.newPiece()
        self.timer.start(Board.Speed, self)

    def pause(self):
        """Toggle the paused state and the drop timer."""
        if not self.isStarted:
            return
        self.isPaused = not self.isPaused
        if self.isPaused:
            self.timer.stop()
            self.msg2Statusbar.emit("paused")
        else:
            self.timer.start(Board.Speed, self)
            self.msg2Statusbar.emit(str(self.numLinesRemoved))
        self.update()

    def paintEvent(self, event):
        """Draw the settled squares and the currently falling piece."""
        painter = QPainter(self)
        rect = self.contentsRect()
        boardTop = rect.bottom() - Board.BoardHeight * self.squareHeight()
        for i in range(Board.BoardHeight):
            for j in range(Board.BoardWidth):
                shape = self.shapeAt(j, Board.BoardHeight - i - 1)
                if shape != Tetrominoe.NoShape:
                    self.drawSquare(painter,
                                    rect.left() + j * self.squareWidth(),
                                    boardTop + i * self.squareHeight(), shape)
        if self.curPiece.shape() != Tetrominoe.NoShape:
            for i in range(4):
                x = self.curX + self.curPiece.x(i)
                y = self.curY - self.curPiece.y(i)
                self.drawSquare(painter, rect.left() + x * self.squareWidth(),
                                boardTop + (Board.BoardHeight - y - 1) * self.squareHeight(),
                                self.curPiece.shape())

    def keyPressEvent(self, event):
        """Move/rotate the falling piece; Enter toggles pause."""
        if not self.isStarted or self.curPiece.shape() == Tetrominoe.NoShape:
            super(Board, self).keyPressEvent(event)
            return
        key = event.key()
        if key == Qt.Key_Enter:
            self.pause()
            return
        if self.isPaused:
            return
        elif key == Qt.Key_Left:
            self.tryMove(self.curPiece, self.curX - 1, self.curY)
        elif key == Qt.Key_Right:
            self.tryMove(self.curPiece, self.curX + 1, self.curY)
        elif key == Qt.Key_Up:
            self.tryMove(self.curPiece.rotateLeft(), self.curX, self.curY)
        elif key == Qt.Key_Down:
            self.oneLineDown()
        else:
            super(Board, self).keyPressEvent(event)

    def timerEvent(self, event):
        """Advance the game one step per drop-timer tick."""
        if event.timerId() == self.timer.timerId():
            if self.isWaitingAfterLine:
                self.isWaitingAfterLine = False
                self.newPiece()
            else:
                self.oneLineDown()
        else:
            super(Board, self).timerEvent(event)

    def clearBoard(self):
        """Reset the board to all-empty cells.

        BUGFIX: the list is rebuilt instead of appended to; the old code
        grew the board by another BoardHeight*BoardWidth phantom cells every
        time start() called it again.
        """
        self.board = []
        for i in range(Board.BoardHeight * Board.BoardWidth):
            self.board.append(Tetrominoe.NoShape)

    def oneLineDown(self):
        """Drop the piece one row; settle it if it cannot move down."""
        if not self.tryMove(self.curPiece, self.curX, self.curY - 1):
            self.pieceDropped()

    def pieceDropped(self):
        """Freeze the current piece into the board and spawn the next one."""
        for i in range(4):
            x = self.curX + self.curPiece.x(i)
            y = self.curY - self.curPiece.y(i)
            self.setShapeAt(x, y, self.curPiece.shape())
        self.removeFullLines()
        if not self.isWaitingAfterLine:
            self.newPiece()

    def removeFullLines(self):
        """Clear filled rows, update the score and report it to the server."""
        numFullLines = 0
        rowsToRemove = []
        for i in range(Board.BoardHeight):
            n = 0
            for j in range(Board.BoardWidth):
                if not self.shapeAt(j, i) == Tetrominoe.NoShape:
                    n = n + 1
            if n == 10:
                rowsToRemove.append(i)
        # remove from the top down so lower row indices stay valid
        rowsToRemove.reverse()
        for m in rowsToRemove:
            # BUGFIX: shift only up to BoardHeight - 1; the old code read
            # row BoardHeight (via k + 1), which indexes past the end of a
            # correctly sized board list.
            for k in range(m, Board.BoardHeight - 1):
                for l in range(Board.BoardWidth):
                    self.setShapeAt(l, k, self.shapeAt(l, k + 1))
            # the vacated top row becomes empty
            for l in range(Board.BoardWidth):
                self.setShapeAt(l, Board.BoardHeight - 1, Tetrominoe.NoShape)
        numFullLines = numFullLines + len(rowsToRemove)
        if numFullLines > 0:
            self.numLinesRemoved = self.numLinesRemoved + numFullLines
            # report the new score to the server
            msg = {
                "behavior": "grade",
                "grade": self.numLinesRemoved,
                "username": self.username
            }
            self.client.send((json.dumps(msg)).encode("utf-8"))
            self.msg2Statusbar.emit(str(self.numLinesRemoved))
            # skip one timer tick before spawning the next piece
            self.isWaitingAfterLine = True
            self.curPiece.setShape(Tetrominoe.NoShape)
            self.update()

    def newPiece(self):
        """Spawn a random piece at the top; game over if it does not fit."""
        self.curPiece = Shape()
        self.curPiece.setRandomShape()
        self.curX = Board.BoardWidth // 2 + 1
        self.curY = Board.BoardHeight - 1 + self.curPiece.minY()
        if not self.tryMove(self.curPiece, self.curX, self.curY):
            self.curPiece.setShape(Tetrominoe.NoShape)
            self.timer.stop()
            self.isStarted = False
            self.msg2Statusbar.emit("Game over")

    def tryMove(self, newPiece, newX, newY):
        """Apply the move/rotation if every block lands on a free in-bounds cell."""
        for i in range(4):
            x = newX + newPiece.x(i)
            y = newY - newPiece.y(i)
            if x < 0 or x >= Board.BoardWidth or y < 0 or y >= Board.BoardHeight:
                return False
            if self.shapeAt(x, y) != Tetrominoe.NoShape:
                return False
        self.curPiece = newPiece
        self.curX = newX
        self.curY = newY
        self.update()
        return True

    def drawSquare(self, painter, x, y, shape):
        """Paint one cell in the colour for *shape*, with light/dark edges."""
        colorTable = [0x000000, 0xCC6666, 0x66CC66, 0x6666CC,
                      0xCCCC66, 0xCC66CC, 0x66CCCC, 0xDAAA00]
        color = QColor(colorTable[shape])
        painter.fillRect(x + 1, y + 1, self.squareWidth() - 2,
                         self.squareHeight() - 2, color)
        painter.setPen(color.lighter())
        painter.drawLine(x, y + self.squareHeight() - 1, x, y)
        painter.drawLine(x, y, x + self.squareWidth() - 1, y)
        painter.setPen(color.darker())
        painter.drawLine(x + 1, y + self.squareHeight() - 1,
                         x + self.squareWidth() - 1, y + self.squareHeight() - 1)
        painter.drawLine(x + self.squareWidth() - 1,
                         y + self.squareHeight() - 1, x + self.squareWidth() - 1, y + 1)
class Tetrominoe(object):
    """Integer codes for board cells: 0 = empty, 1-7 = the seven tetrominoes."""
    (NoShape, ZShape, SShape, LineShape,
     TShape, SquareShape, LShape, MirroredLShape) = range(8)
class Shape(object):
    """A tetromino: a shape id plus the four (x, y) offsets of its blocks."""

    # Block offsets per shape id; the index matches the Tetrominoe constants.
    coordsTable = (
        ((0, 0), (0, 0), (0, 0), (0, 0)),
        ((0, -1), (0, 0), (-1, 0), (-1, 1)),
        ((0, -1), (0, 0), (1, 0), (1, 1)),
        ((0, -1), (0, 0), (0, 1), (0, 2)),
        ((-1, 0), (0, 0), (1, 0), (0, 1)),
        ((0, 0), (1, 0), (0, 1), (1, 1)),
        ((-1, -1), (0, -1), (0, 0), (0, 1)),
        ((1, -1), (0, -1), (0, 0), (0, 1))
    )

    def __init__(self):
        self.coords = [[0, 0] for _ in range(4)]
        self.pieceShape = Tetrominoe.NoShape
        self.setShape(Tetrominoe.NoShape)

    def shape(self):
        """Return the current shape id."""
        return self.pieceShape

    def setShape(self, shape):
        """Copy the block offsets for *shape* out of coordsTable."""
        for idx, (cx, cy) in enumerate(Shape.coordsTable[shape]):
            self.coords[idx][0] = cx
            self.coords[idx][1] = cy
        self.pieceShape = shape

    def setRandomShape(self):
        """Switch to one of the seven real shapes, chosen uniformly."""
        self.setShape(random.randint(1, 7))

    def x(self, index):
        """X offset of block *index*."""
        return self.coords[index][0]

    def y(self, index):
        """Y offset of block *index*."""
        return self.coords[index][1]

    def setX(self, index, x):
        """Set the x offset of block *index*."""
        self.coords[index][0] = x

    def setY(self, index, y):
        """Set the y offset of block *index*."""
        self.coords[index][1] = y

    def minX(self):
        """Smallest x offset among the four blocks."""
        return min(pt[0] for pt in self.coords)

    def maxX(self):
        """Largest x offset among the four blocks."""
        return max(pt[0] for pt in self.coords)

    def minY(self):
        """Smallest y offset among the four blocks."""
        return min(pt[1] for pt in self.coords)

    def maxY(self):
        """Largest y offset among the four blocks."""
        return max(pt[1] for pt in self.coords)

    def rotateLeft(self):
        """Return a new Shape rotated 90° counter-clockwise (squares return self)."""
        if self.pieceShape == Tetrominoe.SquareShape:
            return self
        rotated = Shape()
        rotated.pieceShape = self.pieceShape
        for idx in range(4):
            rotated.setX(idx, self.y(idx))
            rotated.setY(idx, -self.x(idx))
        return rotated

    def rotateRight(self):
        """Return a new Shape rotated 90° clockwise (squares return self)."""
        if self.pieceShape == Tetrominoe.SquareShape:
            return self
        rotated = Shape()
        rotated.pieceShape = self.pieceShape
        for idx in range(4):
            rotated.setX(idx, -self.y(idx))
            rotated.setY(idx, self.x(idx))
        return rotated
if __name__ == '__main__':
    # Launch the Qt application with the login window.
    # BUGFIX: removed a stray trailing '|' (extraction artifact) that made
    # the last line a SyntaxError.
    app = QApplication(sys.argv)
    login = Login()
    sys.exit(app.exec_())
# -*- coding: utf-8 -*-
import nltk
if __name__ == '__main__':
    # Download the NLTK resources this project needs: the stopword
    # corpus and the VADER sentiment lexicon.
    for resource in ("stopwords", "vader_lexicon"):
        nltk.download(resource)
|
# source: Internet
def r(a):
    """Recursively solve a sudoku puzzle by backtracking.

    *a* is an 81-character row-major string with '0' marking empty cells.
    The first solution found is printed and the process stops via
    SystemExit, matching the original golfed one-liner's behaviour.

    BUGFIX: the original golfed body was Python-2-only (true division from
    `/` feeds `^`, which raises TypeError on floats in Python 3); rewritten
    readably with floor division.
    """
    import sys
    i = a.find('0')
    if i < 0:
        # No empty cell left: print the solved grid and stop.
        sys.exit(a)
    for m in '123456789':
        # Place m at cell i only if no peer cell already holds m.
        conflict = any(
            a[j] == m and (
                (i - j) % 9 == 0                    # same column
                or i // 9 == j // 9                 # same row
                or (i // 27 == j // 27              # same 3-row band and
                    and i % 9 // 3 == j % 9 // 3)   # same 3-column band => same box
            )
            for j in range(81)
        )
        if not conflict:
            r(a[:i] + m + a[i + 1:])
import sys

# Entry point: the 81-character puzzle is passed as the first CLI argument.
# (Replaced the wildcard `from sys import *` with an explicit import.)
r(sys.argv[1])
# inp: 530070000600195000098000060800060003400803001700020006060000280000419005000080079
from classes.Controller.Scanner_Controller import *
from classes.View.Scanner_View import *
from classes.Model.Scanner_Model import *
class PortscanMain:
    """Entry point wiring the scanner's Model, Controller and View together."""
    def __init__(self):
        self.model = dataObject()
        self.controller = dataHandler(self.model)
        self.view = Window(self.controller, self.model)


# BUGFIX: guard the launch so importing this module has no side effects
# (also removes a stray trailing '|' artifact that broke the line).
if __name__ == "__main__":
    PortscanMain()
import pickle,os,random
class newacc:
    def __init__(self):
        """Initialise every account field with a placeholder value."""
        self.user="null"
        self.password="null"
        self.acctype=0
        self.accno=0
        self.name="null"
        self.gender="null"
        self.income=0
        self.email="null"
        self.dob="null"
        self.address="null"
        self.desig="null"
        self.company="null"
        self.telno="null"
        self.mobno="null"
        self.balance=0
    def uspas(self):
        """Prompt for and validate a username and password (Python 2 I/O).

        Username: at most 10 characters and must contain a digit.
        Password: at least 10 characters and must contain a digit.
        """
        digits=' 0123456789 '
        # NOTE(review): the spaces inside `digits` mean a space character
        # also passes the password digit check -- confirm intended.
        us=' '
        while True:
            try:
                length=Exception("Maximum no: of char = 10 .")
                num=Exception("Should contain digits ." )
                dig=0
                us=raw_input("Enter the username you wish to use (maximum 10 characters and should contain numeric characters) : ")
                if len(us)>10:
                    raise length
                else:
                    for i in us:
                        if i.isdigit():
                            dig=1
                            break
                    if dig ==0:
                        raise num
            # NOTE(review): both handlers catch any Exception (Python 2
            # `except Exception, name` syntax), so the second handler is
            # unreachable; the first prints whichever object was raised.
            except Exception, length:
                print length.message
            except Exception, num:
                print num.message
            else:
                self.user=us
                break
        while True:
            try:
                length=Exception("Maximum no: of char = 10 .")
                num=Exception("Should contain digits ." )
                pas=raw_input("Enter the password (Your password must contain Base 10 digits (0 through 9) and a minimum length of 10 characters :")
                dig=0
                # stop at the first character counted as a digit
                for i in pas:
                    if i in digits:
                        dig+=1
                        break
                z=len(pas)
                if dig==0:
                    raise num
                elif z<10:
                    raise length
            except Exception, num:
                print num.message
            except Exception, length:
                print length.message
            else:
                cpd=raw_input("Confirm your password: ")
                if pas==cpd:
                    print "Your account has been created."
                    self.password=pas
                    break
                else:
                    print "Passwords do not match."
    def getdata(self):
        """Interactively collect all account details (Python 2 I/O).

        Sets the opening balance from the chosen account type and assigns
        a random 6-digit account number.
        """
        print "At OOEHS we offer three types of accounts depending on your day to day needs"
        print "1)Savings Account \n2) Current Account \n3) Fixed Deposit Account"
        self.acctype=input ("For Savings Account , Press 1. \nFor Current Account , Press 2. \nFor Fixed Deposit Account, Press 3. \nYour choice: ")
        if self.acctype==1:
            self.balance=8000
        elif self.acctype==2:
            self.balance=4000
        elif self.acctype==3:
            self.balance=10000
        self.income=input("Monthly Income: ")
        if self.income <5000:
            print "You are not eligible for creating a call deposit account as your monthly income is less than 5000. "
            # NOTE(review): after this recursive retry returns, execution
            # falls through and continues below as well -- likely unintended
            # duplicate prompting.
            self.getdata()
        self.accno=random.randint(111111,999999)
        self.name=raw_input("Name: ")
        self.gender=raw_input("Gender (M/F): ")
        self.email=raw_input("Email ID: ")
        self.dob=raw_input("Date of Birth (dd/mm/yyyy): ")
        self.address=raw_input("Residence Address: ")
        self.desig=raw_input("Designation: ")
        self.company=raw_input("Company Name: ")
        self.telno=raw_input("Telephone Number: ")
        self.mobno=raw_input("Mobile Number: ")
        self.uspas()
        print "PLS NOTE DOWN YOUR ACCOUNT NUMBER: ",self.accno
def update(self,c):
print "NOTE: Account Type, Account Number and Balance cannot be updated. \n Only personal details can be updated. "
self.acctype,self.balance,self.accno=c.acctype,c.balance,c.accno
ch=raw_input("Do you want to change the name? ")
if ch.lower()=='y' or ch.lower()=='yes':
self.name=raw_input("Name:")
else:
self.name=c.name
ch=raw_input("Do you want to change the income?")
if ch.lower()=='y' or ch.lower()=='yes':
self.income=raw_input("Income: ")
else:
self.income=c.income
ch=raw_input("Do you want to update the gender information?")
if ch.lower()=='y' or ch.lower()=='yes':
self.gender=raw_input("Gender(M/F): ")
else:
self.gender=c.gender
ch=raw_input("Do you want to change the email id?")
if ch.lower()=='y'or ch.lower()=='yes':
self.email=raw_input("Email ID: ")
else:
self.email=c.email
ch=raw_input("Do you want to change the date of birth?")
if ch.lower()=='y'or ch.lower()=='yes':
self.dob=raw_input("Date of Birth (dd/mm/yyyy): ")
else:
self.dob=c.dob
ch=raw_input("Do you want to change the Residence Address?")
if ch.lower()=='y'or ch.lower()=='yes':
self.address=raw_input("Residence Address: ")
else:
self.address=c.address
ch=raw_input("Do you want to change the designation?")
if ch.lower()=='y'or ch.lower()=='yes':
self.desig=raw_input("Designation: ")
else:
self.desig=c.desig
ch=raw_input("Do you want to change the company?")
if ch.lower()=='y'or ch.lower()=='yes':
self.company=raw_input("Company Name : ")
else:
self.company=c.company
ch=raw_input("Do you want to change the telephone number?")
if ch.lower()=='y'or ch.lower()=='yes':
self.telno=raw_input("Telephone Number: ")
else:
self.telno=c.telno
ch=raw_input("Do you want to change the mobile number?")
if ch.lower()=='y'or ch.lower()=='yes':
self.mobno=raw_input("Mobile Number: ")
else:
self.mobno=c.mobno
ch=raw_input("Do you want to change the username and password?")
if ch.lower()=='y'or ch.lower()=='yes':
self.uspas()
else:
self.user=c.user
self.password=c.password
print "Your Personal details have been updated successfully"
def display(self):
    """Print this customer's personal details and account details as
    tab-separated, width-padded rows under dashed headers."""
    print "Personal Details"
    print "-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------"
    print "%10s\t%4s\t%6s\t%13s\t%10s\t%8s\t%8s\t%8s\t%9s\t%9s\t"%(self.name,self.gender,self.income,self.email,self.dob,self.address,self.desig,self.company,self.telno,self.mobno)
    print
    print "Account Details"
    print "------------------------------------"
    print "%8s\t%10s\t%6s\t" %(self.accno,self.acctype,self.balance)
    print
class LOANS:
def __init__(self):
self.accno=0
self.name="null"
self.Type=0
self.r=0
self.p=0
self.ty=0
self.tm=0
self.repay="null"
self.amt=0
def loans(self,c):
self.accno=c.accno
self.name=c.name
print "OOEHS Banking provides a wide range of loan options to choose from. \nThis quality of utility gives you the decision of selecting the type of loan you wish to use depending on your purpose. \n And as responsible bankers, your satisfaction is our priority ."
self.Type=input("1.Home Equity \n2.Education \n3.Personal \n4.Buisness \n5.Vehicle \n6.Farming \nEnter the type of loan you wish to take: ")
if self.Type==1:
self.r=random.randint(14,17)
print self.r, "% is the interest rate per annum."
elif self.Type==2:
self.r=random.randint(6,9)
print self.r, "% is the interest rate per annum."
elif self.Type==3:
self.r=random.randint(5,8)
print self.r, "% is the interest rate per annum."
elif self.Type==4:
self.r=random.randint(10,13)
print self.r, "% is the interest rate per annum."
elif self.Type==5:
self.r=random.randint(8,12)
print self.r, "% is the interest rate per annum."
elif self.Type==6:
self.r=random.randint(3,5)
print self.r, "% is the interest rate per annum."
asset=raw_input("Do you have an asset(y/n)? ")
if asset=='n':
print 'Without an asset, you do not have the permission to take the loan.'
elif asset=='y':
self.p=input("Total amount required: ")
self.ty=input("Total number of years: ")
self.tm=input("Remaining number of months: ")
T=self.ty+(self.tm/12.0)
si=(self.p*self.r*T)/100.0
L=si+self.p
print '\nHow do you wish to repay your loan?'
self.repay=input("1.One-time \n2.Monthly \n3. 6 Months \nEnter your choice: ")
if self.repay==1:
self.amt=L
print "\nAmount to be paid in exactly ", self.ty, "years and ",self.tm, " months is ", self.amt, "."
elif self.repay==2:
self.amt=L/float((self.ty*12.0)+self.tm)
print "\nAmount to be paid monthly is ",self.amt
elif self.repay==3:
self.am=L/float((self.ty*12.0)+self.tm)
self.amt=self.am*6.0
print '\nYou have to pay' , self.amt , 'for every six months.'
def newaccount():
    """Create a new account interactively and append its pickled record to
    account1.dat."""
    c=newacc()
    c.getdata()
    # 'with' guarantees the file is closed even if pickling raises,
    # replacing the manual open/close pair.
    with open("account1.dat","ab") as f:
        pickle.dump(c,f)
def existaccount():
    """Authenticate an existing customer.

    Scans account1.dat for the entered account number and, when found,
    prompts for username/password.  Returns the account number on a
    successful login and 0 otherwise.  NOTE(review): a wrong password also
    returns 0, so the caller reports it as "account does not exist" --
    confirm that message is acceptable.
    """
    ano=input("\nEnter your account number: ")
    f=open("account1.dat","rb")
    c=newacc()
    validuser=0
    try:
        while True:
            # pickle.load raises EOFError once all records are consumed.
            c=pickle.load(f)
            if c.accno==ano:
                uname=raw_input("\nEnter username: ")
                pas=raw_input("\nEnter password")
                if uname==c.user and pas==c.password:
                    validuser=1
                    break
    except EOFError:
        # End of file reached without a successful login.
        ano=0
    if validuser==1:
        print "\n\n\t\tWELCOME ", c.name.upper()
    f.close()
    return ano
def deposit(r,amt):
f=open("account1.dat","rb")
f1=open("temp.dat","wb")
try:
while True:
c=pickle.load(f)
if c.accno==r:
c.balance+=amt
print "\nYour Account balance is updated. \nCurrent Balance is: ", c.balance
pickle.dump(c,f1)
except EOFError:
pass
f.close()
f1.close()
os.remove("account1.dat")
os.rename("temp.dat","account1.dat")
def withdraw(r,amt):
f=open("account1.dat","rb")
f1=open("temp.dat","wb")
try:
while True:
c=pickle.load(f)
if c.accno==r:
if c.balance-amt>=500:
c.balance-=amt
print "\nAccount balance is updated.\nCurrent Balance is: ",c.balance
else:
print "\nMinimum balance should be AED 500.\n You cannot withdraw this much amount."
pickle.dump(c,f1)
except EOFError:
pass
f.close()
f1.close()
os.remove("account1.dat")
os.rename("temp.dat","account1.dat")
def custoptions(r):
    """Transaction menu loop for the logged-in account number *r*.

    Offers view/deposit/withdraw/update/loan until the user declines
    further transactions; updating credentials forces a re-login.
    """
    while True:
        os.system('cls')
        print
        ch=input("1.View Account details.\n\n2.Deposit.\n\n3.Withdraw.\n\n4.Update personal information \n\n5.Apply for a loan \n\nEnter your choice: ")
        if ch==1:
            # Scan the account file for this account and display it.
            f=open("account1.dat","rb")
            try:
                while True:
                    c=pickle.load(f)
                    if c.accno==r:
                        c.display()
                        break
            except EOFError:
                pass
            f.close()
        elif ch==2:
            amt=input("\nEnter the amount to be deposited: ")
            deposit(r,amt)
        elif ch==3:
            amt=input("\nEnter the amount to be withdrawn: ")
            withdraw(r,amt)
        elif ch==4:
            # Rewrite the whole file, replacing this account's record with
            # the freshly-updated one; all other records copy through.
            flag=0
            f=open("account1.dat","rb")
            f1=open("temp.dat","wb")
            c=newacc()
            d=newacc()
            try:
                while True:
                    c=pickle.load(f)
                    if c.accno==r:
                        d.update(c)
                        pickle.dump(d,f1)
                        print "\nPlease login again with the new credentials."
                        flag=1
                    else:
                        pickle.dump(c,f1)
            except EOFError:
                pass
            f.close()
            f1.close()
            os.remove("account1.dat")
            os.rename("temp.dat","account1.dat")
            # Credentials may have changed: leave the menu to force re-login.
            if flag==1:
                break
        elif ch==5:
            # Locate this customer's record, then file a loan for it and
            # append the loan record to loans.dat.
            n=LOANS()
            c=newacc()
            f=open("account1.dat","rb")
            try:
                while True:
                    c=pickle.load(f)
                    if c.accno==r:
                        n.loans(c)
                        break
            except EOFError:
                pass
            f.close()
            lo=open('loans.dat','ab')
            pickle.dump(n,lo)
            lo.close()
        opt=raw_input("\nDo you want to do some other transactions")
        if opt.lower()=='no' or opt.lower()=='n':
            break
def viewallcust():
    """Admin report: print every account joined with its loan record, if any.

    NOTE(review): loans.dat is reopened and rescanned for every account --
    O(accounts * loans); acceptable for this small data file.
    """
    f=open("account1.dat","rb")
    print "------------------------------------------------------------------------------------------------------------------------------"
    print "Account Number| Customer Name | Account Type | Balance | Loan Type| Loan EMI |"
    print "-------------------------------------------------------------------------------------------------------------------------------"
    try:
        while True:
            c=pickle.load(f)
            f1=open("loans.dat","rb")
            try:
                while True:
                    l=pickle.load(f1)
                    if c.accno==l.accno:
                        print "%20s|%24s|%21d|%11d|%15d|%14d |\n\n" %(c.accno,c.name,c.acctype,c.balance,l.Type,l.amt)
                        break
            except EOFError:
                # No loan record found: print the row with empty loan columns.
                print "%20s|%24s|%21d|%11d|\t|\t|\n\n" %(c.accno,c.name,c.acctype,c.balance)
            f1.close()
    except EOFError:
        pass
    f.close()
def closeacc(r):
flag=0
accexist=0
f1=open("loans.dat","rb")
try:
while True:
l=pickle.load(f1)
if l.accno==r:
ch=raw_input("\nWould you like to payoff the loan amount? ")
if ch.lower()=='n' or ch.lower()=='no':
print "\nAccount cannot be closed."
flag=1
break
else:
break
except EOFError:
print "\nNo such account number."
accexist=1
pass
f1.close()
if accexist==0 or flag==0:
f=open("account1.dat","rb")
t=open("temp.dat","wb")
try:
while True:
c=pickle.load(f)
if c.accno!=r:
pickle.dump(c,t)
except EOFError:
f.close()
t.close()
os.remove("account1.dat")
os.rename("temp.dat","account1.dat")
f=open("loans.dat","rb")
t=open("temp.dat","wb")
try:
while True:
c=pickle.load(f)
if c.accno!=r:
pickle.dump(c,t)
except EOFError:
f.close()
t.close()
os.remove("loans.dat")
os.rename("temp.dat","loans.dat")
print "\nYour account has been closed."
## Check Indentations
while True:
os.system('cls')
print
print
mainmenu=input("_______ OOEHS BANKING _______ \n\n\n1.ADMIN\n\n2.CUSTOMER\n\nEnter your option as 1 or 2: ")
print
if mainmenu==1:
while True:
os.system('cls')
print"\n\n"
passkey=raw_input("Enter the passkey: ")
if passkey=="admin123":
print "\n------------ADMIN MENU------------\n\n"
adminopt=input("1.VIEW ALL CUSTOMERS DATA BANK.\n\n2.CLOSE AN ACCOUNT . \n\n Enter your choice: ")
print
if adminopt==1:
viewallcust()
elif adminopt==2:
accno=input("\nEnter the account number: ")
print
closeacc(accno)
else:
print "\n"
else:
print "\nWrong Passkey."
opt1=raw_input("\nDo you wish to try again? ")
if opt1.lower()=="no" or opt1.lower()=="n":
break
if mainmenu==2:
while True:
os.system('cls')
print "\n\n--------------CUSTOMER MENU--------------\n\n"
custopt=input("1.NEW ACCOUNT.\n\n2.EXISTING ACCOUNT.\n\nPlease enter 1 or 2: ")
print
if custopt==1:
newaccount()
elif custopt==2:
r=existaccount()
if r:
custoptions(r)
else:
print "\nThis account number does not exist."
else:
print "\nInvalid Choice."
opt2=raw_input("\nDo you wish to return to Customer Menu? ")
if opt2.lower()=='no' or opt2.lower()=='n':
break
else:
print "\nInvalid Choice"
opt3=raw_input("\nDo you wish to go to the Main Menu? ")
if opt3.lower()=='no'or opt3.lower()=='n':
break
|
#!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
#
# VanyaD - Copyright - Ektanoor <ektanoor@bk.ru> 2012
#
# This is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. VanyaD is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# ails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
from vanyad_nagcinga import *
from vanyad_shelves import *
#From here we start the secondary series of classes
class FastChecks(ConnectLivestatus):
    """This class is a base for many small checks on hosts.
    Most of the work is made with help of Pythons's native datatypes.

    On construction it fetches (host_name, parents, last_state_change,
    contacts) rows for hosts matching the given state / acknowledgement /
    notification filters.
    """
    status=[]
    def __init__(self,state,acknowledge,notification_period,notification):
        ConnectLivestatus.__init__(self)
        filters=('state = '+str(state),
                 'acknowledged = '+str(acknowledge),
                 'in_notification_period = '+str(notification_period),
                 'notifications_enabled = '+str(notification))
        columns=('host_name','parents','last_state_change','contacts')
        self.status=self.get_query('hosts',columns,filters)
    def parents_affected(self,lapse):
        """Map host -> parents for hosts whose state changed within the last *lapse* seconds."""
        cutoff=time.time()-lapse
        return dict((host,parents)
                    for host,parents,changed,_ in self.status
                    if changed>=cutoff)
    def hosts_changes(self,lapse):
        """List hosts whose last state change is at least *lapse* seconds old."""
        cutoff=time.time()-lapse
        return [host for host,_,changed,_ in self.status if changed<=cutoff]
    def which_contacts(self):
        """Return the set of all contacts across the selected hosts."""
        return set(contact
                   for _,_,_,contacts in self.status
                   for contact in contacts)
class Blacklist(ConnectLivestatus):
    """ A class to determine and cleanup nodes that are considered superfluous.
    """
    # Shelve-backed store of blacklisted host/service names.
    bl=None
    # Connection used to submit acknowledgement commands.
    nagcinga=None
    # Comment attached to every acknowledgement this class submits.
    comment='In blacklist'
    def __init__(self):
        ConnectLivestatus.__init__(self)
        self.bl=OpenShelves('blacklist')
        self.nagcinga=ConnectNagCinga()
    def ack_host(self):
        """Acknowledge every DOWN (state 1), unacknowledged host that is blacklisted."""
        noacks=self.get_query('hosts',['host_name'],('state = 1','acknowledged = 0'))
        for host_name in noacks:
            # get_query rows are sequences; host_name[0] is the actual name.
            if host_name[0] in self.bl.lsts:
                self.nagcinga.acknowledge_host(host_name[0],1,0,0,self.comment)
    def ack_svc(self):
        """Acknowledge every CRITICAL (state 2), unacknowledged service whose
        description is blacklisted."""
        noacks=self.get_query('services',['host_name','description'],('state = 2','acknowledged = 0'))
        for host_name, description in noacks:
            if description in self.bl.lsts:
                self.nagcinga.acknowledge_service(host_name,description,1,0,0,self.comment)
|
import numpy as np
### For IMS data
# https://github.com/aelias-c/SCD_project/blob/1a5eb5100ee88bf9968b54a7570693c0422f6b1b/SCD_anomaly_calc.py#L92
# Row slice (158:867) used in Aleksandra's SCD analysis.
ALEKSANDRA_SLICE = np.index_exp[158:867]
# details from https://nsidc.org/data/g02156#ancillary
IMS_24KM_UL_CORNER = (-12126597.0, 12126840.0) # note that this is the corner, not the center-of-pixel!
IMS_24KM_RES = 23684.997
IMS_24KM_GRID_SIZE = 1024
# establishing upper-right and lower-left grid-cell-corners for array-generation
# NOTE(review): one extra RES is added beyond GRID_SIZE cells -- presumably to
# keep np.arange's exclusive stop from dropping the last center; confirm.
IMS_24KM_UR_CORNER = (
    IMS_24KM_UL_CORNER[0] + IMS_24KM_RES + (IMS_24KM_GRID_SIZE * IMS_24KM_RES),
    IMS_24KM_UL_CORNER[1]
)
IMS_24KM_LL_CORNER = (
    IMS_24KM_UL_CORNER[0],
    IMS_24KM_UL_CORNER[1] - IMS_24KM_RES - (IMS_24KM_GRID_SIZE * IMS_24KM_RES)
)
# from upper-left center-of-pixel (need to add/subtract half a grid cell)
IMS_24KM_X_CENTERS = np.arange(
    IMS_24KM_UL_CORNER[0] + IMS_24KM_RES * 0.5,
    IMS_24KM_UR_CORNER[0] - IMS_24KM_RES * 0.5,
    IMS_24KM_RES
)
# from lower-left center-of-pixel
IMS_24KM_Y_CENTERS = np.arange(
    IMS_24KM_LL_CORNER[1] + IMS_24KM_RES * 0.5,
    IMS_24KM_UL_CORNER[1] - IMS_24KM_RES * 0.5,
    IMS_24KM_RES
)
### For CMC data
# details from https://khufkens.com/2014/07/24/georeferencing-daily-snow-depth-analysis-data/
# and https://nsidc.org/data/nsidc-0447
CMC_24KM_UL_CORNER = (-8405812.0, 8405812.0) # note that this is the corner, not the center-of-pixel!
CMC_24KM_RES = 23812.499
CMC_24KM_GRID_SIZE = 706
# establishing upper-right and lower-left grid-cell-corners for array-generation
# (comment fixed: this mirrors the IMS block above and computes UR, not UL)
CMC_24KM_UR_CORNER = (
    CMC_24KM_UL_CORNER[0] + CMC_24KM_RES + (CMC_24KM_GRID_SIZE * CMC_24KM_RES),
    CMC_24KM_UL_CORNER[1]
)
CMC_24KM_LL_CORNER = (
    CMC_24KM_UL_CORNER[0],
    CMC_24KM_UL_CORNER[1] - CMC_24KM_RES - (CMC_24KM_GRID_SIZE * CMC_24KM_RES)
)
# from upper-left center-of-pixel (need to add/subtract half a grid cell)
CMC_24KM_X_CENTERS = np.arange(
    CMC_24KM_UL_CORNER[0] + CMC_24KM_RES * 0.5,
    CMC_24KM_UR_CORNER[0] - CMC_24KM_RES * 0.5,
    CMC_24KM_RES
)
# from lower-left center-of-pixel
CMC_24KM_Y_CENTERS = np.arange(
    CMC_24KM_LL_CORNER[1] + CMC_24KM_RES * 0.5,
    CMC_24KM_UL_CORNER[1] - CMC_24KM_RES * 0.5,
    CMC_24KM_RES
)
def convert_xc_yc_to_meters_IMS(xarray_dataset, index_slice=ALEKSANDRA_SLICE):
    '''
    Add new coordinates containing Polar-Stereographic x/y in meters
    note that in xarray.plot.contourf you will need to specify x='xc', y='yc'
    '''
    coords = {
        'xc': IMS_24KM_X_CENTERS[index_slice],
        'yc': IMS_24KM_Y_CENTERS[index_slice],
    }
    return xarray_dataset.assign_coords(coords)
def convert_xc_yc_to_meters_CMC(xarray_dataset):
    '''
    Add new coordinates containing Polar-Stereographic x/y in meters
    note that in xarray.plot.contourf you will need to specify x='xc', y='yc'
    '''
    return xarray_dataset.assign_coords({
        'xc': CMC_24KM_X_CENTERS,
        'yc': CMC_24KM_Y_CENTERS,
    })
|
#!/usr/bin/env python
import operator
import StringIO
import textwrap
import unittest
from testmaster import compare_metrics
# Sample E2E CSV results
# Sample E2E CSV results (the "old"/baseline run; one row per results file).
OLD_CSV = '''filename,total_duration,c2s_throughput,c2s_duration,s2c_throughput,s2c_duration,latency,error,error_list
ubuntu14.04-chrome49-banjo-2016-11-29T140016Z-results.json,27.8,94.0,11.4,95.6,10.3,201.0,0,
ubuntu14.04-chrome49-banjo-2016-11-29T140902Z-results.json,27.6,94.1,11.4,95.6,10.3,202.0,0,
win10-firefox45-banjo-2016-11-29T130814Z-results.json,15.0,,,,,,1,"Timed out waiting for page to load.,Failed to load URL: http://localhost:53467/search?q=internet+speed+test"
osx10.11-chrome53-banjo-2016-11-29T150436Z-results.json,30.2,93.0,11.2,79.4,10.2,50.0,0,
osx10.11-chrome53-banjo-2016-11-29T150507Z-results.json,29.7,93.3,11.1,87.4,10.8,55.0,0,'''
# Sample E2E CSV results (the "new" run to compare against OLD_CSV).
NEW_CSV = '''filename,total_duration,c2s_throughput,c2s_duration,s2c_throughput,s2c_duration,latency,error,error_list
osx10.12-chrome57-banjo-2017-04-06T223328Z-results.json,36.0,93.5,11.1,92.8,10.3,73.0,0,
osx10.12-firefox52-ndt_js-2017-04-06T213908Z-results.json,32.0,94.3,11.9,93.8,10.4,85.0,0,
osx10.12-firefox52-ndt_js-2017-04-06T213940Z-results.json,32.0,94.2,11.9,93.8,10.4,85.0,0,
ubuntu16.04-chrome56-banjo-2017-04-06T212522Z-results.json,27.6,94.1,11.0,95.5,10.5,204.0,0,
win10-firefox49-banjo-2017-04-06T200458Z-results.json,33.4,94.9,11.0,96.4,10.2,43.0,0,
win10-firefox49-banjo-2017-04-06T200850Z-results.json,33.3,94.4,11.1,96.2,10.2,43.0,0,'''
# This is sample output from the compare_metrics() function. It is depenent on
# the contents of OLD_CSV and NEW_CSV above. If those change, then this may need
# to change too.
COMP_OUTPUT = [
    {'os': 'osx',
     'browser': 'firefox',
     'client': 'ndt_js',
     'metric': 's2c_throughput',
     'old_avg': 'none',
     'new_avg': 93.8,
     '%change': 'error'},
    {'os': 'win',
     'browser': 'firefox',
     'client': 'banjo',
     'metric': 'latency',
     'old_avg': 0.0,
     'new_avg': 43.0,
     '%change': 'error'},
    {'os': 'ubuntu',
     'browser': 'chrome',
     'client': 'banjo',
     'metric': 'total_duration',
     'old_avg': 27.7,
     'new_avg': 27.6,
     '%change': -0.0},
]
class CompareMetricsTest(unittest.TestCase):
    """Spot-check tests for the compare_metrics module, driven by the sample
    OLD_CSV / NEW_CSV / COMP_OUTPUT constants above."""
    def setUp(self):
        # Field order expected in the comparison CSV output.
        self.csv_fieldnames = ['os', 'browser', 'client', 'metric', 'old_avg',
                               'new_avg', '%change']
    def test_parse_options_without_output_file_returns_default(self):
        """Omitting --output_file should fall back to the documented default."""
        passed_args = ['--old_csv', '/tmp/lol.csv', '--new_csv',
                       '/opt/rofl.csv']
        expected_output_file = 'e2e_comparison_results.csv'
        args = compare_metrics.parse_options(passed_args)
        self.assertEqual(args.output_file, expected_output_file)
    def test_parse_csv(self):
        # The dicts returned by parse_csv() are too large to go about checking
        # them completely, even with only 4 or 5 sample rows in the CSV, so
        # we'll just spot check. The following dict has the expected metric
        # value as the key and the map to the result for that metric as the
        # value (list). This mapping is dervied from the OLD_CSV global
        # variable.
        result_mappings = {
            '79.4': ['osx-chrome-banjo', 'metrics', 's2c_throughput'],
            '201.0': ['ubuntu-chrome-banjo', 'metrics', 'latency'],
            '15.0': ['win-firefox-banjo', 'metrics', 'total_duration']
        }
        # Create a parsed CSV object for OLD_CSV
        csv = StringIO.StringIO(OLD_CSV)
        results = compare_metrics.parse_csv(csv)
        # Make sure that the parsed results for OLD_CSV are what we
        # expected.  reduce(operator.getitem, ...) walks the nested dicts.
        for value, mapping in result_mappings.iteritems():
            self.assertIn(
                float(value), reduce(operator.getitem, mapping, results))
    def test_average_metrics(self):
        # The dict returned by average_metrics() is too large to go about
        # checking it completely, even with only 4 or 5 sample rows in the
        # CSV, so we'll just spot check. The following dict has the expected
        # metric value as the key and the map to the result for that metric as
        # the value (list). This mapping is dervied from the NEW_CSV global
        # variable.
        result_mappings = {
            '94.25': ['osx-firefox-ndt_js', 'metrics', 'c2s_throughput'],
            '33.35': ['win-firefox-banjo', 'metrics', 'total_duration']
        }
        # Aggregate metrics from NEW_CSV
        csv = StringIO.StringIO(NEW_CSV)
        results = compare_metrics.parse_csv(csv)
        averages = compare_metrics.average_metrics(results)
        # Check whether aggregations for NEW_CSV are the expected ones.
        for value, mapping in result_mappings.iteritems():
            self.assertEqual(
                float(value), reduce(operator.getitem, mapping, averages))
    def test_compare_metrics(self):
        # Like other tests here, there are too many results to reasonably check
        # for every one of them, so we spot check instead. The spot checks we
        # make can be found in the global variable COMP_OUTPUT.
        # Aggregate metrics from OLD_CSV
        old_csv = StringIO.StringIO(OLD_CSV)
        old_results = compare_metrics.parse_csv(old_csv)
        old_averages = compare_metrics.average_metrics(old_results)
        # Aggregate metrics from NEW_CSV
        new_csv = StringIO.StringIO(NEW_CSV)
        new_results = compare_metrics.parse_csv(new_csv)
        new_averages = compare_metrics.average_metrics(new_results)
        # Compare the aggregation results
        comps = compare_metrics.compare_metrics(old_averages, new_averages)
        # comps is a list of dicts. Make sure that each of our expected dicts is
        # in comps.
        for expected_result in COMP_OUTPUT:
            self.assertIn(expected_result, comps)
    def test_write_results(self):
        """write_results() should emit exactly the expected CSV text."""
        expected_file_content = '''\
os,browser,client,metric,old_avg,new_avg,%change
osx,firefox,ndt_js,s2c_throughput,none,93.8,error
win,firefox,banjo,latency,0.0,43.0,error
ubuntu,chrome,banjo,total_duration,27.7,27.6,-0.0
'''
        # Generate output and write it.
        output_csv = StringIO.StringIO()
        compare_metrics.write_results(output_csv, COMP_OUTPUT,
                                      self.csv_fieldnames)
        # Verify that what was written is what we expected.
        self.assertEquals(
            textwrap.dedent(expected_file_content), output_csv.getvalue())
if __name__ == '__main__':
unittest.main()
|
class Command:
    """An immutable motion command: linear velocity plus angular velocity.

    Both components must be ``float`` instances; the asserts reject other
    types (note: assert-based validation is stripped under ``python -O``).
    """

    def __init__(self, velocity: float, angular_velocity: float) -> None:
        assert isinstance(velocity, float)
        assert isinstance(angular_velocity, float)
        self.__velocity: float = velocity
        # Fixed misspelling of the private attribute ("anglular" ->
        # "angular"); the name is name-mangled/private, so no external
        # callers are affected.
        self.__angular_velocity: float = angular_velocity

    @property
    def velocity(self) -> float:
        """Linear velocity component."""
        return self.__velocity

    @property
    def angular_velocity(self) -> float:
        """Angular velocity component."""
        return self.__angular_velocity
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-02-23 01:08
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 1.11: changes Course.description to a plain
    # TextField (no constraints).  Do not edit the operations by hand.

    dependencies = [
        ('courses_app', '0002_auto_20180222_2332'),
    ]

    operations = [
        migrations.AlterField(
            model_name='course',
            name='description',
            field=models.TextField(),
        ),
    ]
|
import logging
from imp import reload
import hashlib
class TrustManagerLog:
    """Writes trust-manager audit messages to log/trustmanager.log."""
    def __init__(self):
        # Log file path, relative to the process working directory.
        self.filename= "log/trustmanager.log"
    def configTrustManagerLog(self,logging_level):
        # HACK: logging.basicConfig only takes effect once per process, so the
        # logging module is reloaded before every call to re-apply the level.
        # NOTE(review): 'imp' is deprecated (removed in Python 3.12) --
        # consider importlib.reload or per-logger handlers instead.
        reload(logging)
        logging.basicConfig(filename=self.filename
        , level= logging_level\
        # ,filemode='w' \
        ,format='%(asctime)s - %(name)s - %(levelname)s - %(message)s' \
        ,datefmt='%m/%d/%Y %I:%M:%S %p')
        # filemode='w' causes to overwrite the log file, use it for tests or to clean the log
    def writeLog(self,source,destination,nonce,action, acceptance,type_of_message):
        """Log one trust decision (source/destination/nonce/action/acceptance)
        at the severity named by *type_of_message* (info/warning/error/critical).
        Unknown severities are silently ignored; any exception is printed."""
        try:
            message = """SOURCE:{source}; DESTINATION:{destination}; NONCE:{nonce}; \
ACTION:{action};ACCEPTANCE:{acceptance};""".format(source=source,destination=destination, \
nonce=nonce,action=action,acceptance=acceptance)
            if(type_of_message.lower() == "info" ):
                self.configTrustManagerLog(logging.INFO)
                logging.info(message)
            elif(type_of_message.lower() == "warning" ):
                self.configTrustManagerLog(logging.WARNING)
                logging.warning(message)
            elif(type_of_message.lower() == "error" ):
                self.configTrustManagerLog(logging.ERROR)
                logging.error(message)
            elif(type_of_message.lower() == "critical" ):
                self.configTrustManagerLog(logging.CRITICAL)
                logging.critical(message)
            #print('LOG WRITTEN')
        except Exception as e:
            print(e)
|
# This file is part of ts_scheduler.
#
# Developed for the Rubin Observatory Telescope and Site Systems.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import enum
import logging
import typing
from lsst.ts import observing
from . import driver, feature_scheduler, sequential
__all__ = ["DriverFactory", "DriverType"]
class DriverType(enum.Enum):
    """Configuration names for the available scheduler driver implementations."""

    Driver = "driver"
    Sequential = "sequential"
    FeatureScheduler = "feature_scheduler"
class DriverFactory:
    """Factory mapping each DriverType to its scheduler driver class."""

    # Registry of available driver implementations, keyed by DriverType.
    drivers = {
        DriverType.Driver: driver.Driver,
        DriverType.Sequential: sequential.SequentialScheduler,
        DriverType.FeatureScheduler: feature_scheduler.FeatureScheduler,
    }

    @classmethod
    def get_driver(
        cls,
        driver_type: DriverType,
        models: dict[str, typing.Any],
        raw_telemetry: dict[str, typing.Any],
        observing_blocks: dict[str, observing.ObservingBlock],
        parameters: driver.DriverParameters | None = None,
        log: logging.Logger | None = None,
    ) -> driver.Driver:
        """Construct and return the driver registered for *driver_type*,
        forwarding models, telemetry, observing blocks, parameters and logger.

        Raises KeyError if *driver_type* is not in the registry.
        """
        return cls.drivers[driver_type](
            models=models,
            raw_telemetry=raw_telemetry,
            observing_blocks=observing_blocks,
            parameters=parameters,
            log=log,
        )
|
#!/usr/bin/env python3
from baseline_np import decode, EXTENSION
import os
import glob
from zipfile import ZipFile
import time
VALOUTZIP = 'valout.zip'
VALOUTDIR = 'valout'
def get_files_info_string():
    """Describe the current directory: .png file count plus the non-.png names."""
    ps = os.listdir('.')
    # endswith instead of substring matching, so names like 'x.png.bak'
    # are not miscounted as PNGs (the original used "'.png' in p").
    png = [p for p in ps if p.endswith('.png')]
    non_png = [p for p in ps if not p.endswith('.png')]
    return 'Found {} .png files and {}'.format(len(png), non_png)
def get_baseline_files():
    """Unzip valout.zip into valout/ and return the sorted baseline files.

    Raises:
        AssertionError: if valout.zip is missing from the current directory.
        ValueError: if the archive contained no *.EXTENSION files.
    """
    assert os.path.isfile(VALOUTZIP), 'Expected {}. {}'.format(
        VALOUTZIP, get_files_info_string())
    print('Unzipping', VALOUTZIP, '...')
    with ZipFile(VALOUTZIP) as zipfile:
        zipfile.extractall()
    # unzip happens to valout/
    baseline_files = sorted(glob.glob(os.path.join(VALOUTDIR, '*.' + EXTENSION)))
    if len(baseline_files) == 0:
        raise ValueError('No .{} files found! {}'.format(
            EXTENSION, get_files_info_string()))
    # The .png listing is informational only; it is not returned.
    png_files = sorted(glob.glob('*.png'))
    print('Found {} .{} files // {} .png files ({})'.format(
        len(baseline_files), EXTENSION, len(png_files),
        list(zip(png_files, baseline_files))[:10]))
    return sorted(baseline_files)
def main():
    """Decode every baseline file, reporting the running average time per
    image after each block of 100 files."""
    files = get_baseline_files()
    start_time = time.time()
    for count, path in enumerate(files):
        decode(path)
        # Skip the first iteration to avoid dividing by zero.
        if count and count % 100 == 0:
            avg_seconds = (time.time() - start_time) / count
            print('Compressed {}. Average {}s/img'.format(
                count, avg_seconds))
    print('Done')
# Script entry point.
if __name__ == '__main__':
    main()
|
#!/usr/bin/env python
# Test this by entering the search string "election" on a command line like this:
# /home/wevote/WeVoteServer/search/query_test_script.py election
from elasticsearch import Elasticsearch
import sys
# Elasticsearch client pointed at the internal search node; generous timeout
# and retries because this is a manual test script.
es = Elasticsearch(["172.31.24.246:9200"], timeout = 120, max_retries = 5, retry_on_timeout = True)
# The single required positional argument is the search term.
if len(sys.argv) < 2:
    print("Usage: %s <search term>" % (sys.argv[0]))
    sys.exit(-1)
search_term = sys.argv[1]
# Earlier iterations of the query, kept for reference:
#query = { "query": {"match": { "candidate_name": "Joe"}}}
#query = { "query": {"match": { "candidate_name": "Joe"}}}
#query = { "query": { "multi_match": { "type": "phrase_prefix", "query": search_term, "fields": [ "candidate_name", "candidate_twitter_handle", "twitter_name", "measure_subtitle", "measure_text", "measure_title", "office_name", "first_name", "middle_name", "last_name", "party", "organization_name", "organization_twitter_handle", "twitter_description" ] } }}
# Baseline: phrase-prefix multi_match over all election-related fields.
query = { "query": { "multi_match": { "type": "phrase_prefix", "query": search_term, "fields": [ "google_civic_election_id", "candidate_name", "candidate_twitter_handle", "election_name", "twitter_name", "measure_subtitle", "measure_text", "measure_title", "office_name", "party", "organization_name", "organization_twitter_handle", "twitter_description" ] } }}
# Variant 1: boost election_name (^3), allow slop, sort by election date desc
# then score.
query_with_election_date = { "query": { "multi_match": { "type": "phrase_prefix",
                                                         "query": search_term,
                                                         "fields": [ "election_name^3", "google_civic_election_id",
                                                                     "candidate_name",
                                                                     "candidate_twitter_handle", "election_name",
                                                                     "twitter_name", "measure_subtitle", "measure_text",
                                                                     "measure_title", "office_name", "party",
                                                                     "organization_name", "organization_twitter_handle",
                                                                     "twitter_description", "state_name"],
                                                         "slop": 5}},
                             "sort": [{"election_day_text": {"order": "desc"}},
                                      {"_score": {"order": "desc"}}]}
# Variant 2: same, but documents without election_day_text sort last.
query_with_missing_last_election_date = { "query": { "multi_match": { "type": "phrase_prefix",
                                                         "query": search_term,
                                                         "fields": [ "election_name^3", "google_civic_election_id",
                                                                     "candidate_name",
                                                                     "candidate_twitter_handle", "election_name",
                                                                     "twitter_name", "measure_subtitle", "measure_text",
                                                                     "measure_title", "office_name", "party",
                                                                     "organization_name", "organization_twitter_handle",
                                                                     "twitter_description", "state_name"],
                                                         "slop": 5}},
                             "sort": [{"election_day_text": {"missing": "_last", "order": "desc"}},
                                      {"_score": {"order": "desc"}}]}
# Variant 3: missing dates substituted with the sentinel "1111-11-11",
# no explicit sort order on the date.
query_with_missing_election_date_without_order = { "query": { "multi_match": { "type": "phrase_prefix",
                                                         "query": search_term,
                                                         "fields": [ "election_name^3", "google_civic_election_id",
                                                                     "candidate_name",
                                                                     "candidate_twitter_handle", "election_name",
                                                                     "twitter_name", "measure_subtitle", "measure_text",
                                                                     "measure_title", "office_name", "party",
                                                                     "organization_name", "organization_twitter_handle",
                                                                     "twitter_description", "state_name"],
                                                         "slop": 5}},
                             "sort": [{"election_day_text": {"missing": "1111-11-11"}},
                                      {"_score": {"order": "desc"}}]}
# Variant 4: sentinel date plus explicit descending sort.
query_with_election_missing_date_value = { "query": { "multi_match": { "type": "phrase_prefix",
                                                         "query": search_term,
                                                         "fields": [ "election_name^3", "google_civic_election_id",
                                                                     "candidate_name",
                                                                     "candidate_twitter_handle", "election_name",
                                                                     "twitter_name", "measure_subtitle", "measure_text",
                                                                     "measure_title", "office_name", "party",
                                                                     "organization_name", "organization_twitter_handle",
                                                                     "twitter_description", "state_name"],
                                                         "slop": 5}},
                             "sort": [{"election_day_text": {"missing": "1111-11-11", "order": "desc"}},
                                      {"_score": {"order": "desc"}}]}
# Example of querying ALL indexes
res = es.search(body=query)
res_with_election_date = es.search(body=query_with_election_date)
res_with_missing_last_election_date = es.search(body=query_with_missing_last_election_date)
# res_with_missing_election_date_without_order = es.search(body=query_with_missing_election_date_without_order)
# res_with_election_missing_date_value = es.search(body=query_with_election_missing_date_value)
# NOTE(review): res['hits']['total'] is an integer in older Elasticsearch
# releases but a dict in 7.x+ -- confirm the cluster version this targets.
print("Got %d hits from all index search: " % res['hits']['total'])
print("Got %d hits from all index search: " % res_with_election_date['hits']['total'])
print("Got %d hits from all index search: " % res_with_missing_last_election_date['hits']['total'])
# print("Got %d hits from all index search: " % res_with_missing_election_date_without_order['hits']['total'])
# print("Got %d hits from all index search: " % res_with_election_missing_date_value['hits']['total'])
# Dump every hit of each result set, field by field.
for hit in res['hits']['hits']:
    print("------------- RESULT --------------")
    for field in hit:
        print("%s: %s" % (field, hit[field]))
print("============================================")
print("============================================")
for hit in res_with_election_date['hits']['hits']:
    print("------------- RESULT --------------")
    for field in hit:
        print("%s: %s" % (field, hit[field]))
print("============================================")
print("============================================")
for hit in res_with_missing_last_election_date['hits']['hits']:
    print("------------- RESULT --------------")
    for field in hit:
        print("%s: %s" % (field, hit[field]))
print("============================================")
# print("============================================")
# for hit in res_with_missing_election_date_without_order['hits']['hits']:
#     print("------------- RESULT --------------")
#     for field in hit:
#         print("%s: %s" % (field, hit[field]))
# print("============================================")
# print("============================================")
# for hit in res_with_election_missing_date_value['hits']['hits']:
#     print("------------- RESULT --------------")
#     for field in hit:
#         print("%s: %s" % (field, hit[field]))
# example of querying single index
if True:
    res = es.search(index="elections", body={ "query": {"match": { "google_civic_election_id": "5000"}}})
    print("Got %d hits from single index search: " % res['hits']['total'])
    for hit in res['hits']['hits']:
        for field in hit:
            print("%s: %s" % (field, hit[field]))
|
import pyxel
import random
# o jogo em si
class jogo():
    """Main game object: creates the entities and runs the pyxel loop."""
    def __init__(self):
        pyxel.init(256,256)
        # Fruit spawns at a random position; creature and player start fixed.
        self.fruta1 = Entidade('fruta',random.randint(0,255),random.randint(0,255),8)
        self.criatura1 = Entidade('criatura',100,100,9)
        self.jogador1 = Entidade('jogador',128,128,3)
        self.entidades = [self.jogador1,self.criatura1,self.fruta1]
        # pyxel.run blocks here, calling update() and draw() once per frame.
        pyxel.run(self.update,self.draw)
    # Updates the game depending on the entity type.
    def update(self):
        for i in self.entidades:
            # Gravity: every entity (including the fruit) drifts down 1 px/frame.
            i.y += 1
            # Creature: random walk, clamped to the screen with a 5 px margin.
            if i.tipo == 'criatura':
                i.moveraleatoriamente()
                if i.x + 5 > 256:
                    i.x = 256 - 5
                if i.x - 5 < 0:
                    i.x = 0 + 5
                if i.y + 5 > 256:
                    i.y = 256 - 5
                if i.y - 5 < 0:
                    i.y = 0 + 5
            # Player: arrow-key movement, clamped with a 10 px margin.
            if i.tipo == 'jogador':
                # NOTE(review): UP uses btnp (edge-triggered, -10 jump) while the
                # other keys use btn (held, -/+1 per frame) — looks intentional
                # (jump vs walk) but worth confirming.
                if pyxel.btnp(pyxel.KEY_UP):
                    i.y -= 10
                if pyxel.btn(pyxel.KEY_DOWN):
                    i.y += 1
                if pyxel.btn(pyxel.KEY_LEFT):
                    i.x -= 1
                if pyxel.btn(pyxel.KEY_RIGHT):
                    i.x += 1
                if i.x + 10 > 256:
                    i.x = 256 - 10
                if i.x - 10 < 0:
                    i.x = 0 + 10
                if i.y + 10 > 256:
                    i.y = 256 - 10
                if i.y - 10 < 0:
                    i.y = 0 + 10
            # Fruit: no input, just clamped with a 2 px margin.
            if i.tipo == 'fruta':
                if i.x + 2 > 256:
                    i.x = 256 - 2
                if i.x - 2 < 0:
                    i.x = 0 + 2
                if i.y + 2 > 256:
                    i.y = 256 - 2
                if i.y - 2 < 0:
                    i.y = 0 + 2
    # Draws every entity as a filled rectangle centered on (x, y).
    def draw(self):
        pyxel.cls(7)
        for i in self.entidades:
            if i.tipo == 'jogador':
                pyxel.rect(i.x - 10,i.y - 10,20,20,i.cor)
            if i.tipo == 'criatura':
                pyxel.rect(i.x - 5,i.y - 5,10,10,9)
            if i.tipo == 'fruta':
                pyxel.rect(i.x - 2, i.y -2,4,4,i.cor)
# clases
class Entidade():
    """A game entity: a typed point (tipo) with a position (x, y) and a color."""

    def __init__(self, tipo, x, y, cor):
        self.tipo = tipo
        self.x = x
        self.y = y
        self.cor = cor

    def moveraleatoriamente(self):
        """Take one step in a random cardinal direction."""
        # 0: left, 1: right, 2: up, 3: down — same mapping as before.
        passo = random.randint(0, 3)
        dx, dy = ((-1, 0), (1, 0), (0, -1), (0, 1))[passo]
        self.x += dx
        self.y += dy
jogo() |
class washer(object):
    """Washing-machine spec sheet: stores a model name and outer dimensions."""
    def __init__(self,modleName,width,height):
        """Initialize the spec.

        modleName: model name (attribute keeps this original spelling)
        width: cabinet width
        height: cabinet height
        """
        self.modleName = modleName
        self.width = width
        self.height = height
    def print_info(self):
        # Prints the spec line (user-facing Chinese text, left untouched).
        print(f"洗衣机的型号是{self.modleName},高度是{self.height},宽度是{self.width}")
        # print(f"洗衣机的高度是{self.height}")
    def __del__(self):
        # Runs when the instance is garbage-collected.
        print("自动删除")
    def __str__(self):
        # str(obj) / print(obj) yields this manual-style description.
        return "海尔洗衣机的说明书"
# Demo: build one spec and exercise print_info() and __str__.
haier = washer('海尔',500,800)
haier.print_info()
print(haier)
# coding=utf-8
import math
import torch
from torch.nn.parameter import Parameter
from torch.nn.modules.module import Module
from torch.nn.modules.utils import _pair
from function_copy import conv
import torch.nn.functional as F
from .errorInsert import insertError,f2Q,Q2f
from collections import Counter
import numpy as np
import datetime
class _ConvNd(Module):
def __init__(self, in_channels, out_channels, kernel_size, stride,
padding, dilation, transposed, output_padding, groups, bias):
super(_ConvNd, self).__init__()
if in_channels % groups != 0:
raise ValueError('in_channels must be divisible by groups')
if out_channels % groups != 0:
raise ValueError('out_channels must be divisible by groups')
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.dilation = dilation
self.transposed = transposed
self.output_padding = output_padding
self.groups = groups
if transposed:
self.weight = Parameter(torch.Tensor(
in_channels, out_channels // groups, *kernel_size))
else:
self.weight = Parameter(torch.Tensor(
out_channels, in_channels // groups, *kernel_size))
if bias:
self.bias = Parameter(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
n = self.in_channels
for k in self.kernel_size:
n *= k
stdv = 1. / math.sqrt(n)
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def extra_repr(self):
s = ('{in_channels}, {out_channels}, kernel_size={kernel_size}'
', stride={stride}')
if self.padding != (0,) * len(self.padding):
s += ', padding={padding}'
if self.dilation != (1,) * len(self.dilation):
s += ', dilation={dilation}'
if self.output_padding != (0,) * len(self.output_padding):
s += ', output_padding={output_padding}'
if self.groups != 1:
s += ', groups={groups}'
if self.bias is None:
s += ', bias=False'
return s.format(**self.__dict__)
class Conv2d(_ConvNd):
    r"""2D convolution with fixed-point quantization and optional fault injection.

    Constructor-compatible with :class:`torch.nn.Conv2d` (``in_channels``,
    ``out_channels``, ``kernel_size``, ``stride``, ``padding``, ``dilation``,
    ``groups``, ``bias``; same output shape :math:`(N, C_{out}, H_{out}, W_{out})`).
    :meth:`forward` additionally quantizes the weights, input and bias to
    ``data_width``-bit fixed point via ``f2Q`` before convolving and
    dequantizes the result via ``Q2f``.  :meth:`new_conv` and :meth:`TMR`
    support triple-modular-redundancy experiments where bit errors are
    injected with ``insertError`` and a majority vote repairs the result.

    .. note::
        ``f2Q`` / ``Q2f`` / ``insertError`` come from ``.errorInsert``.  The
        bias quantization assumes qcodes compose additively for products
        (``qcode_w + qcode_i``) — confirm against ``errorInsert`` if scaling
        looks off.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True):
        # Normalize every spatial argument to an (h, w) pair before delegating.
        kernel_size = _pair(kernel_size)
        stride = _pair(stride)
        padding = _pair(padding)
        dilation = _pair(dilation)
        super(Conv2d, self).__init__(
            in_channels, out_channels, kernel_size, stride, padding, dilation,
            False, _pair(0), groups, bias)

    def TMR(self, input1, input2, input3, probs):
        """Majority-vote three error-injected tensor copies element-wise.

        Each copy receives independent bit errors (rate ``probs``, 16-bit
        words).  Where at least two copies agree, the agreed value wins;
        where all three disagree, the element falls back to their average.
        """
        input1 = insertError(input1, probs, 16)
        input2 = insertError(input2, probs, 16)
        input3 = insertError(input3, probs, 16)
        b, c, raws, cols = input1.size()
        input_copy = input1.clone()
        for x in range(b):
            for y in range(c):
                for i in range(raws):
                    for j in range(cols):
                        v1 = input1[x][y][i][j]
                        v2 = input2[x][y][i][j]
                        v3 = input3[x][y][i][j]
                        if v1 != v2 and v1 != v3 and v2 != v3:
                            # No majority: average the three candidates.
                            input_copy[x][y][i][j] = (v1 + v2 + v3) / 3
                        elif v1 == v2 or v1 == v3:
                            input_copy[x][y][i][j] = v1
                        else:  # only remaining agreement: v2 == v3
                            input_copy[x][y][i][j] = v2
        return input_copy

    def new_conv(self, weight_new, input_new, probs, data_width):
        """One noisy convolution: inject bit errors into weight and input, then conv."""
        weight_new1 = insertError(weight_new, probs, data_width)
        input_new1 = insertError(input_new, probs, data_width)
        return F.conv2d(input_new1, weight_new1, self.bias, self.stride,
                        self.padding, self.dilation, self.groups)

    def forward(self, input, data_width=8, flagErr=3, probs=1e-7):
        """Quantize -> convolve -> dequantize.

        ``flagErr`` and ``probs`` are kept for the (currently disabled)
        error-injection path below.
        """
        weight_new, qcode_w = f2Q(self.weight.clone(), data_width)
        input_new, qcode_i = f2Q(input.clone(), data_width)
        # Fix: self.bias is None when constructed with bias=False; the old
        # code unconditionally called self.bias.clone() and crashed there.
        if self.bias is not None:
            bias_new, _ = f2Q(self.bias.clone(), 16, qcode_w + qcode_i)
        else:
            bias_new = None
        y_correct = F.conv2d(input_new, weight_new, bias_new, self.stride,
                             self.padding, self.dilation, self.groups)
        y_correct = Q2f(y_correct, qcode_i, qcode_w)
        # Disabled TMR error-injection path, kept for reference:
        # y_noise1 = self.new_conv(weight_new, input_new, probs, data_width)
        # y_noise2 = self.new_conv(weight_new, input_new, probs, data_width)
        # y_noise3 = self.new_conv(weight_new, input_new, probs, data_width)
        # y_noise = self.TMR(y_noise1, y_noise2, y_noise3, probs)
        # y = y_noise.detach() + y_correct - y_correct.detach()
        return y_correct
|
from ssg.utils import parse_template_boolean_value
def preprocess(data, lang):
    """Resolve the template's boolean knobs and derive the sysctl file name.

    ``missing_parameter_pass`` is normalized to a real boolean, and
    ``config_basename`` is chosen by whether the rule reinforces an OS
    default or applies extra hardening.
    """
    data["missing_parameter_pass"] = parse_template_boolean_value(
        data, parameter="missing_parameter_pass", default_value=False)
    is_default_value = parse_template_boolean_value(
        data, parameter="is_default_value", default_value=False)
    data["config_basename"] = (
        "01-complianceascode-reinforce-os-defaults.conf"
        if is_default_value
        else "00-complianceascode-hardening.conf")
    return data
|
import psycopg2
import pandas as pd
host_version = "local"
#host_version = "trindade"
in_file_path = "ts_to_be_inserted.csv"
try:
conn = psycopg2.connect("dbname='from_unsupervised_to_supervised' user='postgres' host='localhost' password='admin'")
conn.autocommit = True
except:
print "unable to connect to the database"
sys.exit(0)
cursor = conn.cursor()
def insert_into_db():
df = pd.read_csv(in_file_path)
for idx, row in df.iterrows():
csv_path = row["csv_path"]
if host_version == "trindade":
year = row["date_start"].split("/")[-1]
month = row["date_start"].split("/")[0]
csv_path = "/home/localuser/diegoximenes_mestrado/change_point_detection/input/" + str(year) + "_" + str(month).zfill(2) + "/" + row["server"] + "/" + row["mac"] + ".csv"
sql = "INSERT INTO time_series(mac, server, csv_path, date_start, date_end) VALUES('%s', '%s', '%s', '%s', '%s')"%(row["mac"], row["server"], csv_path, row["date_start"], row["date_end"])
try: cursor.execute(sql)
except:
print "error on insertion: " + sql
sys.exit(0)
insert_into_db()
|
#====================================================================
# Motor class that remembers parameters for sizing
#====================================================================
class motors:
    """Collection of motor groups built from a sizing-parameters dictionary.

    Each entry of ``data`` (keyed by group name) becomes one ``motor_group``;
    group ids are assigned in sorted-key order so numbering is deterministic.
    """

    def __init__(self, data=None, nseg=0):
        # Fix: the original used a mutable default argument (data={});
        # None is the sentinel now, which is call-compatible.
        self.ngroups = 0
        self.groups = {}
        if data:
            # Loop over motor groups in sorted key order -> stable ids.
            for ngrp, key in enumerate(sorted(data)):
                self.groups[ngrp] = motor_group(data[key], key, nseg)
            self.ngroups = len(self.groups)
        self.nmotors = 0      # total number of motors (filled in during sizing)
#====================================================================
# Motor group details
#====================================================================
class motor_group:
    """Sizing parameters for a single motor group."""

    def __init__(self, data, key, nseg):
        """Record efficiencies and allocate per-segment power storage.

        data: dict with 'cruise_efficiency' and 'hover_efficiency' entries
        key:  name of this group in the input dictionary
        nseg: number of mission segments (length of the power array)
        """
        self.key = key
        self.cruise_efficiency = data['cruise_efficiency']
        self.hover_efficiency = data['hover_efficiency']
        # Which rotor groups are used to size this motor group?
        self.rotor_group_ids = []
        # Input power per mission segment (filled in during sizing).
        # NOTE(review): relies on a module-level numpy import that is not
        # visible at the top of this file — confirm it exists.
        self.p_ins = numpy.zeros(nseg)
        self.nmotors = 0
|
from perf.model.configuration import Configurations
from utility import fab_util
class DistributeEnv(Configurations):
    """Test-environment configuration that also primes fabric's SSH settings."""
    def __init__(self, config_file, **kwargs):
        '''@param config_file: configuration file, must be a properties file'''
        super(DistributeEnv, self).__init__(config_file, **kwargs)
        self.set_fabric_env()
    def set_fabric_env(self):
        # Fill in SSH defaults, validate required settings, then push them to fab_util.
        self._set_attr('test_machine_port', '22')
        self._set_attr('test_machine_username', 'root')
        # Either a public key or a password must be configured for SSH auth.
        if not hasattr(self, 'test_machine_pubkey') and not hasattr(self, 'test_virtual_machine_password'):
            print 'pubkey and password must have one'
            exit(1)
        if not hasattr(self, 'test_machine_hosts'):
            print 'Test machine hosts is required'
            exit(1)
        elif type(self.test_machine_hosts) is str:
            # Normalize a single host string to a one-element list.
            self.test_machine_hosts = [self.test_machine_hosts, ]
        # Hand whatever is configured over to the fabric utility layer.
        if hasattr(self, 'test_machine_username'): fab_util.set_user(self.test_machine_username)
        if hasattr(self, 'test_machine_port'): fab_util.set_port(self.test_machine_port)
        if hasattr(self, 'test_machine_pubkey'): fab_util.set_key_file(self.test_machine_pubkey)
        if hasattr(self, 'test_virtual_machine_password'): fab_util.set_password(self.test_virtual_machine_password)
|
import db
def proccessDiagnosis(userSymptoms):
    """Score every illness in the database against the user's symptoms.

    Returns a list of dicts (title, probability %, matching symptoms) for
    each illness that shares at least one symptom with ``userSymptoms``.
    """
    results = []
    for illness in db.illnesses:
        # Keep only the illness symptoms the user actually reported.
        shared = list(set(illness['symptoms']) & set(userSymptoms))
        if not shared:
            continue
        pct = len(shared) / len(illness['symptoms']) * 100
        results.append({
            "title": illness['title'],
            "probability": round(pct, 2),
            "symptoms": shared,
        })
    return results
|
#!/usr/bin/python
# Checks whether the given string can be a valid palindrome or not
def isvalidpalindrome(s):
    """Return True if the characters of ``s`` can be rearranged into a palindrome.

    A string is a permutation of a palindrome iff at most one character
    occurs an odd number of times.
    """
    # Counter-based parity check.  (The original toggled +1/-1 flags in a
    # dict it named ``map``, shadowing the builtin.)
    odd = sum(1 for count in Counter(s).values() if count % 2)
    return odd <= 1
# Demo: "nmana" rearranges to "naman", so this prints True.
s = "nmana"
print isvalidpalindrome(s)
# -*- coding: utf-8 -*-
# @Author : 杨佳
# @Time : 2020/11/18 15:44
# @File : test_company.py
import pytest
import time
from common.deal_excel import DealExcel
from pages.company_page import CompanyPage
from common.log import do_log
import common.file_contrast as fc
# do_excel = DealExcel()
# test_data = do_excel.read('companya')
# 测试文件上传用例
@pytest.mark.success
# @pytest.mark.parametrize("data",test_data[0])
def test_02_picture_upload(browser):
    """Upload a picture on the company page and poll for the success message."""
    c = CompanyPage(browser)
    # Upload the picture
    c.get()
    time.sleep(1)
    c.upload_image([r'F:\Code\python\MeetingManagement\resource\upload_source\1.png'])
    time.sleep(1)
    count = 0
    acture_val = ''
    while count < 60:
        # After the upload, check once per second whether acture_val is set;
        # if nothing shows up within ~1 minute the upload is considered failed.
        acture_val = c.get_upload_info()
        if acture_val:
            break
        count += 1
        time.sleep(1)
    try:
        # '上传成功' == "upload succeeded" (runtime strings kept as-is).
        assert '上传成功' in acture_val
        do_log.info("上传测试用例通过")
    except AssertionError as e:
        # Screenshot on failure for debugging, then re-raise so pytest records it.
        c.scapture('upload_pic')
        do_log.error(f"上传测试用例不通过:{e}")
        raise e
@pytest.mark.success
def test_03_picture_show(browser):
    """Download the displayed company picture and compare it to the uploaded one."""
    c = CompanyPage(browser)
    c.get()
    picurl = c.get_picurl()
    # Download the shown image, then diff it against the uploaded original.
    # NOTE(review): the upload test sends 1.png but this compares against
    # 1.jpg — confirm that is intended.
    filepath = fc.load_file(picurl,r'F:\Code\python\MeetingManagement\resource\load_source\\')
    acture_val = fc.picture_contrast(filepath,r'F:\Code\python\MeetingManagement\resource\upload_source\1.jpg')
    try:
        assert True == acture_val
        do_log.info("图片展示用例通过")
    except AssertionError as e:
        # Screenshot on failure for debugging, then re-raise.
        c.scapture('show_pic')
        do_log.error(f"图片展示用例不通过:{e}")
        raise e
|
import math as m
from scipy import signal
import control as c
class General_aprox(object):
    """Base data for an analog filter approximation.

    Stores the attenuation/frequency template (Ap/As, wp/ws, band edges for
    BP/BR filters), normalizes the stop-band frequency, and provides a helper
    to denormalize a normalized prototype into the target frequency range.
    """

    def normalizacion(self):
        """Compute the normalized stop-band frequency ``self.wsn`` for this type."""
        if self.tipo == "LP":
            self.wsn = self.ws / self.wp
        elif self.tipo == "HP":
            self.wsn = self.wp / self.ws
        elif self.tipo == "BP":
            self.wsn = (self.wsMas - self.wsMenos) / (self.wpMas - self.wpMenos)
        elif self.tipo == "BR":
            self.wsn = (self.wpMas - self.wpMenos) / (self.wsMas - self.wsMenos)
        else:
            # Fix: the original assigned a local ``wsn`` here, leaving
            # self.wsn untouched; unknown types now get the neutral value.
            self.wsn = 1

    def __init__(self, As, Ap, wp, ws, wpMenos, wpMas, wsMenos, wsMas, orden, tipo, a):
        super().__init__()
        self.As = As                 # stop-band attenuation
        self.Ap = Ap                 # pass-band attenuation
        self.wp = wp                 # pass-band edge frequency
        self.ws = ws                 # stop-band edge frequency
        self.n = orden               # requested filter order
        self.wpMenos = wpMenos       # band edges, used for BP/BR only
        self.wpMas = wpMas
        self.wsMenos = wsMenos
        self.wsMas = wsMas
        self.tipo = tipo             # "LP", "HP", "BP" or "BR"
        self.wsn = 0                 # normalized stop-band frequency
        self.b = 0                   # fractional bandwidth (BP/BR)
        self.a = a                   # denormalization percentage
        self.polos = []
        self.zeros = []
        if self.tipo in ("BP", "BR"):
            self.b = (self.wpMas - self.wpMenos) / m.sqrt(self.wpMas * self.wpMenos)
        self.normalizacion()

    def denormalization(type, W, n, poles=None, zeros=None):
        """Denormalize a pole/zero prototype for filter ``type`` ("LP" or "HP").

        NOTE: defined without ``self`` in the original and called as
        ``General_aprox.denormalization(...)``; that convention is kept.
        """
        if type == "LP":
            s = c.tf([1, 0], [W])
        elif type == "HP":
            s = c.tf([W], [1, 0])
        else:
            # Fix: other types previously fell through and returned an
            # undefined ``tf`` (UnboundLocalError).
            raise ValueError("denormalization only supports 'LP' and 'HP'")
        tf = c.tf([1], [1])
        if zeros is not None:
            for k in range(len(zeros)):
                # Fix: the original called ``zeros(k)`` — a TypeError on any
                # list input; index with ``zeros[k]`` instead.
                tf = tf * (s - zeros[k])
        if poles is not None:
            # Fix: guard against poles=None (the declared default).
            for k in range(len(poles)):
                tf = tf * 1 / (s - poles[k])
        return signal.TransferFunction(tf.num[0][0], tf.den[0][0])
if __name__ == "__main__":
    # Smoke test: denormalize a single-pole low-pass prototype at W=100.
    ex=General_aprox.denormalization("LP", 100, 5, [-3.])
|
## Check whether an entered number is even or odd.
# (The original kept two variants: one commented out, one that divided by 2
# and multiplied back — the remainder operator expresses the test directly
# and gives identical results, including for negative numbers.)
num = int(input("enter the number:"))
if num % 2 == 0:
    print("its even number", num)
else:
    print("odd number", num)
|
#! /usr/bin/env python2.7
from django.views.generic import TemplateView
from django.shortcuts import redirect
from django.forms import ModelForm
from gifsong.models import gifsong
class GifSongForm(ModelForm):
    """ModelForm for creating a gifsong from its image and audio URLs."""
    class Meta:
        model = gifsong
        fields = ['image_url', 'audio_url']
class showgifsong(TemplateView):
    """Render one gifsong: the one requested via ?gvid=..., else a random one."""
    template_name = 'showsong.html'

    def get(self, request, *args, **kwargs):
        gvidid = request.GET.get('gvid')
        agifsong = None
        if gvidid:
            try:
                agifsong = gifsong.objects.get(id=gvidid)
            except (gifsong.DoesNotExist, ValueError):
                # Fix: an unknown or malformed id used to raise and produce a
                # 500; fall through to the random pick instead.
                agifsong = None
        if agifsong is None:
            # Evaluate the random ordering once, so the existence check and
            # the picked row come from the same cached query result (the old
            # code ran order_by('?') twice).
            candidates = gifsong.objects.order_by('?')
            if candidates:
                agifsong = candidates[0]
        context = {
            'song': agifsong,
        }
        return self.render_to_response(context)
class addgifsong(TemplateView):
    """Form page for creating a gifsong; redirects to the show page on success."""
    template_name = 'createsong.html'

    def get(self, request, *args, **kwargs):
        return self.render_to_response({'form': GifSongForm()})

    def post(self, request, *args, **kwargs):
        form = GifSongForm(request.POST)
        if form.is_valid():
            song = form.save()
            return redirect('/show?gvid=' + str(song.id))
        # Fix: re-render the *bound* form so validation errors reach the
        # template; the old code handed back a fresh empty form, silently
        # discarding the errors.
        return self.render_to_response({'form': form})
|
import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def main():
    """Plot the popularity distribution of one pageview dump and the
    day-over-day correlation between two dumps (saved to wikipedia.png)."""
    path_day1, path_day2 = sys.argv[1], sys.argv[2]
    cols = ['lang', 'page', 'views', 'bytes']

    day1 = pd.read_csv(path_day1, sep=' ', header=None, index_col=1, names=cols)
    day1 = day1.sort_values(by=['views'], ascending=False)
    day2 = pd.read_csv(path_day2, sep=' ', header=None, index_col=1, names=cols)
    day2 = day2.sort_values(by=['views'], ascending=False)

    # Align day-2 views to day-1's pages via the shared page index.
    day1['views2'] = day2['views']

    plt.figure(figsize=(10, 6))

    # Left panel: views by popularity rank for day 1.
    plt.subplot(1, 2, 1)
    plt.plot(day1['views'].values)
    plt.title("Popularity Distribution")
    plt.xlabel("Rank")
    plt.ylabel("Views")

    # Right panel: per-page day-1 vs day-2 views, log-log scale.
    plt.subplot(1, 2, 2)
    plt.scatter(day1['views'], day1['views2'])
    plt.title("Daily Correlation")
    plt.xlabel("Day 1 views")
    plt.ylabel("Day 2 views")
    plt.xscale('log')
    plt.yscale('log')

    # plt.show()
    plt.savefig('wikipedia.png')
if __name__ == '__main__':
    main()
# Usage:
# python3 create_plots.py pagecounts-20190509-120000.txt pagecounts-20190509-130000.txt
# REFERENCED:
# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.sort_values.html
import array
import urllib2 as url2
import urllib as url1
import sys
import dropbox
#from Selenium import webdriver
# --- Fetch the encrypted file from Dropbox via OAuth2 (manual code entry) ---
fileName = sys.argv[1]
# NOTE(review): app credentials are hard-coded in source; they should be
# moved out of version control.
app_key = 'uqzp24pob7zakxn'
app_secret = 's3pdcfy7zhycxcv'
access_type = "dropbox"
flow = dropbox.client.DropboxOAuth2FlowNoRedirect(app_key, app_secret)
authorize_url = flow.start()
"""
session = dropbox.session.DropboxSession(app_key,app_secret,access_type)
request_token = session.obtain_request_token()
webbrowser.open(authorize_url)
selenium.
access_token = "Bnv5cWNiXqUAAAAAAAAAAbVZNYMPAi9QoTX-OODenus"
"""
# Walk the user through the OAuth consent screen and collect the code.
print '1. Go to:\n' + authorize_url
print '2. Click "Allow" (you might have to log in first)'
print '3. Copy the authorization code.'
code = raw_input("Enter the authorization code here: ").strip()
access_token, user_id = flow.finish(code)
clientDrop = dropbox.client.DropboxClient(access_token)
# Download the remote file and mirror it locally under the same name.
f = clientDrop.get_file(fileName)
out = open(fileName, 'w')
out.write(f.read())
out.close()
# First line of the file is the public key (an integer); the rest is payload.
with open(fileName, 'r') as f:
    data = f.read()
publicKey = int(data.split('\n', 1)[0])
import serial
import sys
import time
def get_stats(base):
    """Return (mean, population standard deviation) of the numbers in ``base``.

    Divides by float(len) throughout so integer samples also work under
    Python 2 (the original's plain ``/`` silently truncated for int inputs).
    """
    n = float(len(base))
    ave = sum(base) / n
    variance = sum((x - ave) ** 2 for x in base) / n
    return ave, variance ** 0.5
def rough_equal(num1, num2, tol):
    """True when num1 lies strictly within tol of num2 (exclusive bounds)."""
    return num2 - tol < num1 < num2 + tol
def sync(s,base,tol):
    """Measure the sender's blink period from the serial light sensor.

    Watches readings on serial port ``s`` for transitions relative to the
    calibrated baseline ``base`` (+/- ``tol``).  Each edge's timestamp is
    recorded; after 12 edges the intervals between successive edges are
    reduced to (mean, std-dev) via get_stats.
    """
    delay = []
    light_on = False
    avg = 0
    while True:
        try:
            value = s.readline()
            n = float(value)
            if light_on:
                # Light was on: a return to baseline marks a falling edge.
                if rough_equal(n,base,tol):
                    delay.append(time.time())
                    light_on = False
            else:
                # Light was off: a reading above baseline marks a rising edge.
                if n > base + tol:
                    delay.append(time.time())
                    light_on = True
        except:
            # NOTE(review): bare except — intended to skip non-numeric serial
            # junk, but it also hides serial errors and Ctrl-C; consider
            # narrowing to ValueError.
            pass
        if len(delay) >= 12:
            break
    temp = []
    for i in range(len(delay)-1):
        temp.append(delay[i+1]-delay[i])
    return get_stats(temp)
def wait_delay(s, delay):
    """Consume serial input for ``delay`` seconds (keeps the buffer drained)."""
    deadline = time.time() + delay
    while time.time() < deadline:
        s.readline()
# --- Read the key bits from the light sensor over the serial port ---
print "Opening serial port"
try:
    s = serial.Serial('/dev/tty.usbmodem1421',9600,timeout=0.1)
except:
    print "Could not open port, exiting now.."
    sys.exit()
base = []
input_data = []
i = 0
n_base_reading = 20   # samples used to establish the ambient-light baseline
key_length = 3        # number of key bits to read after calibration
runs = n_base_reading + key_length
delay = .1
try:
    print "Calibrating..."
    while True:
        try:
            reading = float(s.readline())
            # Phase 1 (i < n_base_reading): collect baseline samples.
            if i < n_base_reading:
                base.append(reading)
            # Phase 2 (i == n_base_reading): derive baseline +/- tolerance,
            # then lock onto the sender's blink period via sync().
            if i == n_base_reading:
                zero, tol = get_stats(base[1:])
                tol += 50
                print "Calibrated to ", zero, "+/-", tol
                print "Enter Key now please"
                delay,std = sync(s,zero,tol)
                wait_delay(s,delay)
            # Phase 3 (i > n_base_reading): sample one bit per period —
            # bright reading = 1, baseline = 0.
            if i > n_base_reading:
                if reading > zero + tol:
                    input_data.append(1)
                else:
                    input_data.append(0)
                # Alternate -std/+std so the sampling clock does not drift.
                if i%2 == 0:
                    wait_delay(s,delay-std)
                else:
                    wait_delay(s,delay+std)
            if i >= runs:
                break
            i += 1
        except:
            # Skips unparsable serial lines (also hides other errors).
            pass
    #
    #count = 0.
    #for i in range(key_length):
    #    if input_data[i] == original[i]:
    #        count += 1
    #        print original[i], input_data[i], input_data[i] == original[i]
    #print str(count/key_length) + "%"
finally:
    # Always release the serial port, even on error or Ctrl-C.
    s.close()
def data_to_binary(input_data):
    """Interpret a list of bits (most significant first) as an integer."""
    bits = ''.join(str(bit) for bit in input_data)
    return int(bits, 2)
# --- Combine the sensed private key with the public key and decrypt ---
privateKey = data_to_binary(input_data)  # bits read from the light sensor above
# (Fix: a stray bare ``s`` expression sat here in the original; removed — it
# evaluated the serial-port object and discarded it.)
wholeThing = privateKey * publicKey
# Expand the combined key into a bit array at least as long as the payload.
key_str = "{0:b}".format(wholeThing)
key_arr = array.array('B', [])
for c in key_str:
    key_arr.append(int(c))
encryptedContent = str(data[2:])
encrypted = array.array('B', encryptedContent)
tmp_arr = key_arr
# Fix: replaced the isLonger flag loop (with its unused ``howLong`` variable)
# by the equivalent direct condition.
while len(encrypted) > len(key_arr):
    key_arr = key_arr + tmp_arr
# XOR each payload byte with the corresponding key bit.
for i in range(len(encrypted)):
    encrypted[i] ^= key_arr[i]
out = open('decrypted.txt', 'w')
out.write(encrypted.tostring())
out.close()
|
from django.shortcuts import render
# Create your views here.
from django.shortcuts import HttpResponse
# Create your views here.
def home(request):
    """Landing page view: returns a static greeting."""
    return HttpResponse("hello India - welcome -!! Its jangp webpage")
import os
import time
import shutil
# File extensions (lower-case, without the leading dot) recognized by the sorter.
PHOTO_EXT = ("jpg", "jpeg", "png", "psd", "tif")
VIDEO_EXT = ("mp4", "avi", "mov")
DOCUMENT_EXT = ("pdf", "docx", "doc", "txt")
def _file_destination(ext):
    """Map an extension (no dot) to a destination subfolder name, or None."""
    if ext in PHOTO_EXT:
        return "Photos"
    if ext in VIDEO_EXT:
        return "Videos"
    if ext in DOCUMENT_EXT:
        return "Documents"
    return None


def main():
    """Watch a folder forever and sort incoming files into Photos/Videos/Documents.

    Files with unrecognized extensions stay in place.  (An "Other" folder
    variable existed in the original but was never used; kept that behavior —
    TODO confirm whether unmatched files should move to Other.)
    """
    magic_folder_path = "/Users/derek/Desktop/test"
    # while not os.path.exists(magic_folder_path):
    #     magic_folder_path = input("Path to folder to sort:")
    while True:
        file_list = os.listdir(magic_folder_path)
        num_files = len(file_list)
        for entry in file_list:
            _, ext = os.path.splitext(entry)
            dest = _file_destination(ext[1:])
            if dest is not None:
                dest_dir = os.path.join(magic_folder_path, dest)
                # Create the destination lazily, as the original did.
                if not os.path.exists(dest_dir):
                    os.makedirs(dest_dir)
                shutil.move(os.path.join(magic_folder_path, entry), dest_dir)
        # Sleep until the folder's entry count changes (something added/removed).
        while num_files == len(os.listdir(magic_folder_path)):
            time.sleep(5)
if __name__ == '__main__':
    # Runs forever; stop with Ctrl-C.
    main()
|
import operator
from django.shortcuts import render
from rest_framework import status
from rest_framework.response import Response
from rest_framework.decorators import api_view, authentication_classes, permission_classes
from rest_framework.permissions import IsAuthenticated
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from .serializers import UserSignupSerializer
from django.contrib.auth import get_user_model
from .models import Movie, User, Rating, Tempmovieti, Recommendationmovie, Recommendationmovieti
import pandas as pd
import numpy as np
import pymysql
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from ast import literal_eval
from collections import Counter
import json
from sklearn.metrics.pairwise import cosine_similarity
@api_view(['GET'])
@authentication_classes([JSONWebTokenAuthentication])
@permission_classes([IsAuthenticated])
def analysis_user_favorite(request):
    """Summarize the authenticated user's rating behaviour.

    Returns rated_movie_cnt, average_rate (2 decimals), most_rate (the most
    frequent score as {score: count}) and cnt_rate (count per score 1-5).
    Users with no ratings get the zeroed default payload.
    """
    data = {
        "rated_movie_cnt": 0,
        "average_rate": 0,
        "most_rate": {},
        "cnt_rate": {1: 0, 2: 0, 3: 0, 4: 0, 5: 0},
    }
    # Single queryset, evaluated once (the original ran the same filter twice
    # and looped with range(len(...))).
    ratings = Rating.objects.filter(uid_id=request.user.uid)
    rate_list = [r.rating for r in ratings]
    if rate_list:
        # Number of rated movies.
        data["rated_movie_cnt"] = len(rate_list)
        # Average score, rounded to 2 decimals.
        data["average_rate"] = np.around(np.mean(np.array(rate_list)), 2)
        # Score frequencies; most_common(1) keeps the (score, count) pair.
        cnt = Counter(rate_list)
        data["most_rate"] = dict(cnt.most_common(1))
        for score, count in cnt.items():
            data["cnt_rate"][score] = count
    return Response(data, status=status.HTTP_200_OK)
# 영화 선호 태그
# 선호 국가
# 선호 장르 -> 몇편인지
def _frequency_dict(series, min_df):
    """Term -> total-occurrence-count dict for a text column.

    Terms appearing in fewer than ``min_df`` documents are dropped; keys are
    ordered by the vectorizer's column index, values are summed counts across
    all rows.  (This logic was previously copy-pasted three times below.)
    """
    vect = CountVectorizer(min_df=min_df)
    matrix = vect.fit_transform(series)
    # Order vocabulary entries by their column index so keys line up with counts.
    ordered = dict(sorted(vect.vocabulary_.items(), key=operator.itemgetter(1)))
    counts = matrix.toarray().sum(axis=0).tolist()
    return dict(zip(ordered.keys(), counts))


@api_view(['GET'])
@authentication_classes([JSONWebTokenAuthentication])
@permission_classes([IsAuthenticated])
def analysis_movie_favorite(request):
    """Profile the user's rated movies: genre, keyword and country frequencies."""
    # NOTE(review): DB credentials are hard-coded here; they belong in
    # settings or environment variables.
    conn = pymysql.connect(
        user='jiahn',
        password='jiahn1234',
        database='bigdatapjt',
        host='J5B305.p.ssafy.io',
        port=3306,
        charset='utf8',
        autocommit=True,
        cursorclass=pymysql.cursors.DictCursor
    )
    user_id = request.user.uid
    # Every movie the current user has rated, with its metadata columns.
    sql = f'SELECT t1.movieid, t1.tmdb_id, t1.title, t1.genre, t1.release_date, t1.production_countries, t1.runtime, t1.vote_average, t1.vote_count, t1.cast, t1.keywords FROM bigdatapjt.movie as t1 where t1.movieid in (select t2.movieid from bigdatapjt.rating t2 where t2.uid_id={user_id});'
    cursor = conn.cursor()
    try:
        cursor.execute(sql)
        queryset = cursor.fetchall()
    finally:
        # Fix: close the cursor/connection even if the query raises
        # (previously they leaked on any exception).
        cursor.close()
        conn.close()
    df = pd.DataFrame(queryset)
    # Genres appearing in at least 2 rated movies.
    genre_dict = _frequency_dict(df['genre'], 2)
    # Keywords: underscore-join multi-word keywords so each counts as one token.
    df['keywords'] = df['keywords'].str.replace(" ", "_")
    keywords_dict = _frequency_dict(df['keywords'], 4)
    # Countries: parse the stored list-of-dicts, keep names, underscore-join.
    df['production_countries'] = df['production_countries'].apply(literal_eval)
    df['production_countries'] = df['production_countries'].apply(
        lambda x: [y['name'] for y in x])
    df = df.astype({'production_countries': 'str'})
    df['production_countries'] = df['production_countries'].str.replace(
        " ", "_")
    country_dict = _frequency_dict(df['production_countries'], 2)
    data = {
        "genre_dict": genre_dict,
        "keywords_dict": keywords_dict,
        "country_dict": country_dict
    }
    return Response(data, status=status.HTTP_200_OK)
# Sign up
@api_view(['POST'])
def signup(request):
    """Register a new user.

    Expects ``password``, ``passwordConfirmation`` and ``email`` in the
    request body.  On success the raw password is hashed, the nickname is
    derived from the e-mail local part, and the user's Tempmovieti row is
    created alongside the account.
    """
    password = request.data.get('password')
    password_confirmation = request.data.get('passwordConfirmation')
    # Reject the request when the two password fields do not match.
    if password != password_confirmation:
        return Response({'error': '비밀번호가 일치하지 않습니다.'}, status=status.HTTP_400_BAD_REQUEST)
    # Default nickname = the part of the e-mail address before the '@'.
    # Guard against a missing email: the original called .split() on None
    # and crashed with AttributeError (HTTP 500) before serializer
    # validation could report the problem.
    email = request.data.get('email') or ''
    nickname_first = email.split('@')[0]
    serializer = UserSignupSerializer(data=request.data)
    # Persist into auth_user
    if serializer.is_valid():
        user = serializer.save()
        # set_password() hashes the raw password (serializer.save() stored it raw).
        user.set_password(password)
        user.nickname = nickname_first
        user.save()
        # Create the movieti-result row together with the new user.
        # (The saved instance already carries its uid; no need to re-query.)
        Tempmovieti.objects.create(uid=user.uid)
        return Response(serializer.data, status=status.HTTP_201_CREATED)
    else:
        return Response(serializer.errors, status=status.HTTP_406_NOT_ACCEPTABLE)
# E-mail duplication check API
@api_view(['POST'])
def checkEmail(request):
    """Return 200 when ``user_email`` is available, 400 when it is taken."""
    user_email = request.data.get('user_email')
    try:
        # Address already registered.
        u_email = User.objects.get(email=user_email)
    except User.DoesNotExist:
        # Not registered yet -> available.  The original bare ``except:``
        # also swallowed unrelated errors (DB failures, etc.), which would
        # have been misreported as "available".
        u_email = None
    if u_email is None:
        return Response({'success': '사용가능한 이메일입니다.'}, status=status.HTTP_200_OK)
    else:
        return Response({'error': '동일한 이메일이 존재합니다.'}, status=status.HTTP_400_BAD_REQUEST)
# Submit the movie survey
@api_view(['POST'])
def survey_result(request):
    """Store the survey ratings, mark the user as surveyed and build
    content-based recommendations.

    ``request.data['result']`` maps tmdb ids to ratings;
    ``request.data['id']`` is the user's e-mail address.
    """
    if request.data.get('result'):
        user_email = request.data.get('id')
        survey_result = request.data.get('result')
        # Create one Rating row per (movie, rating) pair of the survey.
        for param_tmdb, rating in survey_result.items():
            # filter().first() returns None when absent; the original used
            # ``get(...) != None`` which can never be False -- get() raises
            # DoesNotExist instead, turning a missing movie into a 500.
            movie = Movie.objects.filter(tmdb_id=param_tmdb).first()
            user = User.objects.filter(email=user_email).first()
            if movie is not None and user is not None:
                if Rating.objects.filter(uid_id=user.uid, movieid=movie.movieid):
                    return Response({'error': '동일한 영화를 이미 평가했습니다.'}, status=status.HTTP_409_CONFLICT)
                Rating.objects.create(
                    movieid=movie,
                    uid=user,
                    rating=rating,
                    survey=True
                )
        # Flag the user as having completed the survey.
        user = User.objects.get(email=user_email)
        user.surveyed = True
        user.save()
        # Run content-based filtering on the fresh survey data.
        result = survey_result_func(user.uid)
        result_insert(user, result)
        return Response(status=status.HTTP_200_OK)
    else:
        return Response(status=status.HTTP_204_NO_CONTENT)
# Reset the survey so the user can take it again
@api_view(['DELETE'])
@authentication_classes([JSONWebTokenAuthentication])
@permission_classes([IsAuthenticated])
def survey_reset(request):
    """Delete only the ratings the current user created through the survey."""
    Rating.objects.filter(uid=request.user.uid, survey=True).delete()
    return Response(status=status.HTTP_200_OK)
def survey_result_func(userid):
    """Content-based recommendations from the user's survey ratings.

    Loads movie metadata from ./example.json and the full rating table
    from MySQL, then for each movie the user rated builds a TF-IDF
    (keywords + genres) cosine-similarity ranking: the most similar
    titles for liked movies (rating >= 3), the *least* similar titles for
    disliked ones.  Returns up to 10 movieids the user has not rated.
    """
    with open("./example.json", "r", encoding="utf8") as f:
        contents = f.read() # whole file as one string
    json_data_realreal = json.loads(contents)
    df4 = pd.DataFrame(json_data_realreal)
    # --- load the rating table from the DB ---------------------------------
    # NOTE(review): credentials are hard-coded here (and duplicated in
    # recomm_movieti); move them to settings / environment variables.
    conn = pymysql.connect(
        user='jiahn',
        password='jiahn1234',
        database='bigdatapjt',
        host='J5B305.p.ssafy.io',
        port=3306,
        charset='utf8',
        autocommit=True,
        cursorclass=pymysql.cursors.DictCursor
    )
    cursor = conn.cursor()
    sql = 'select * from bigdatapjt.rating'
    cursor.execute(sql)
    res = cursor.fetchall()
    conn.close()
    df3 = pd.DataFrame(res)
    me = userid # uid of the currently logged-in user
    # Keep only this user's ratings, best rated first.
    df3 = df3.loc[df3['uid_id'] == userid, ['movieid', 'rating']].sort_values('rating', ascending=False)
    # print(df3)
    movieid_lst = [df3['movieid']]
    # print(movieid_lst)
    # Split the rated movies into liked (>= 3) and disliked (< 3);
    # movie_num collects everything rated, to exclude it from the output.
    good_movie = []
    bad_movie = []
    movie_num = []
    for c, i in df3.iterrows():
        if i.rating >= 3:
            good_movie.append(i.movieid)
            movie_num.append(i.movieid)
        else:
            bad_movie.append(i.movieid)
            movie_num.append(i.movieid)
    dataframe_table = pd.DataFrame()
    # print(good_movie,bad_movie)
    # --- weighted rating ----------------------------------------------------
    C = df4['vote_average'].mean()
    m = df4['vote_count'].quantile(0.6)
    def weighted_vote_average(record):
        # Blend the movie's own average R with the global mean C, weighted
        # by its vote count v against the 60th-percentile threshold m.
        v = record['vote_count']
        R = record['vote_average']
        return ((v/(v+m)) * R) + ((m/(m+v)) * C)
    df4['weighted_vote'] = df4.apply(weighted_vote_average, axis=1)
    # --- build the TF-IDF text: keywords + genres per movie -----------------
    df4['keywords'] = df4['keywords'].apply(literal_eval)
    df4['genre'] = df4['genre'].apply(literal_eval)
    df4['genre'] = df4['genre'].apply(lambda x: [y['name'] for y in x])
    df4['keywords'] = df4['keywords'].apply(lambda x: [y['name'] for y in x])
    df4['recommend_item'] = df4['keywords'].apply(lambda x: ' '.join(x))
    df4['recommend_item'] += df4['genre'].apply(lambda x: ' '.join(x))
    # print(df4['keywords'])
    # print(type(df4['keywords']))
    # For every liked movie take the 20 most similar titles and keep the
    # 5 with the best weighted vote.
    for i in good_movie:
        input_movie = i
        input_title = df4[df4['movieid'] == input_movie]['title']
        tfidf_vec = TfidfVectorizer(ngram_range=(1, 5))
        # NOTE(review): the TF-IDF matrix depends only on df4, so it is
        # recomputed identically on every loop iteration -- hoisting it
        # out of both loops would be much faster.
        tfidf_matrix = tfidf_vec.fit_transform(df4['recommend_item'])
        genres_similarity = cosine_similarity(tfidf_matrix, tfidf_matrix)
        # Negated argsort -> indices ordered most-similar first.
        similar_index = np.argsort(-genres_similarity)
        movie_index = df4[df4['movieid'] == input_movie].index.values
        similar_movies = similar_index[movie_index, :20]
        similar_movies_index = similar_movies.reshape(-1)
        dataframe_table = pd.concat([dataframe_table, df4.loc[similar_movies_index, [
            'title', 'movieid', 'weighted_vote']].sort_values('weighted_vote', ascending=False).head(5)])
    # For every disliked movie do the opposite: take the least similar.
    for i in bad_movie:
        input_movie = i
        input_title = df4[df4['movieid'] == input_movie]['title']
        tfidf_vec = TfidfVectorizer(ngram_range=(1, 5))
        tfidf_matrix = tfidf_vec.fit_transform(df4['recommend_item'])
        genres_similarity = cosine_similarity(tfidf_matrix, tfidf_matrix)
        # Plain argsort -> least-similar first.
        similar_index = np.argsort(genres_similarity)
        movie_index = df4[df4['movieid'] == input_movie].index.values
        similar_movies = similar_index[movie_index, :20]
        similar_movies_index = similar_movies.reshape(-1)
        dataframe_table = pd.concat([dataframe_table, df4.loc[similar_movies_index, [
            'title', 'movieid', 'weighted_vote']].sort_values('weighted_vote', ascending=False).head(5)])
    # Keep the 15 best candidates overall ...
    dataframe_table = dataframe_table.sort_values(
        'weighted_vote', ascending=False)[:15]
    # ... then drop duplicates and movies the user already rated.
    result = []
    for c, i in dataframe_table.iterrows():
        if i.movieid not in result and i.movieid not in movie_num:
            result.append(i.movieid)
    # print(result)
    return result[:10]
#########################################################설문 기반 추천 끝!#########################################################################
def result_insert(user, result):
    """Persist one Recommendationmovie row per recommended movieid."""
    for movie_pk in result:
        Recommendationmovie.objects.create(
            uid=user,
            movieid=Movie.objects.get(movieid=movie_pk),
        )
def recomm_movieti(myuser):
    """Collaborative recommendation based on the user's 'movieti' type.

    Builds a user x title rating matrix restricted to users sharing
    ``myuser``'s movieti, picks the 3 users most similar to myuser by
    cosine similarity, collects the movies those users rated >= 4, and
    re-ranks them with the same TF-IDF keywords+genre similarity used in
    survey_result_func.  Returns up to 10 movieids.
    """
    with open("./example.json", "r", encoding="utf8") as f:
        contents = f.read() # whole file as one string
    json_data_realreal = json.loads(contents)
    df1 = pd.DataFrame(json_data_realreal)
    my_movieTi = myuser.movieti
    # NOTE(review): hard-coded DB credentials; move to settings/env vars.
    conn = pymysql.connect(
        user='jiahn',
        password='jiahn1234',
        database='bigdatapjt',
        host='J5B305.p.ssafy.io',
        port=3306,
        charset='utf8',
        autocommit=True,
        cursorclass=pymysql.cursors.DictCursor
    )
    cursor = conn.cursor()
    # NOTE(review): SQL built with f-string interpolation (here and twice
    # below).  Use parameterized queries (cursor.execute(sql, params)) to
    # avoid SQL injection.
    sql = f'select * from bigdatapjt.rating where movieti = "{my_movieTi}"'
    cursor.execute(sql)
    res = cursor.fetchall()
    # conn.close()
    df2 = pd.DataFrame(res)
    # Join ratings with the movie metadata ...
    user_movie_rating = pd.merge(df2, df1, on='movieid')
    # ... and pivot into a user (rows) x title (columns) rating matrix.
    user_movie_rating = user_movie_rating.pivot_table(
        'rating', columns='title', index='uid_id')
    # Unrated cells count as 0 for the similarity computation.
    user_movie_rating.fillna(0, inplace=True)
    # Pairwise cosine similarity between users.
    similar_user = cosine_similarity(user_movie_rating, user_movie_rating)
    user_df = pd.DataFrame(
        data=similar_user, index=user_movie_rating.index, columns=user_movie_rating.index)
    # Users ordered by similarity to myuser; position 0 is myuser itself.
    ans = user_df[[myuser.uid]].sort_values(by=myuser.uid, ascending=False)[
        :6]
    similar_userlist = []
    for c, i in ans.iterrows():
        similar_userlist.append(c)
    # Drop myself (position 0) and keep the next 3 most similar users.
    similar_userlist = similar_userlist[1:4]
    # --- movies those 3 similar users rated 4 or higher ---------------------
    similar_userlist = tuple(similar_userlist)
    sql = f'select * from bigdatapjt.rating where uid_id in {similar_userlist} and rating >=4'
    cursor.execute(sql)
    res = cursor.fetchall()
    df5 = pd.DataFrame(res)
    user_movie_goodlist = []
    for c, i in df5.iterrows():
        user_movie_goodlist.append(i.movieid)
    user_movie_goodlist = tuple(user_movie_goodlist)
    sql = f'select * from bigdatapjt.movie where movieid in {user_movie_goodlist}'
    cursor.execute(sql)
    res = cursor.fetchall()
    conn.close()
    df6 = pd.DataFrame(res)
    user_good_movielist = []
    for c, i in df6.iterrows():
        user_good_movielist.append(i.movieid)
    # --- content-based re-ranking (same scheme as survey_result_func) ------
    C = df1['vote_average'].mean()
    m = df1['vote_count'].quantile(0.6)
    def weighted_vote_average(record):
        # Blend the movie's average R with the global mean C, weighted by
        # its vote count v against the 60th-percentile threshold m.
        v = record['vote_count']
        R = record['vote_average']
        return ((v/(v+m)) * R) + ((m/(m+v)) * C)
    df1['weighted_vote'] = df1.apply(weighted_vote_average, axis=1)
    df1['keywords'] = df1['keywords'].apply(literal_eval)
    df1['genre'] = df1['genre'].apply(literal_eval)
    df1['genre'] = df1['genre'].apply(lambda x: [y['name'] for y in x])
    df1['keywords'] = df1['keywords'].apply(lambda x: [y['name'] for y in x])
    df1['recommend_item'] = df1['keywords'].apply(lambda x: ' '.join(x))
    df1['recommend_item'] += df1['genre'].apply(lambda x: ' '.join(x))
    # For every movie the similar users liked, take the 20 most similar
    # titles and keep the 5 with the best weighted vote.
    user_good_movielist_table = pd.DataFrame()
    for i in user_good_movielist:
        input_movie = i
        input_title = df1[df1['movieid'] == input_movie]['title']
        tfidf_vec = TfidfVectorizer(ngram_range=(1, 5))
        # NOTE(review): TF-IDF matrix depends only on df1 and is recomputed
        # identically on every iteration -- hoist it out of the loop.
        tfidf_matrix = tfidf_vec.fit_transform(df1['recommend_item'])
        genres_similarity = cosine_similarity(tfidf_matrix, tfidf_matrix)
        # Negated argsort -> most-similar first.
        similar_index = np.argsort(-genres_similarity)
        movie_index = df1[df1['movieid'] == input_movie].index.values
        similar_movies = similar_index[movie_index, :20]
        similar_movies_index = similar_movies.reshape(-1)
        user_good_movielist_table = pd.concat([user_good_movielist_table, df1.loc[similar_movies_index, [
            'title', 'movieid', 'weighted_vote']].sort_values('weighted_vote', ascending=False).head(5)])
    user_good_movielist_table = user_good_movielist_table.sort_values(
        by='weighted_vote', ascending=False)
    # De-duplicate while keeping the weighted-vote order.
    res = []
    for c, i in user_good_movielist_table.iterrows():
        if i.movieid not in res:
            res.append(i.movieid)
    return res[:10]
def movieti_result_insert(user, result):
    """Persist one Recommendationmovieti row per recommended movieid."""
    for movie_pk in result:
        Recommendationmovieti.objects.create(
            uid=user,
            movieid=Movie.objects.get(movieid=movie_pk),
        )
|
import numpy as np
import matplotlib.pyplot as plt
# Plot the standard normal pdf on [-5, 5] without using scipy.
x = np.linspace(-5, 5, 101) # 101 evenly spaced points from -5 to 5
print(x)
y = (1 / np.sqrt(2 * np.pi)) * np.exp(- x ** 2 / 2 ) # pdf of N(0, 1): mean 0, variance 1
print(y)
plt.figure(figsize=(10, 6)) # set the figure size
plt.plot(x, y)
plt.xlabel("x") # x-axis label
plt.ylabel("y") # y-axis label
plt.grid() # show a grid on the plot
plt.title("Normal Distribution without scipy") # plot title
plt.legend(["N(0, 1)"]) # legend
plt.show() # display the plot
# Uses pretrained VGG16 model as a feature extractor
from tensorflow.keras.applications import VGG16
from tensorflow.keras.applications import imagenet_utils
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.preprocessing.image import load_img
from sklearn.preprocessing import LabelEncoder
from io2.hdf5datasetwriter import HDF5DatasetWriter
from imutils import paths
import numpy as np
import progressbar
import argparse
import random
import os
if __name__ == "__main__":
    # CLI: input image directory, output HDF5 path, batching parameters.
    ap = argparse.ArgumentParser()
    ap.add_argument("-d", "--dataset", required=True, help="Path to input dataset")
    ap.add_argument("-o", "--output", required=True, help="Path to output dataset")
    ap.add_argument("-b", "--batch-size", type=int, default=32, help="Batch size of image through network")
    ap.add_argument("-s", "--buffer-size", type=int, default=1000, help="Feature extraction buffer size")
    args = vars(ap.parse_args())
    bs = args["batch_size"]
    print("[INFO] Loading images...")
    img_paths = list(paths.list_images(args["dataset"]))
    # Shuffle so later splits over the HDF5 file are unbiased.
    random.shuffle(img_paths)
    # Class label = name of each image's parent directory.
    labels = [p.split(os.path.sep)[-2] for p in img_paths]
    le = LabelEncoder()
    labels = le.fit_transform(labels)
    print("[INFO] Loading network...")
    # include_top=False drops VGG16's FC head; the final max-pooling
    # volume (reshaped to 512*7*7 below) is used as the feature vector.
    model = VGG16(weights="imagenet", include_top=False)
    model.summary()
    # init the dataset writer
    dataset = HDF5DatasetWriter((len(img_paths), 512*7*7), args["output"], data_key="features", buf_size=args["buffer_size"])
    dataset.store_class_labels(le.classes_)
    widgets = ["Extracting Features: ", progressbar.Percentage(), " ", progressbar.Bar(), " ", progressbar.ETA()]
    pbar = progressbar.ProgressBar(maxval=len(img_paths), widgets=widgets).start()
    # Push the images through the network batch by batch.
    for i in np.arange(0, len(img_paths), bs):
        batch_paths = img_paths[i:i+bs]
        batch_labels = labels[i:i+bs]
        batch_imgs = []
        for (j, path) in enumerate(batch_paths):
            img = load_img(path, target_size=(224, 224))
            img = img_to_array(img)
            # preprocess by expanding dimensions and subtracting mean RGB from imagenet dataset
            img = np.expand_dims(img, axis=0)
            img = imagenet_utils.preprocess_input(img)
            batch_imgs.append(img)
        batch_imgs = np.vstack(batch_imgs)
        features = model.predict(batch_imgs, batch_size=bs)
        # reshape flattened feature vector of MaxPooling2D output
        features = features.reshape((features.shape[0], 512*7*7))
        dataset.add(features, batch_labels)
        pbar.update(i)
    dataset.close()
    pbar.finish()
# -*- coding: utf-8 -*-
"""
Created on Tue May 1 20:51:38 2018

@author: Administrator

NumPy basics walkthrough: array construction, dtypes, shape
manipulation, slicing, boolean indexing, broadcasting, iteration
and transposition.
"""
# numpy.array(object, dtype=None, copy=True, order=None, subok=False, ndmin=0)
#   dtype : desired data type of the array
#   copy  : whether the object is copied
#   order : memory layout order
#   ndmin : minimum number of dimensions of the returned array
import numpy as np
# 1-D ndarray
a = np.array([1,2,3])
print(a)
# 2-D ndarray
b = np.array([[1,2],[3,4]])
print(b)
# ndmin=3 forces at least three dimensions
c = np.array([1,2,3,4,5], ndmin = 3)
print(c)
# NOTE: np.complex / np.float / np.int were deprecated aliases of the
# Python builtins and were removed in NumPy 1.24 -- use the builtins.
d = np.array([1,2,3], dtype = complex)
print("complex:", d)
print(d.flags)
# np.arange(start, stop, step, dtype): array from a numeric range
arange = np.arange(1, 24, 2, dtype = float)
print("arange:", arange)
# np.linspace(start, stop, num, endpoint): num evenly spaced samples;
# endpoint=False excludes the stop value
linspace = np.linspace(10, 20, 5, endpoint = False)
print("linspace:", linspace)
# np.logspace() also returns an ndarray
# np.zeros defaults to float dtype
zeros = np.zeros(5, dtype = int)
print("zeros:", zeros)
zeros1 = np.zeros(5)
print("zeros:", zeros1)
onesArray = np.ones(5)
print("ones:", onesArray)
# build an array from existing data
asArray = np.asarray([1,2,3,4], dtype = float)
print("as array:", asArray)
# NumPy numeric types are instances of dtype objects
# ndarray attributes: shape, reshape, ndim, itemsize
shapeArray = np.array([[1,2,3],[4,5,6]])
print(shapeArray.shape) # (2, 3): two rows, three columns
reshape = np.array([[1,2,3],[4,5,6]])
reshape.shape = (3,2) # reshape in place to three rows, two columns
print(reshape)
# ndim: number of dimensions
arange = np.arange(24)
print(arange)
print(arange.ndim)
# itemsize: size in bytes of each element
print(arange.itemsize)
arange1 = arange.reshape(2,4,3)
print(arange1)
# --- slicing and indexing ------------------------------------------------
# ndarray contents can be read or modified through indexing or slicing.
# Three kinds of indexing: field access, basic slicing, advanced indexing.
a = np.arange(10)
print(a)
print(a[5])
print(a[2:7:2])
print(a[2:])
np.array([[1,2,3], [3,4,5], [4,5,6]])
# Basic slicing extends Python slicing to n dimensions.
# Advanced indexing always returns a *copy* of the data;
# two kinds: integer indexing and boolean indexing.
a = np.array([[1,2,3], [2,3,4], [3,4,5]])
print(a)
print(a[ a > 3]) # boolean indexing
nanArray = np.array([np.nan, 1, 2, np.nan,3, 4])
print(nanArray)
print(nanArray[~np.isnan(nanArray)]) # drop the NaNs
# --- broadcasting --------------------------------------------------------
# Broadcasting lets arithmetic work on arrays of different shapes;
# identical shapes operate elementwise.
a = np.array([1,2,3,4])
b = np.array([10,20,30,40])
print(a * b)
# The 1-D b is broadcast across every row of the 2-D a.
a = np.array([[0.0,0.0,0.0],[10.0,10.0,10.0],[20.0,20.0,20.0],[30.0,30.0,30.0]])
b = np.array([1.0,2.0,3.0])
print(a)
print(b)
print(a + b)
# --- iteration -----------------------------------------------------------
a = np.arange(0,60,5)
#print(a)
a = a.reshape(3,4) # 3x4 two-dimensional array
#print(a)
#for x in np.nditer(a):
#    print(x)
# matrix transpose
b = a.T
print(b)
|
# -*- coding: utf-8 -*
from __future__ import absolute_import
import geocoder
import httplib2
import os
import re
import requests
import sys
import time
from BeautifulSoup import BeautifulSoup
from bot.models import Cities, CityPhotos
from django.core.management.base import BaseCommand
from TelegramBot.settings import BASE_DIR
import logging
# Module-level logger for the cron management command.
logger = logging.getLogger('cron')
# Python 2 only: re-expose setdefaultencoding and force utf-8 as the
# default string encoding for the scraped Cyrillic text.
reload(sys)
sys.setdefaultencoding('utf-8')
class Command(BaseCommand):
    # Django management command (Python 2 / old BeautifulSoup API).
    help = 'Get content from https://www.phototowns.ru web-site'
    def handle(self, *args, **options):
        """
        Get content from https://www.phototowns.ru web-site:
        - list of Russian cities
        - Top 10 photo of each city
        - Author of photos
        Return: dict of contents
        """
        city_name = ''
        try:
            logger.debug('Start parser')
            r = requests.get('https://www.phototowns.ru/all')
            soup = BeautifulSoup(r.text)
            # Each city card is a fixed-size <div>; match on its inline style.
            cities = soup.findAll('div', style='width: 200px; height: 300px; float: left; margin: 10px;')
            city_href = [city.findAll('a') for city in cities]
            for href in city_href:
                city_name = href[1]['title'].encode('utf-8')
                # Keep only the city name: cut at ' (', '.' or the UTF-8
                # bytes \xd0\xb8 (Cyrillic 'и').
                city_name = re.split(' \\(|\\.| \xd0\xb8', city_name)[0]
                city_url = href[1]['href'].encode('utf-8')
                author = href[2].findAll('a', text=re.compile(r'.*'))[0].encode('utf-8')
                r = requests.get(city_url)
                soup = BeautifulSoup(r.text)
                # Only the first ten gallery items (= top 10 photos).
                images = soup.findAll('dl', {'class': 'gallery-item'})[0:10]
                images_href = [url.findAll('a')[0]['href'] for url in images]
                # get TOP 10 images urls
                img_url_lst = get_img_urls(images_href)
                # get city name in English
                city_name_en = get_city_name_en(city_name)
                # Update or create Cities table (skip when geocoding failed)
                if city_name_en:
                    update_city = {'city_name': city_name,
                                   'city_name_en': city_name_en['city'],
                                   'geo_latitude_min': city_name_en['latitude_min'],
                                   'geo_latitude_max': city_name_en['latitude_max'],
                                   'geo_longitude_min': city_name_en['longitude_min'],
                                   'geo_longitude_max': city_name_en['longitude_max'],
                                   'city_url': city_url,
                                   'author': author}
                    update_cities_table(update_city)
                    # Update or create City_photos table
                    update_city_photos_table(img_url_lst, city_name, author)
            logger.debug('All cities are updated')
        except Exception, e:
            # Log which city the parser died on, then finish quietly.
            logger.error(str(e) + '--> {0}'.format(city_name))
def get_city_name_en(city_name):
    """
    Get city name (Eng) by city name(Ru)
    :param city_name: city name(Ru)
    :return: Dict of city name(Eng), location(latitude), location(longitude)
    """
    geo_data = {}
    try:
        # Yandex geocoder resolves the Russian name; the response carries
        # a bounding box with southwest/northeast corners.
        g = geocoder.yandex(city_name)
        geo_data['city'] = str(g.json['city']).encode('utf-8')
        geo_data['longitude_min'] = g.json['bbox']['southwest'][1]
        geo_data['longitude_max'] = g.json['bbox']['northeast'][1]
        geo_data['latitude_min'] = g.json['bbox']['southwest'][0]
        geo_data['latitude_max'] = g.json['bbox']['northeast'][0]
        return geo_data
    except Exception, e:
        # On failure log and return whatever was collected (possibly {}).
        logger.error(str(e) + '-->City name: {0} and Geo data: {1}'.format(city_name, geo_data))
        return geo_data
def get_img_urls(images_href):
    """
    Get image url from html <a href="..."> tag
    :param images_href: list of html <a href="..."> tag
    :return: list of image urls
    """
    try:
        img_lst = []
        for image in images_href:
            r = requests.get(image)
            soup = BeautifulSoup(r.text)
            try:
                # Throttle a little between page fetches.
                time.sleep(0.5)
                images_urls = soup.findAll('div', {'class': 'big_pic'})[0].findAll('img')[0]['src']
                img_lst.append(images_urls)
            except IndexError:
                # Page without the expected 'big_pic' block: skip it.
                logger.debug('INDEX ERROR--> {0}'.format(image))
        return img_lst
    except Exception, e:
        # NOTE(review): this path returns None implicitly; callers iterate
        # the result -- confirm that is intended.
        logger.error(str(e) + '--> {0}'.format(images_href))
def update_cities_table(update_city):
    """
    Update or create Cities table
    :param update_city: all Cities table fields
    :return: None
    """
    try:
        # Match on the full field set; create the row when absent.
        Cities.objects.update_or_create(city_name=update_city['city_name'],
                                        city_name_en=update_city['city_name_en'],
                                        geo_latitude_min=update_city['geo_latitude_min'],
                                        geo_latitude_max=update_city['geo_latitude_max'],
                                        geo_longitude_min=update_city['geo_longitude_min'],
                                        geo_longitude_max=update_city['geo_longitude_max'],
                                        city_url=update_city['city_url'],
                                        author=update_city['author'],
                                        defaults=update_city)
    except Exception:
        # Fallback: on any error just refresh city_name for the row with
        # this English name (e.g. the Russian name changed).
        Cities.objects.filter(city_name_en=update_city['city_name_en']).update(city_name=update_city['city_name'])
def update_city_photos_table(img_url_lst, city_name, author):
    """
    Update or create CityPhotos table
    :param img_url_lst: list of image urls
    :param city_name: city name
    :param author: photos author name
    :return: None
    """
    try:
        city = Cities.objects.get(city_name=city_name, author=author)
        # Create if not exists dirs for city photos and save them into it
        # (httplib2 with a '.cache' directory avoids re-downloading).
        h = httplib2.Http('.cache')
        city_path = os.path.join(BASE_DIR, 'img', city.city_name_en)
        if not os.path.exists(city_path):
            os.makedirs(city_path)
        logger.debug('CITY --> {0}'.format(city.city_name_en))
        for url in img_url_lst:
            current_dir = os.path.join(BASE_DIR, 'img', city.city_name_en, url.split('/')[-1])
            # Only download when the file is not on disk yet.
            if not os.path.exists(current_dir):
                response, content = h.request(url)
                with open(current_dir, 'wb') as f:
                    f.write(content)
            update_photos = {'photo_url': url,
                             'photo_path': current_dir,
                             'city_id': city}
            CityPhotos.objects.update_or_create(photo_url=url,
                                                city_id=city,
                                                photo_path=current_dir,
                                                defaults=update_photos)
    except Cities.DoesNotExist:
        # ignore duplicate city ID
        pass
    except CityPhotos.DoesNotExist, e:
        logger.error(str(e) + '--> {0}'.format(city_name))
|
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.pipeline import Pipeline
import pandas as pd
import pickle
# Pretrained customer-behaviour model, loaded once at import time.
# NOTE(review): pickle.load executes arbitrary code from the file -- only
# load trusted model files.  The open() handle is also never closed.
customer_behavior_model = pickle.load(open('/home/cdsw/models/final_model.sav', 'rb'))
# Model input schema (column order matters, see predict() below):
#Inputs:
#recency int64
#history float64
#used_discount int64
#used_bogo int64
#is_referral int64
#channel object -> one hot encoded
#offer object -> one hot encoded
#Target:
#conversion int64
def predict(data):
    """Score one customer record with the pretrained behaviour model.

    ``data`` is a mapping of the seven input fields; values may arrive as
    strings and are coerced to float for the numeric columns.
    """
    frame = pd.DataFrame(data, index=[0])
    frame.columns = ['recency', 'history', 'used_discount', 'used_bogo', 'is_referral', 'channel', 'offer']
    # The model expects the numeric inputs as floats; the two object
    # columns (channel, offer) pass through unchanged.
    for numeric_col in ('recency', 'history', 'used_discount', 'used_bogo', 'is_referral'):
        frame[numeric_col] = frame[numeric_col].astype(float)
    return {'result': customer_behavior_model.predict(frame)[0]}
#{
# "recency": “6”,
# "history": "329.08",
# "used_discount": “1”,
# "used_bogo": "1",
# "is_referral": "1",
# "channel": "Web",
# "offer": "No Offer"
#}
#{
# "result": "1"
#} |
import numpy as np
import os.path
import csv
import pandas as pd
import nres_comm as nr
def stds_addline(types="",fnames="",navgs="",sites="",cameras="",jdates="",flags=""):
    '''
    Reads the standards.csv file, appends a line containing the data in the
    argument list, sorts the resulting list into increasing time order,
    and writes the result back out to standards.csv.

    Calling this routine with no arguments causes the standards.csv file
    to be sorted into time order, without otherwise changing it.

    All arguments are equal-length sequences (one element per row to
    append); the default empty strings mean "append nothing".
    '''
    nr.nresroot = os.getenv("NRESROOT")
    stdfile = nr.nresroot + 'reduced/csv/standards.csv'
    if len(fnames) > 0:
        # One CSV row per element of the argument sequences.
        dat = np.column_stack((types, fnames, navgs, sites, cameras, jdates, flags))
        # 'with' guarantees the append handle is closed even if a write
        # fails (the original leaked it on error).
        with open(stdfile, "a") as outfile:
            csv.writer(outfile).writerows(dat)
    # Re-sort the whole file into increasing time order.
    # NOTE(review): sorts on column 'JDdata' -- confirm this matches the
    # header of standards.csv (the argument here is named 'jdates').
    df = pd.read_csv(stdfile)
    df = df.sort_values('JDdata')
    df.to_csv(stdfile, index=False)
# Advent of Code 2019 day 8, part 1 (Space Image Format checksum).
# Close the input file deterministically instead of leaking the handle.
with open('08.in', 'r') as f:
    values = f.read()[:-1]  # drop the trailing newline
width = 25
height = 6
layer_size = width * height
no_of_layers = len(values) // layer_size
# Split the flat digit stream into one list of characters per layer.
layers = [list(values[i * layer_size:(i + 1) * layer_size])
          for i in range(no_of_layers)]
# The layer with the fewest '0' digits determines the answer:
# its count of '1' digits times its count of '2' digits.
# min() keeps the first minimal layer, matching the original scan.
min_layer = min(layers, key=lambda layer: layer.count('0'))
print(min_layer.count('1') * min_layer.count('2'))
import torchvision
import torch
# ResNet-18 backbone with the final layer replaced by a 2-output head
# (the steering target x/y), restored from trained weights.
model = torchvision.models.resnet18(pretrained=False)
model.fc = torch.nn.Linear(512, 2)
model.load_state_dict(torch.load('best_steering_model_xy.pth'))
device = torch.device('cuda')
model = model.to(device)
# Half precision in eval mode for faster inference on the GPU.
model = model.eval().half()
import torchvision.transforms as transforms
import torch.nn.functional as F
import cv2
import PIL.Image
import numpy as np
# ImageNet channel statistics used to normalise camera frames.
mean = torch.Tensor([0.485, 0.456, 0.406]).cuda().half()
std = torch.Tensor([0.229, 0.224, 0.225]).cuda().half()
def preprocess(image):
    # Convert a camera frame (numpy array -- assumed HxWx3 RGB, TODO
    # confirm) into a normalised half-precision tensor with a leading
    # batch dimension.
    image = PIL.Image.fromarray(image)
    image = transforms.functional.to_tensor(image).to(device).half()
    image.sub_(mean[:, None, None]).div_(std[:, None, None])
    return image[None, ...]
from IPython.display import display
import ipywidgets
import traitlets
from jetbot import Camera, bgr8_to_jpeg
# Live camera preview widget, linked directly to the camera feed.
camera = Camera()
image_widget = ipywidgets.Image()
traitlets.dlink((camera, 'value'), (image_widget, 'value'), transform=bgr8_to_jpeg)
display(image_widget)
from jetbot import Robot
robot = Robot()
# Tuning sliders: overall speed plus PD steering gains and a trim bias.
speed_gain_slider = ipywidgets.FloatSlider(min=0.0, max=1.0, step=0.01, description='speed gain')
steering_gain_slider = ipywidgets.FloatSlider(min=0.0, max=1.0, step=0.01, value=0.2, description='steering gain')
steering_dgain_slider = ipywidgets.FloatSlider(min=0.0, max=0.5, step=0.001, value=0.0, description='steering kd')
steering_bias_slider = ipywidgets.FloatSlider(min=-0.3, max=0.3, step=0.01, value=0.0, description='steering bias')
display(speed_gain_slider, steering_gain_slider, steering_dgain_slider, steering_bias_slider)
# Indicator sliders showing the model prediction and control outputs.
x_slider = ipywidgets.FloatSlider(min=-1.0, max=1.0, description='x')
y_slider = ipywidgets.FloatSlider(min=0, max=1.0, orientation='vertical', description='y')
steering_slider = ipywidgets.FloatSlider(min=-1.0, max=1.0, description='steering')
speed_slider = ipywidgets.FloatSlider(min=0, max=1.0, orientation='vertical', description='speed')
display(ipywidgets.HBox([y_slider, speed_slider]))
display(x_slider, steering_slider)
angle = 0.0
angle_last = 0.0
def execute(change):
    """Camera callback: predict the steering target and drive the motors.

    Runs a PD controller on the angle towards the predicted (x, y) point
    and converts the result into differential motor commands.
    """
    global angle, angle_last
    image = change['new']
    xy = model(preprocess(image)).detach().float().cpu().numpy().flatten()
    x = xy[0]
    # presumably rescales the network's y output into a forward
    # distance for arctan2 -- TODO confirm against training code
    y = (0.5 - xy[1]) / 2.0
    x_slider.value = x
    y_slider.value = y
    speed_slider.value = speed_gain_slider.value
    angle = np.arctan2(x, y)
    # PD term: proportional on the angle, derivative on its change.
    pid = angle * steering_gain_slider.value + (angle - angle_last) * steering_dgain_slider.value
    angle_last = angle
    steering_slider.value = pid + steering_bias_slider.value
    # Differential drive: steering adds to one motor and subtracts from
    # the other; both clamped to [0, 1].
    robot.left_motor.value = max(min(speed_slider.value + steering_slider.value, 1.0), 0.0)
    robot.right_motor.value = max(min(speed_slider.value - steering_slider.value, 1.0), 0.0)
execute({'new': camera.value})
camera.observe(execute, names='value')
# NOTE(review): the observer is detached immediately after being attached
# and the robot stopped -- looks like notebook-cell residue; confirm.
camera.unobserve(execute, names='value')
robot.stop()
from django.urls import path
from . import views
# URL routes for this app.  Both '' and 'index' resolve to the index view
# (and deliberately share the reverse() name 'index').
urlpatterns = [
    path('', views.index, name='index'),
    path('index', views.index, name='index'),
    path('signup', views.register, name='signup'),
    path('home', views.home, name='home'),
    path('logout', views.logout, name='logout'),
    path('update', views.update, name='update'),
]
|
# Copyright (c) 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import types
from yaql.language.exceptions import YaqlException
from yaql.language.exceptions import YaqlExecutionException
from yaql.language.exceptions import YaqlSequenceException
from yaql.tests import YaqlTest
import yaql.tests.testdata
class TestCollections(YaqlTest):
    """YAQL collection behavior: indexing, filtering, list/dict literals,
    membership, attribute projection, joins, aggregation, ranges, and
    generator limiting.

    NOTE(review): the deprecated ``assertEquals`` alias (removed in
    Python 3.12) was replaced with ``assertEqual``; the assertions are
    otherwise unchanged.
    """
    def test_get_by_index(self):
        int_list = [1, 2, 3, 4, 5, 6]
        self.assertEqual(4, self.eval('$[3]', int_list))
    def test_where_by_index(self):
        # A non-predicate argument to where() must be rejected.
        int_list = [1, 2, 3, 4, 5, 6]
        self.assertRaises(YaqlException, self.eval, '$.where(3)', int_list)
    def test_filter_by_predicate(self):
        int_list = [1, 2, 3, 4, 5, 6]
        self.assertEqual([4, 5, 6], list(self.eval('$[$>3]', int_list)))
    def test_filter_by_non_boolean_predicate(self):
        int_list = [1, 2, 3, 4, 5, 6]
        self.assertRaises(YaqlException, self.eval, '$.where($+1)', int_list)
    def test_list_definition(self):
        self.assertEqual([1, 2, 3], self.eval('list(1,2,3)'))
    def test_dict_definition(self):
        self.assertEval({'key1': 'value', 'key2': 100},
                        'dict(key1=>value, key2=>100)')
    def test_wrong_dict_definition(self):
        self.assertRaises(YaqlExecutionException, self.eval, 'dict(a,b,c)')
        self.assertRaises(YaqlExecutionException, self.eval,
                          'dict(a=>b=>c, a=>d=>e)')
    def test_in(self):
        int_list = [1, 2, 3, 4, 5, 6]
        self.assertTrue(self.eval('4 in $', int_list))
    def test_not_in(self):
        int_list = [1, 2, 3, 4, 5, 6]
        self.assertFalse(self.eval('7 in $', int_list))
    def test_iterable_property_attribution(self):
        # Attribute access on an iterable projects the attribute per element.
        data = yaql.tests.testdata.users
        expression = "$.email"
        self.assertEqual(
            ['user1@example.com',
             'user2@example.com',
             'user3@example.com'],
            self.eval(expression, data))
    def test_iterable_property_attribution_2(self):
        data = yaql.tests.testdata.data
        expression = "$.users.email"
        self.assertEqual(
            ['user1@example.com',
             'user2@example.com',
             'user3@example.com'],
            self.eval(expression, data))
    def test_iterable_dictionary_attribution(self):
        data = yaql.tests.testdata.data
        expression = "$.services.'com.mirantis.murano.yaql.name'"
        self.assertEqual(['Service1', 'Service2',
                          'Service3', 'Service4'],
                         self.eval(expression, data))
    def test_join(self):
        data = yaql.tests.testdata.data
        expression = "$.services.join($.users, " \
                     "$1.'com.mirantis.murano.yaql.owner'=$2.id, " \
                     "dict(service_name=>" \
                     "$1.'com.mirantis.murano.yaql.name', " \
                     "user_name=>$2.email))"
        value = self.eval(expression, data=data)
        self.assertEqual('Service1', value[0]['service_name'])
        self.assertEqual('Service2', value[1]['service_name'])
        self.assertEqual('Service3', value[2]['service_name'])
        self.assertEqual('Service4', value[3]['service_name'])
        self.assertEqual('user1@example.com', value[0]['user_name'])
        self.assertEqual('user1@example.com', value[1]['user_name'])
        self.assertEqual('user2@example.com', value[2]['user_name'])
        self.assertEqual('user3@example.com', value[3]['user_name'])
    def test_select(self):
        data = [1, 2, 3, 4]
        expression = "$.select($*10)"
        self.assertEval([10, 20, 30, 40], expression, data)
    def test_data_sum(self):
        data = [1, 2, 3, 4]
        expression = "$.sum()"
        self.assertEval(10, expression, data)
    def test_method_sum(self):
        expression = "list(1,2,3,4).sum()"
        self.assertEval(10, expression)
    def test_function_sum(self):
        expression = "sum(list(1,2,3,4))"
        self.assertEval(10, expression)
    def test_range_const(self):
        expression = "range(0,4)"
        self.assertEval([0, 1, 2, 3], expression)
    def test_range_computed(self):
        expression = "range(1+2, 10-4)"
        self.assertEval([3, 4, 5], expression)
    def test_take_while(self):
        data = [1, 2, 3, 4]
        self.assertEval([1, 2], "$.take_while($<3)", data)
    def test_infinite_random_loop(self):
        # Infinite generator terminated by take_while; values stay in range.
        val = self.eval("range(0).select(random()).take_while($<0.99)")
        for v in val:
            self.assertTrue(0 < v < 0.99)
    def test_generator_limiting(self):
        # do not use self.eval here as it uses limiting on its own
        v = yaql.parse('range(0, 10)').evaluate()
        self.assertTrue(isinstance(v, types.GeneratorType))
        v2 = yaql.parse('range(0, 10).list()').evaluate()
        self.assertTrue(isinstance(v2, list))
        v3 = yaql.parse('range(0).list()')
        self.assertRaises(YaqlSequenceException, v3.evaluate)
    def test_select_for_each(self):
        data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expression = "$.for_each(switch(($>5)=>$, " \
                     "($>2)=>('_'+string($)), true=>0))"
        self.assertEval([0, 0, "_3", "_4", "_5", 6, 7, 8, 9, 10], expression,
                        data)
|
import sys
import json
import re
#assert sys.version_info >= (3, 5) # make sure we have Python 3.5+
from pyspark.sql import SparkSession, functions, types
# Module-level session shared by main() below.
spark = SparkSession.builder.appName('example code').getOrCreate()
# NOTE(review): this is a lexicographic string comparison — it would
# wrongly reject e.g. version '10.0'; fine for 2.x/3.x versions.
assert spark.version >= '2.3' # make sure we have Spark 2.3+
# sc = spark.sparkContext
# add more functions as necessary
#spark-submit area-storenumber.py output-1 area.csv dsareastorenumber
def main(inputs, input_area, output):
    """Join per-county store counts with county areas and write one CSV.

    inputs:     parquet dataset of store records (county, storenumber, ...)
    input_area: tab-separated file of (county_area, area) rows
    output:     destination directory for the CSV result
    """
    # Schema for the tab-separated area file.
    area_schema = types.StructType([
        types.StructField('county_area', types.StringType(), True),
        types.StructField('area', types.FloatType(), True),
    ])
    sales = spark.read.option('multiline', True).parquet(inputs)
    areas = spark.read.option('delimiter', '\t').csv(input_area, schema=area_schema)
    # Lower-case the county name so it matches the parquet side of the join.
    areas_lc = areas.withColumn('county_area', functions.lower(functions.col('county_area')))
    store_cols = sales.select('county', 'sale', 'storenumber', 'storename')
    county_counts = store_cols.groupBy('county').agg(
        functions.count('storenumber').alias('no of stores in a county'))
    joined = county_counts.join(areas_lc, areas_lc.county_area == county_counts.county)
    # coalesce(1): emit a single part file for easy downstream loading.
    joined.coalesce(1).write.csv(output, mode='overwrite')
if __name__ == '__main__':
    # Usage: spark-submit area-storenumber.py <parquet-in> <area.tsv> <out-dir>
    inputs, input_area, output = sys.argv[1], sys.argv[2], sys.argv[3]
    main(inputs, input_area, output)
from django import forms
from django.contrib.auth import authenticate, login
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
class registerform(UserCreationForm):
    """Signup form: UserCreationForm plus an email field and placeholder
    widgets for every input.

    NOTE(review): the ``error_messages`` key ``'msg'`` is not a key Django
    looks up (it uses e.g. ``'required'``, ``'invalid'``) — these messages
    are most likely never shown; verify intent.
    """
    # Username with a placeholder-styled text input.
    username = forms.CharField(max_length=100, required=True, help_text='',
                               widget=forms.TextInput(
                                   attrs={
                                       "type": "text",
                                       "placeholder": ("Enter Username"),
                                   }
                               ))
    # Email is required here even though Django's default User allows blank.
    email = forms.EmailField(required=True,
                             widget=forms.EmailInput(
                                 attrs={
                                     "type": "email",
                                     "placeholder": ("Enter E-mail"),
                                 }
                             ))
    password1 = forms.CharField(label='Password',
                                widget=forms.PasswordInput(
                                    attrs={
                                        "type": "password",
                                        "placeholder": ("Enter Password"),
                                    }
                                ),
                                error_messages={'msg':'Must include an uppercase letter, a lowercase letter and a symbol'})
    password2 = forms.CharField(label='Confirm Password',
                                widget=forms.PasswordInput(
                                    attrs={
                                        "type": "password",
                                        "placeholder": ("Password Confirmation"),
                                    }
                                ),
                                error_messages={'msg':'Make sure you entered the same password as above. Passwords are case sensitive.'})
    class Meta:
        model = User # Changes the user model.(default)
        fields = ["username", "email", "password1", "password2"]
class LoginForm(forms.Form):
    """Username/password login form validated via ``authenticate``."""
    username = forms.CharField(max_length=255, required=True)
    password = forms.CharField(widget=forms.PasswordInput, required=True)

    def clean(self):
        # Reject the form unless the credentials map to an active account.
        creds = self.cleaned_data
        account = authenticate(username=creds.get('username'),
                               password=creds.get('password'))
        if not account or not account.is_active:
            raise forms.ValidationError("Sorry, that login was invalid. Please try again.")
        return self.cleaned_data

    def login(self, response):
        # Re-authenticate with the cleaned credentials and return the user.
        creds = self.cleaned_data
        return authenticate(username=creds.get('username'),
                            password=creds.get('password'))
# OOPs concepts / properties
# - abstraction
# --- ATM ---
# -- hiding implementation -- user is not interested at all in process
# python -- private public ---- var
# var = 10 -- public
# _var = 10 --- protected - single underscore
# __var = 10 -- private -- strictly protected -- double underscore
# Company --
# XYZ
# Amol -- Sales -- 2 under --
# Ashish -- HR --
# Ashiwni -- Account
# - encapsulation - wrapping data in single unit
# Laptop -- ram, rom, cdrive, usb port, ssd, screen, graphic card --
# classes -- methods, variables - BEST Example
#
# object
# - Access Specifiers -- Abstraction
# public, private, protected
# - inheritance- getting properties from parent class
# -- Parents(1 Acre Plot) -- Son/Daughter(1 Acre)
# Parent class Base class -- Sublcass child class
# genetic disease --
# - polymorphism -one thing many forms
# poly -- many
# morphism -- forms
# +
# Person
# -
# inheritance
class Professors:
    """Record type exposing ID/Name/Age/Salary through Java-style
    accessor pairs (kept as an encapsulation teaching example)."""

    # Accessors grouped as setter/getter pairs per attribute.
    def set_id(self, pid):
        self.ID = pid

    def get_id(self):
        return self.ID

    def set_name(self, name):
        self.Name = name

    def get_name(self):
        return self.Name

    def set_age(self, age):
        self.Age = age

    def get_age(self):
        return self.Age

    def set_salary(self, sal):
        self.Salary = sal

    def get_salary(self):
        return self.Salary
# Demo: populate a Professors record via its setters (Salary left unset).
p1 = Professors()
p1.set_id(101)
p1.set_name("ABC")
p1.set_age(25)
# print(p1.__dict__)
# print(p1.get_id(),p1.get_name(), p1.get_age())
class Student(Professors):
    """Single-level inheritance demo: inherits every Professors accessor
    and adds a Marks attribute with its own accessor pair."""

    def set_marks(self, mrk):
        self.Marks = mrk

    def get_marks(self):
        return self.Marks
# Demo: a Student uses both inherited setters and its own set_marks.
s1 = Student()
s1.set_id(102)
s1.set_name("XYZ")
s1.set_age(19)
s1.set_marks(65)
# print(s1.__dict__)
# print(s1.get_id(),s1.get_name(), s1.get_age(), s1.get_marks())
# print(dir(s1))
class Father:
    """Base class for the inheritance demo: stores one property value
    and can print it."""

    def __init__(self, property):
        self.FatherProperty = property

    def show_property(self):
        # Report the father's holding.
        print(f"Father's Property:- {self.FatherProperty}")
class Daughter(Father):
    """Inherits the father's holding and adds the daughter's own."""

    def __init__(self, f_property, d_property):
        # Let the base class record the father's share first.
        super().__init__(f_property)
        self.DaughterProperty = d_property

    def show_property(self):
        # Override: report both holdings plus their sum.
        print(f"Daughter's Property:- {self.DaughterProperty}, Father's Property:- {self.FatherProperty},Total Property has:-{self.DaughterProperty + self.FatherProperty} ")
# d1 = Daughter(d_property=5, f_property=10)
# print(d1.__dict__)
# d1.show_property()
class A:
    # Demo: Python has no constructor overloading.  Each ``def __init__``
    # rebinds the same name in the class namespace, so only the LAST
    # definition survives; the first two are dead code and A(...) must be
    # called with all of a, b, c.
    def __init__(self, a):
        self.a = a
    def __init__(self, a, b):
        self.a = a
        self.b = b
    def __init__(self, a , b, c):
        self.a = a
        self.b = b
        self.c = c
    #override- takes the latest value
    #overloading - calls the constructor which has that arguments
# a= A(10,20)
# print(a)
# inheritance
# single level
# multilevel
class A:
    # Base class for the method-override demo below.
    # NOTE(review): this redefines the earlier ``A`` at module level.
    def m1(self):
        print("In class A-m1")
class B(A):
    # NOTE(review): this first m1 (flag-dispatch variant) is dead code —
    # the second ``def m1`` below rebinds the name and shadows it.
    def m1(self, flag=None):
        if flag == 'A':
            super().m1()
        elif flag == 'B' or not flag:
            print("In class B-m1")
        elif flag == "AB":
            super().m1()
            print("In class B-m1")
    def m1(self):
        # Effective m1: the child's own message.
        print("In class B-m1")
    def m2(self):
        # Delegates explicitly to the parent implementation.
        super().m1()
    def m3(self):
        super().m1()
        self.m1  # NOTE(review): missing () — attribute lookup only, never calls
obj = B()
obj.m3  # NOTE(review): missing () — m3 is referenced but never executed
# case 1 -- Child class has no any method/constructor --- access parent's properties
# case 2 -- child class has its own method/constructor --- access own properties
# case 3 -- Child class has its own method/constructor -- access own + parents properties
# case 4 -- Child class has its own method/constructor -- access parents one only
# class Square:
# def __init__(self, side):
# self.Side = side
# def area(self):
# print(f"Area of square is:-{self.Side * self.Side}")
# def utility_method(self, *args, **kwargs):
# print("Square utility :- ",args, kwargs)
# # Square.utility_method(self, *args, **kwargs)
# # l =Square(10)
# # l.area()
# class Rectangle(Square):
# def __init__(self, side, breadth):
# super().__init__(side)
# Square.__init__(self, side)
# self.Breadth = breadth
# def area(self):
# # super().area()
# Square.area(self)
# print(f"Area of rectangle is:-{self.Side * self.Breadth}")
# def utility_method(self, *args, **kwargs):
# print("Rectangle utility :- ",args, kwargs)
# Square.utility_method(self, *args, **kwargs)
# obj = Rectangle(side=5, breadth=3)
# # obj.area()
# obj.utility_method(4,5,6,7,a=2)
# class A(object):
# def m1(self):
# print("In class A-m1")
# class B(A):
# # def m1(self):
# # print("In class B-m1")
# pass
# class C(B):
# # def m1(self):
# # print("In class C-m1")
# pass
# class D(C):
# # def m1(self):
# # print("In class D-m1")
# pass
# obj = D()
# # obj.m1()
# class Sample(object):
# def __new__(cls):
# print("In new method")
# print("Creating Instance")
# return super().__new__(cls)
# def __init__(self):
# print("In init method")
# s1 = Sample()
# d = {"a": 2, "b":10, "c": 20}
# class A:
# def __init__(self, **kwargs):
# print(kwargs)
# self.A = kwargs.pop("a")
# print(self.A, "hi")
# print(kwargs)
# a = A(**d)
# import time
# def func(num):
# l = []
# for i in range(1, num+1):
# l.append(i)
# return l
# t1 = time.time()
# print(func(10000))
# t2 = time.time()
# print(t2-t1)
# multiple -- multiple parents --
# class Father:
# # def height(self):
# # print("Height:- 5.6")
# pass
# class Mother:
# def complexion(self):
# print("fair")
# pass
# class Son(Father,Mother):
# # def height(self):
# # print("Height:- 6.6")
# # pass
# # def complexion(self):
# # print("Black")
# pass
# # s = Son()
# # s.height()
# # s.complexion()
# # if hasattr(s, "height"):
# # s.height()
# # else:
# # print("Son has no attribute height")
# print(Son.__mro__)
# class A(object):
# def m1(self):
# print("In A-m1--1")
# super().m1()
# print("In A-m1--2")
# class B(object):
# def m1(self):
# print("In B-m1--1")
# super().m1()
# print("In B-m1--2")
# class C(object):
# def m1(self):
# print("In C-m1--1")
# # super().m1()
# print("In C-m1--2")
# class D(A, B):
# def m1(self):
# print("In D-m1--1")
# super().m1()
# print("In D-m1--2")
# class E(B, C):
# def m1(self):
# print("In E-m1--1")
# super().m1()
# print("In E-m1--2")
# class Z(D, E, C):
# def m1(self):
# print("In Z-m1--1")
# super().m1()
# print("In Z-m1--2")
# pass
# z= Z()
# print(Z.mro())
# z.m1()
from abc import ABC, abstractmethod
# class RBI(ABC):
# @abstractmethod
# def atm_count_50_in_city(self):
# pass
# @abstractmethod
# def loan_interest_rate_10_to_15(self):
# pass
# @abstractmethod
# def NEFT(self):
# pass
# @abstractmethod
# def Netbanking(self):
# pass
# class HDFC(RBI):
# # def atm_count_50_in_city(self):
# # print("67 ATM's in city")
# # def loan_interest_rate_10_to_15(self):
# # print("11.5% on personal loan")
# # def NEFT(self):
# # print("NEFT and RTGS are both avaialble")
# # def Netbanking(self):
# # print("Secured Netbanking")
# pass
# # h1 = HDFC()
# # h1.NEFT()
# class SBI(RBI):
# def atm_count_50_in_city(self):
# print("95 ATM's in city")
# def loan_interest_rate_10_to_15(self):
# print("10.5% on personal loan")
# def NEFT(self):
# print("NEFT and RTGS are both avaialble")
# def Netbanking(self):
# print("Secured Netbanking and mobile banking")
# # s1 = SBI()
# # s1.Netbanking()
# class Car(ABC):
# @abstractmethod
# def steering(self):
# pass
# @abstractmethod
# def braking(self):
# pass
# def airbag(self):
# print("available")
# class Tata(Car):
# def steering(self):
# print("Power Steering")
# def braking(self):
# print("ABS braking system")
# nexon = Tata()
# nexon.airbag()
# print(nexon)
# tiago = Tata()
# # print(tiago)
# class FileOpeClass(ABC):
# @abstractmethod
# def open(self):
# pass
# @abstractmethod
# def close(self):
# pass
# class Excel(FileOpeClass):
# def open(self, path):
# print("Excel File is opened path for that file:- ", path)
# def close(self):
# print("Excel file is closed")
# e1 = Excel()
# e1.open()
# e1.close()
# class Text(FileOpeClass):
# def open(self):
# print(" Text file is opened:- ")
# def close(self):
# print("Text file is closed")
# class Implementationclass:
# user_input = input("enter a file type:- ")
# class_name = globals()[user_input]
# obj = class_name()
# obj.open()
# obj.close()
# def foo():
# flag = True
# if not flag:
# return not True
# print (foo())
# f = foo()
# print (f())
|
from django.conf.urls import url
from django.contrib import admin
from core import views
from . import settings
from django.conf.urls.static import static
# Project URL configuration: core views plus admin, with static/media
# file serving appended (development-style setup).
urlpatterns = [
    url(r'^$', views.index, name="home"),
    url(r'^admin/', admin.site.urls),
    url(r'^select/role/', views.select_role, name='select-role'),
    url(r'^signup/(?P<role>[-\w]+)/$', views.signup, name='signup'),
    url(r'^login/', views.auth, name='login'),
    url(r'^logout', views.logout_user, name='logout'),
    url(r'^users/$', views.user_list, name='user-list'),
    url(r'^users/(?P<pk>\d+)/$', views.user_detail, name='user-detail'),
    url(r'^update/$', views.update_profile, name='update-profile'),
    url(r'^dashboard/', views.admin_dashboard, name='admin-dashboard'),
    url(r'^search/$', views.search),
    # NOTE(review): document_root=settings.STATIC_URL below looks wrong —
    # document_root expects a filesystem path (likely settings.STATIC_ROOT).
] + static(settings.STATIC_URL, document_root=settings.STATIC_URL) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
#encoding UTF-8
#def body
<div class="row">
<h1>Accommodations</h1>
<p class="left">The wedding ceremony and reception will be at Transmitter Park in Greenpoint, Brooklyn (see: <a href="${ROOT_URL}/map">Map</a>) at 12 noon. It's a small, waterfront park and it will be obvious as soon as you enter where the wedding will be.</p>
<p class="left">Public transportation to and from the wedding is strongly recommended! We are New Yorker transplants, and for our special day, you can be one, too.</p>
<p class="left">New York City has many, many, many hotels. <a href="http://airbnb.com">Air B & B</a> is popular if you'd like to stay in a homey apartment as an alternative. Ask us if you find a hotel/Air B&B and aren't sure if it's easy to take the train to our wedding. We're pretty damn good at subway optimizations by now.</p>
<div style="clear: both;"></div>
<h2 class="left">Queens</h2>
<ul class="accomo-list">
<li>Long Island City is the most convenient to Greenpoint. <a href="http://www.kayak.com/hotels/Long-Island-City,NY-c207/2013-06-07/2013-06-09">Here</a> are the kayak.com results and <a href="https://www.airbnb.com/s/Long-Island-City--Queens--NY?checkin=06%2F07%2F2013&checkout=06%2F09%2F2013&guests=2&sw_lat=40.73916676620979&sw_lng=-73.95688045251461&ne_lat=40.75399261855144&ne_lng=-73.93731105554195&search_by_map=true">these</a> are the Air B&B listings. <br /><br />
To get to the wedding from Long Island City, take the <b>G</b> at the Court Square station two stops south to the Greenpoint Avenue station.</li>
</ul>
<br />
<h2 class="left">Manhattan</h2>
<ul class="accomo-list">
<li>Times Square or Grand Central typically have the best hotel deals. <a href="http://www.kayak.com/hotels/Times-Square,New-York,NY-c15830-l5515/2013-06-07/2013-06-09">Here</a> are some hotels from kayak.com and <a href="https://www.airbnb.com/s/Times-Square--New-York--NY?checkin=06%2F07%2F2013&checkout=06%2F09%2F2013&guests=2">here</a> are the listings for airbnb.com.<br /><br /> To get to the wedding from Times Square or Grand Central, take the <b>7</b> (Times Square or Grand Central Station) to the Court Square station in Queens, then the <b>G</b> south to the Greenpoint Avenue station. </li>
<li>Anything near 14th Street in Manhattan is an easy commute to Greenpoint. These neighborhoods include Union Square, West Village, East Village, and Flatiron (up to 23rd Street and as low as 10th Street). <br /><br />
To get to the wedding from those areas, walk to your nearest 14th Street subway station (from west to east: 8th Ave, 6th Ave, Union Square, 3rd Ave, 1st Ave) for the <b>L</b> train. Take the <b>L</b> to the Metropolitan/Lorimer station, the second stop in Brooklyn. Transfer to the Court Square (north-bound) <b>G</b> and get off at the Greenpoint Avenue station.</li>
</ul>
<br />
<br />
<h2 class="left">Brooklyn</h2>
<ul class="accomo-list">
<li>The <a href="http://www.tripadvisor.com/Hotel_Review-g60827-d2452838-Reviews-The_Box_House_Hotel-Brooklyn_New_York.html">Box Hotel</a> is within walking distance to the wedding though it is not very NYC-scenic.
<br />
<br />
Walk south on Manhattan Avenue, pick up some delicious Polish sausage or baked goods, go west on Greenpoint Avenue and walk until you run into the East River. You have arrived at Transmitter Park. </li>
<li>Hotels off the <b>G</b> subway line will be your best bet to getting to the wedding. Places in neighborhoods like Fort Greene, Park Slope, Carroll Gardens, and a few others will work.
<br />
<br />
From these neighborhoods, take a Court-Square (northbound) <b>G</b> to the Greenpoint Avenue station.</li>
</ul>
</div>
#end def
#include "media/tmpl/includes/base.py"
|
from __future__ import division
import numpy as np
from sklearn.svm import SVC
from scipy.special import expit
import copy
from scipy.stats import norm
from background_check import BackgroundCheck
class OcDecomposition(object):
    """One-class decomposition ensemble: one one-class estimator per class,
    per-class rejection thresholds, and optional score normalization
    ("O-norm" / "T-norm").  An input rejected by every estimator receives
    the extra label ``len(self._estimators)``.

    NOTE(review): ``np.alen`` (deprecated in NumPy 1.18, removed in 1.24)
    was replaced with the equivalent built-in ``len``; behavior unchanged.
    """

    def __init__(self, base_estimator=BackgroundCheck(),
                 normalization=None):
        # NOTE(review): the default BackgroundCheck() is a shared mutable
        # default; fit() deep-copies it per class, so this is safe here.
        self._base_estimator = base_estimator
        self._estimators = []
        self._thresholds = []
        self._normalization = normalization
        self._priors = []
        self._means = []

    def fit(self, X, y, threshold_percentile=10, mus=None, ms=None):
        """Fit one copy of the base estimator per class in ``y`` and derive
        per-class score thresholds at ``threshold_percentile``."""
        classes = np.unique(y)
        n_classes = len(classes)
        class_count = np.bincount(y)
        self._priors = class_count / len(y)
        for c_index in np.arange(n_classes):
            c = copy.deepcopy(self._base_estimator)
            c.fit(X[y == c_index])
            self._estimators.append(c)
        scores = self.score(X, mus=mus, ms=ms)
        self._thresholds = np.zeros(len(self._estimators))
        for c_index in np.arange(n_classes):
            # Percentile over the *unique* scores so repeated values do not
            # dominate the threshold.
            u = np.unique(scores[:, c_index])
            self._thresholds[c_index] = np.percentile(u, threshold_percentile)
        self._means = scores.mean(axis=0)

    def set_estimators(self, estimators, X, y, threshold_percentile=10,
                       mus=None, ms=None):
        """Like ``fit`` but reuses already-fitted ``estimators``."""
        classes = np.unique(y)
        n_classes = len(classes)
        self._estimators = estimators
        class_count = np.bincount(y)
        self._priors = class_count / len(y)
        scores = self.score(X, mus=mus, ms=ms)
        self._thresholds = np.zeros(len(self._estimators))
        for c_index in np.arange(n_classes):
            u = np.unique(scores[:, c_index])
            self._thresholds[c_index] = np.percentile(u, threshold_percentile)
        self._means = scores.mean(axis=0)

    def score(self, X, mus=None, ms=None):
        """Per-estimator score matrix for ``X`` (one column per class).

        Returns None when neither BackgroundCheck nor a known
        normalization is configured (original behavior preserved).
        """
        if type(self._base_estimator) is BackgroundCheck:
            return self.score_bc(X, mus=mus, ms=ms)
        elif self._normalization in ["O-norm", "T-norm"]:
            # Epsilon avoids 0-valued thresholds, which would break O-norm
            # (thresholds are used as divisors there).
            return self.score_dens(X) + 1e-8

    def score_dens(self, X):
        """Density scores: exp of each estimator's log-score."""
        n = len(X)
        scores = np.zeros((n, len(self._estimators)))
        for i, estimator in enumerate(self._estimators):
            s = np.exp(estimator.score(X))
            scores[range(n), i] = s
        return scores

    def score_bc(self, X, mus=None, ms=None):
        """Background-check scores: P(foreground) column per estimator."""
        n = len(X)
        probas = np.zeros((n, len(self._estimators)))
        for i, estimator in enumerate(self._estimators):
            mu = None if mus is None else mus[i]
            m = None if ms is None else ms[i]
            probas[range(n), i] = estimator.predict_proba(X, mu=mu, m=m)[:, 1]
        return probas

    def predict(self, X, mus=None, ms=None):
        """Predict class indices; ``len(self._estimators)`` means rejected."""
        scores = self.score(X, mus=mus, ms=ms)
        if type(self._base_estimator) is BackgroundCheck:
            return self.predict_bc(scores)
        elif self._normalization == "O-norm":
            return self.predict_o_norm(scores)
        elif self._normalization == "T-norm":
            return self.predict_t_norm(scores)

    def predict_o_norm(self, scores):
        """O-norm: divide by thresholds; reject when no ratio exceeds 1."""
        reject = scores <= self._thresholds
        scores /= self._thresholds
        scores[reject] = -1
        max_scores = scores.max(axis=1)
        predictions = scores.argmax(axis=1)
        predictions[max_scores <= 1] = len(self._estimators)
        return predictions

    def predict_t_norm(self, scores):
        """T-norm: center on thresholds, scale by mean margin and priors."""
        reject = scores <= self._thresholds
        scores -= self._thresholds
        means = self._means - self._thresholds
        scores = (scores / means) * self._priors
        scores[reject] = -np.inf
        max_scores = scores.max(axis=1)
        predictions = scores.argmax(axis=1)
        predictions[max_scores <= 0] = len(self._estimators)
        return predictions

    def predict_bc(self, scores):
        """Background-check: argmax of accepted scores, else reject label."""
        reject = scores <= self._thresholds
        total_reject = (np.sum(reject, axis=1) == len(self._estimators))
        scores[reject] = -1
        predictions = scores.argmax(axis=1)
        predictions[total_reject] = len(self._estimators)
        return predictions

    def accuracy(self, X, y, mus=None, ms=None):
        """Mean accuracy of ``predict`` against labels ``y``."""
        predictions = self.predict(X, mus=mus, ms=ms)
        return np.mean(predictions == y)

    @property
    def thresholds(self):
        # Read-only view of the fitted per-class thresholds.
        return self._thresholds
|
# PoolEvaluator.py
from SingleNetworkEvaluator import *
import sys
IS_PY2 = sys.version_info < (3, 0)
if IS_PY2:
from Queue import Queue
else:
from queue import Queue
from threading import Thread
class Worker(Thread):
    """ Thread evaluating individuals from a given individuals queue """
    def __init__(self, evaluator, individuals):
        Thread.__init__(self)
        self.individuals = individuals
        # NOTE(review): ``id`` here is the *builtin function* — there is no
        # ``id`` parameter in scope, so this likely meant to store an
        # evaluator identifier; verify intent.
        self.evaluator_id = id
        self.daemon = True  # daemon thread: does not block interpreter exit
        self.evaluator = evaluator
        self.start()  # worker begins consuming as soon as it is constructed
    def run(self):
        # Consume forever: one queue item == one individual to evaluate.
        while True:
            individual = self.individuals.get()
            try:
                self.evaluator.evaluate(individual)
            except Exception as e:
                """ An exception happened in this thread """
                print(e)
            finally:
                """ Mark this individual evaluation as done, whether an exception happened or not """
                self.individuals.task_done()
class ThreadPool:
    """Fixed pool of Worker threads draining a shared individuals queue."""

    def __init__(self, evaluator_list, population_size):
        # Bounded queue: producers block once a full population is enqueued.
        self.individuals = Queue(population_size)
        for pool_evaluator in evaluator_list:
            Worker(pool_evaluator, self.individuals)

    def add_individual(self, individual):
        """Enqueue a single individual for evaluation."""
        self.individuals.put(individual)

    def map(self, individual_list):
        """Enqueue every individual in the given list."""
        for member in individual_list:
            self.add_individual(member)

    def wait_completion(self):
        """Block until every queued individual has been processed."""
        self.individuals.join()
class ThreadPoolEvaluator:
    """Fans individual evaluations out to one SingleNetworkEvaluator per
    GPU through a shared ThreadPool; blocks after each full population."""
    def __init__(self, dataset_filename, population_path, num_threads, population_size):
        self.population_size = population_size
        # Individuals queued since the last synchronization point.
        self.num_individual = 0
        """ Create num_threads evaluators on different GPUs """
        evaluator_list = []
        for i in range(num_threads):
            device_id = '/device:GPU:%d' % i
            population_path_device = population_path + '/gpu_%d' % i
            evaluator_list.append( SingleNetworkEvaluator(dataset_filename, population_path_device, gpu_id=device_id) )
        """ Instantiate a thread pool with NUM_THREADS evaluator threads """
        self.pool = ThreadPool(evaluator_list, self.population_size)
    def evaluate(self, individual):
        # Queue one individual; once a whole population has been queued,
        # wait until every queued evaluation has finished.
        self.pool.add_individual(individual)
        self.num_individual += 1
        """ Once a whole population is added to the queue,
            the program pauses until the population is evaluated """
        if self.num_individual >= self.population_size:
            self.pool.wait_completion()
            # reset the counter
            self.num_individual = 0
|
def datos():
    """Return the sample lookup table (a single 'Nombre' entry)."""
    return {'Nombre': 'Maria'}
def pedir_nombre():
    # Prompt for a name and report whether it exists as a key in datos().
    # NOTE(review): ``raw_input`` is Python 2 only — under Python 3 this
    # would need ``input()``.
    nombre = raw_input('Ingrese un nombre: ')
    diccionario_prueba = datos()
    if nombre in diccionario_prueba:
        print('la llave existe')
    else:
        print('la llave no existe')
if __name__ == '__main__':
    pedir_nombre()  # script entry point: run the interactive prompt
|
# -*- coding: utf-8 -*-
import lucene
lucene.initVM()
from lupyne import engine
import operator
import codecs
import nltk
import pickle
import math as m
import json
import re
from nltk.collocations import *
import networkx as nx
import matplotlib.pyplot as plt
import numpy as np
import csv
from networkx.readwrite import json_graph
# --- Module-level corpus setup (Python 2: note the ur'' literal below) ---
stopwords = nltk.corpus.stopwords.words('russian')
# NOTE(review): the character class ``A-Ba-b`` only excludes A,B,a,b — this
# looks like a typo for ``A-Za-z``; verify against the indexing pipeline.
tokenizer = nltk.RegexpTokenizer(ur'[^A-Ba-b\s\.\",:;\(\)\!\?]+')
indexer = engine.Indexer('./lucene_index_05')
# Space-separated list of single-word country names, used to exclude
# country-tagged documents from the "relevant" query below.
country_string = ' '.join([x for x in open('country_list.txt').read().split('\n')
                    if len(x.split()) < 2])
# print(country_string)
# print(type(country_string))
# Relevant set: documents mentioning the 1990s without country objects.
hits = indexer.search('stemmed_text:(целеваядат девян "90-е" "90-х" "90-ым") NOT objects:({})'.format(country_string))
# Complement of ``hits`` over the whole index (irrelevant set).
reverse_first = indexer.search('(stemmed_text:(целеваядат девян "90-е" "90-х" "90-ым") AND objects:({})) OR (*:* AND NOT stemmed_text:(целеваядат девян "90-е" "90-х" "90-ым"))'.format(country_string))
# NOTE(review): ``all`` shadows the builtin of the same name from here on.
all = indexer.search()
#print(len(hits))
assert len(hits) + len(reverse_first) == len(all)
# Split the relevant set into the top fifth and the remainder.
decile_one_first = hits[:(len(hits)//5)]
deciles2_10_first = hits[(len(hits)//5):]
#decile_one = indexer.search('objects:Ельцин AND stemmed_text:(целеваядат девян "90-е" "90-х" "90-ым") NOT objects:({})'.format(country_string))
# print(len(decile_one))
# for hit in decile_one[:10]:
# print(hit['title'])
# print(hit['text'])
# print(hit['objects'])
# print(hit['mentions'])
# print('\n')
# reverse = list(reverse_p[510:600])
# decile_one = list(decile_one[:10])
#
# reverse += decile_one[3:5]
# decile_one += reverse[55:57]
# Polynomial coefficients used piecewise by log_binomial_tail_objs /
# process_keyword_probabilities to approximate the log binomial tail as a
# function of the sigma deviation; each array covers one deviation range.
lbtc0 = [-0.6931471806, 0.7978845605, -0.3183098861, 0.0363356023, 0.00478211176,
         -0.000036980745, -0.0002120257200, -0.0000521599987, -0.161680983E-5, 0.2851334009E-5]
lbtc1 = [ -53.23128515, 10.09809323, -0.4952773111, 0.0002977333987,
          0.00002074390927, 0.1514595081e-5, 0.1131629551e-6, 0.8540593192e-8, 0.6458507274e-9, 0.4865830484e-10]
lbtc2 = [-203.9171554, 20.04975307, -0.4987683692, 0.00004045442982, 0.1487647983e-5,
         0.5807156793e-7, 0.2349932467e-8]
lbtc3 = [-454.3212440, 30.03325967, -0.4994481142, 0.00001218332169, 0.3019140505e-6,
         0.7963045815e-8, 0.2182998026e-9]
def find_L(hits_object):
    """Total token count over all hit texts (the corpus length L)."""
    L = 0
    for hit in hits_object:
        # Python 2 ``unicode``; tokenizer is the module-level RegexpTokenizer.
        L += len(tokenizer.tokenize(unicode(hit['text'])))
    # NOTE(review): this progress message prints *after* the work is done.
    print('Calculating L...')
    print('Found L: %d' % L)
    return L
def find_objects(hits_object, dump_path=None):
    """Collect the set of '|||'-separated object names across all hits;
    optionally pickle the resulting set to ``dump_path``."""
    collected = []
    for hit in hits_object:
        try:
            collected.extend(hit['objects'].split('|||'))
        except AttributeError:
            # The 'objects' field is None on this document.
            print('No objects in this text:')
            print(hit['text'])
    unique_objects = set(collected)
    if dump_path:
        with open(dump_path, 'w') as dump_file:
            pickle.dump(unique_objects, dump_file)
    print('Found %d objects' % len(unique_objects))
    return unique_objects
def get_object_dictionary(hits_obj, add_null_objects=False, dump_path=None,
                          punish_singletext=None, punish_length=False, kill=0):
    """Aggregate object mention counts over all hits.

    Returns {object: [mention_count, n_texts]}.  Options: add_null_objects
    keeps zero-mention objects; punish_length rescales counts from texts
    longer than 1000 tokens; punish_singletext divides counts of objects
    seen in a single text; kill drops objects seen in <= kill texts.
    """
    object_dict = {}
    for hit in hits_obj:
        try:
            objects = hit['objects'].split('|||')
            mentions = [int(x.strip()) for x in hit['mentions'].split('|||')]
            try:
                length = len(tokenizer.tokenize(hit['text']))
            except TypeError:
                # Missing/broken text: assume a long document.
                length = 4000
            if punish_length:
                if length > 1000:
                    mentions = [x * (float(1000) / length) for x in mentions]
        except AttributeError:
            print('WTH was this just now:')
            print(hit['title'])
            print(hit['text'])
            objects = []
            mentions =[]
        except UnicodeEncodeError:
            print(hit['mentions'])
            print('NO OBJECT DATA')
            objects = []
            mentions = []
        # Expand ';'-packed composite names into individual object entries.
        # NOTE(review): this mutates ``objects``/``mentions`` while iterating
        # ``objects`` — the expanded entries are appended and may be revisited;
        # behavior is order-dependent and fragile.
        for object in objects:
            if re.search(';', object):
                # print('EXTENDING, PREVIOUS STATE:')
                # print('|||'.join(objects))
                # print('|||'.join([str(x) for x in mentions]))
                temps = [x.split(';') for x in object.split()]
                #assert len(temps) < 3
                new_objs = []
                if len(temps) == 2:
                    for obj1 in temps[0]:
                        for obj2 in temps[1]:
                            new_objs.append(u'{} {}'.format(obj2, obj1))
                elif len(temps) == 1:
                    new_objs = [x for x in temps[0]]
                else:
                    break
                # Each expanded object inherits the composite's mention count.
                new_ments = [mentions[objects.index(object)] for x in new_objs]
                del mentions[objects.index(object)]
                objects.remove(object)
                mentions.extend(new_ments)
                objects.extend(new_objs)
                # print('NEW STATE:')
                # print('|||'.join(objects))
                # print('|||'.join([str(x) for x in mentions]))
        if len(mentions) == len(objects):
            for object in objects:
                if object not in object_dict:
                    if mentions[objects.index(object)] != 0:
                        object_dict[object] = [mentions[objects.index(object)], 1]
                    else:
                        if add_null_objects:
                            object_dict[object] = [mentions[objects.index(object)], 1]
                else:
                    object_dict[object][0] += mentions[objects.index(object)] ; object_dict[object][1] += 1
            print('Added some objects.')
        else:
            # Mention/object lists got out of sync; skip this document.
            print('The length thing again.')
    for object in object_dict:
        assert isinstance(object_dict[object][0], int) or isinstance(object_dict[object][0], float)
        if punish_singletext:
            if object_dict[object][1] == 1:
                object_dict[object][0] = float(object_dict[object][0]) / punish_singletext
        # NOTE(review): deleting from ``object_dict`` while iterating it
        # raises RuntimeError ("dictionary changed size during iteration")
        # whenever ``kill`` actually removes an entry — same bug fixed in
        # get_keywords; iterate over list(object_dict) to fix.
        if kill:
            if object_dict[object][1] <= kill:
                del object_dict[object]
    if dump_path:
        with open(dump_path, 'w') as dump_file:
            pickle.dump(object_dict, dump_file)
    return object_dict
def get_objects_with_P(objdict_R, objdict_IR, L, dump_path=None):
    """Laplace-smoothed occurrence probability for every object in the
    relevant dict, pooling counts from the irrelevant dict when present.

    objdict_R / objdict_IR map object -> [count, n_texts]; L is the total
    corpus length.  Optionally pickles the result to ``dump_path``.
    """
    probabilities = {}
    for obj in objdict_R:
        pooled = objdict_R[obj][0]
        if obj in objdict_IR:
            pooled += objdict_IR[obj][0]
        # +1 smoothing guarantees strictly positive probabilities.
        probabilities[obj] = float(pooled + 1) / float(L)
    for obj in probabilities:
        assert probabilities[obj] != 0
    if dump_path:
        with open(dump_path, 'w') as dump_file:
            pickle.dump(probabilities, dump_file)
    print('Got Ps.')
    return probabilities
def log_binomial_tail_objs(objects_with_P, objdict_R, L, dump_path=None):
    """Score each object by the log binomial tail of its relevant-set count.

    For each object, computes the sigma deviation of the observed count Nr
    from the expected L*P under a binomial model, then approximates the
    log tail probability with the module-level piecewise polynomials
    lbtc0..lbtc3 (one per deviation range) or a linear extrapolation for
    extreme deviations.  Returns {object: [log_tail, -sigma_deviation]}.
    """
    objects_with_lbt = {}
    for object in objects_with_P:
        Nr = float(objdict_R[object][0])
        P = float(objects_with_P[object])
        assert Nr != 0
        assert L != 0
        assert P != 0
        #assert m.sqrt(L*P*(1-P)) != 0
        # Standardized deviation (negated: more-frequent-than-expected
        # objects get more negative values here).
        sigma_deviation = -((Nr - L * P) / m.sqrt((L*P*(1-P))))
        #print('Did {} - {} * {} / m.sqrt({} * {} * (1 - {}), got {}'.format(Nr, L, P, L, P, P, MLC))
        # Piecewise polynomial approximation of the log binomial tail.
        if sigma_deviation > 2:
            lbt = 0
        elif sigma_deviation >= -2:
            lbt = np.polyval(lbtc0, sigma_deviation)
        elif sigma_deviation >= -11:
            lbt = np.polyval(lbtc1, sigma_deviation)
        elif sigma_deviation >= -30:
            lbt = np.polyval(lbtc2, sigma_deviation)
        elif sigma_deviation >= -120:
            lbt = np.polyval(lbtc3, sigma_deviation)
        else:
            # Beyond the fitted range: linear extrapolation.
            lbt = -10755.11947 + 124.0043967 * (sigma_deviation + 120)
        objects_with_lbt[object] = [lbt, -sigma_deviation]
    # for object in objects_with_lbt:
    # assert objects_with_lbt[object] != 0
    if dump_path:
        with open(dump_path, 'w') as dump_file:
            pickle.dump(objects_with_lbt, dump_file)
    print('Got binomial tail logarithm scores.')
    return objects_with_lbt
# def get_lnerfs(moivre_laplace_objs, dump_path = None):
# objs_with_lnerfs = {}
# for object in moivre_laplace_objs:
# #print(m.erf(moivre_laplace_objs[object]))
# try:
# lnerf = m.log(m.erf(moivre_laplace_objs[object]))
# except ValueError:
# lnerf = 1234.0
# objs_with_lnerfs[object] = lnerf
# if dump_path:
# with open(dump_path, 'w') as dump_file:
# pickle.dump(objs_with_lnerfs, dump_file)
#
# print('Got lnerfs.')
#
# return objs_with_lnerfs
def get_keywords(hits_object, dump_path=None, punish_singletext=None, punish_length=False, kill=0):
    """Aggregate keyword scores over a collection of hits.

    For each hit, its '|||'-separated keywords are counted against the
    hit's tokens (prefix match on the first 6 characters, add-one
    smoothed).

    Parameters
    ----------
    hits_object : iterable of dict-like hits with 'keywords' and 'text'
    dump_path : optional path; when given, the result is pickled there
    punish_singletext : optional divisor applied to keywords seen in
        only one text
    punish_length : scale counts by length/1000 for texts over 1000 tokens
    kill : drop keywords appearing in <= `kill` texts

    Returns
    -------
    dict keyword -> [score, number_of_texts]
    """
    keyword_dict = {}
    for hit in hits_object:
        try:
            keywords = hit['keywords'].split('|||')
        except AttributeError:
            keywords = []
            print('Skipped text with no keywords.')
        # Tokenize each text once; the original re-tokenized the same
        # text for every keyword.  Only tokenize when there are keywords,
        # matching the original's lazy behaviour.
        words = []
        length = 5000
        if keywords:
            try:
                words = tokenizer.tokenize(hit['text'])
                length = len(words)
            except TypeError:
                words = []
                length = 5000
        for keyword in keywords:
            count = 1
            for word in words:
                if word[:6] == keyword[:6]:
                    count += 1
            if punish_length:
                if length > 1000:
                    count = count / (float(1000) / length)
            if keyword in keyword_dict:
                keyword_dict[keyword][0] += count ; keyword_dict[keyword][1] += 1
            else:
                keyword_dict[keyword] = [count, 1]
            print('Got a keyword')
    # Iterate over a snapshot of the keys: the original deleted entries
    # from keyword_dict while iterating it, which raises RuntimeError.
    for key in list(keyword_dict):
        if punish_singletext:
            if keyword_dict[key][1] == 1:
                keyword_dict[key][0] = float(keyword_dict[key][0]) / punish_singletext
        if kill:
            if keyword_dict[key][1] <= kill:
                del keyword_dict[key]
    if dump_path:
        with open(dump_path, 'w') as dump_file:
            pickle.dump(keyword_dict, dump_file)
    return keyword_dict
def process_keyword_probabilities(keydict_R, keydict_IR, L, Lr, dump_path=None):
    """Score each relevant-set keyword with the log binomial tail.

    The keyword's overall probability is estimated from its combined
    (relevant + irrelevant, add-one smoothed) count over L, and its
    observed relevant count is compared with the expectation over Lr
    trials.  The tail value comes from the lbtc0..lbtc3 polynomial fits
    (module globals), with linear extrapolation below the fitted range.

    Returns a dict keyword -> [log_binomial_tail, sigma_deviation];
    optionally pickles it to `dump_path`.
    """
    keywords_p = {}
    for kw, rel_entry in keydict_R.items():
        if kw in keydict_IR:
            N = float(keydict_IR[kw][0] + rel_entry[0] + 1)
        else:
            N = float(rel_entry[0] + 1)
        P = float(N) / L
        n_rel = float(rel_entry[0])
        dev = -((n_rel - Lr * P) / (m.sqrt(Lr * P * (1 - P))))
        if dev > 2:
            tail = 0
        else:
            for lower_bound, coeffs in ((-2, lbtc0), (-11, lbtc1), (-30, lbtc2), (-120, lbtc3)):
                if dev >= lower_bound:
                    tail = np.polyval(coeffs, dev)
                    break
            else:
                # Linear extrapolation below the fitted range.
                tail = -10755.11947 + 124.0043967 * (dev + 120)
        keywords_p[kw] = [tail, -dev]
    if dump_path:
        with open(dump_path, 'w') as dump_file:
            pickle.dump(keywords_p, dump_file)
    return keywords_p
def dumb_formula(decile_one, reverse, objdict_R, objdict_IR):
    """Score each object by (relevant rate) / (irrelevant rate), rounded.

    `decile_one` and `reverse` are used only for their sizes; the
    irrelevant count is add-one smoothed.  Returns a list of
    (object, score) tuples sorted by ascending score.
    """
    scored_objects = {}
    for obj, entry in objdict_R.items():
        rel_count = float(entry[0])
        rel_size = len(decile_one)
        if obj in objdict_IR:
            irr_count = float(objdict_IR[obj][0]) + 1
        else:
            irr_count = float(1)
        irr_size = len(reverse)
        assert irr_size != 0
        assert rel_size != 0
        assert (irr_count / irr_size) != 0
        ratio = (rel_count / rel_size) / (irr_count / irr_size)
        scored_objects[obj] = int(round(ratio))
    return sorted(scored_objects.items(), key=lambda pair: pair[1])
def show_by_quantile(n, number, hits_obj):
    """Print the top `number` texts from each of the `n` quantiles of hits_obj."""
    step = len(hits_obj) // n
    start = 0
    label = 1
    while start < len(hits_obj):
        chunk = hits_obj[start:(start + step)]
        print('\n-------------------------------\n')
        print('THOSE ARE THE TOP {} TEXTS FROM {}-quantile {}:\n'.format(str(number), str(n), str(label)))
        for hit in chunk[:number]:
            print('-----------------')
            print(hit['title'])
            print('\n')
            print(hit['text'])
        start += step
        label += 1
def write_sortedlist(path, items):
    """Append (name, score) pairs to `path`, one 'name score' per line (UTF-8).

    Parameters
    ----------
    path : file to append to (created if missing)
    items : iterable of 2-tuples, e.g. the output of sorted(d.items(), ...)
    """
    with codecs.open(path, 'a', encoding='utf-8') as out:
        # `pair` instead of the original local name, which shadowed the
        # builtin `tuple`.
        for pair in items:
            out.write(pair[0])
            out.write(' ')
            out.write(str(pair[1]))
            out.write('\n')
def get_objects_by_prob_comp(objects_w_p, objdict_Nr, objdict_Nir, L_rel, L_ir, cut_infrequent=True, dump_path=None):
    """Score objects by comparing their relevant-set probability with
    their irrelevant-set probability.

    Parameters
    ----------
    objects_w_p : unused here; kept for interface compatibility
    objdict_Nr : dict obj -> [count, n_texts] in the relevant set
    objdict_Nir : dict obj -> [count, n_texts] in the irrelevant set
    L_rel, L_ir : totals used to normalise counts into probabilities
    cut_infrequent : quarter the score of objects seen in <= 1 relevant text
    dump_path : optional pickle destination

    Returns
    -------
    dict obj -> score (Pr / Pir, add-one smoothed on the irrelevant side)
    """
    objects_by_prob_comp = {}
    for obj in objdict_Nr:
        p_rel = float(objdict_Nr[obj][0]) / float(L_rel)
        try:
            p_irr = float(objdict_Nir[obj][0]) / float(L_ir)
        except KeyError:
            # Unseen in the irrelevant set: smooth with a pseudo-count of 1.
            p_irr = 1 / float(L_ir)
        score = p_rel / p_irr
        if cut_infrequent:
            if objdict_Nr[obj][1] <= 1:
                score = score / 4
        objects_by_prob_comp[obj] = score
    if dump_path:
        # 'wb': pickle streams are binary -- text mode fails on Python 3
        # and corrupts data on Windows.
        with open(dump_path, 'wb') as dump_file:
            pickle.dump(objects_by_prob_comp, dump_file)
    return objects_by_prob_comp
def get_words(hits_object):
    """Flatten every hit's text into one token list via the module tokenizer."""
    return [token for hit in hits_object for token in tokenizer.tokenize(hit['text'])]
def make_colloc_network(collocs):
    """Build an undirected collocation graph with one edge per pair.

    Parameters
    ----------
    collocs : iterable of (word, word) pairs

    Returns
    -------
    networkx.Graph with words as nodes and collocations as edges
    """
    network = nx.Graph()
    for left, right in collocs:
        # add_edge creates missing endpoint nodes automatically, so the
        # explicit add_node calls were redundant.
        network.add_edge(left, right)
        print('Added an edge.')  # fixed typo ("Addded")
    return network
def do_iteration_1(decile_one, deciles2_10, reverse, dumpobj, dumpkey):
    """Run one scoring iteration: build keyword and object statistics for
    the relevant decile versus everything else, score them with the log
    binomial tail, and write the sorted results to the two dump files.

    Parameters
    ----------
    decile_one : hits treated as relevant
    deciles2_10 : the remaining hits from the ranked query
    reverse : hits matching the negated query
    dumpobj, dumpkey : output paths for the sorted object/keyword dumps

    Side effects: writes several *.pkl caches in the working directory.
    """
    dumpkeyfile = codecs.open(dumpkey, 'w', encoding='utf-8')
    dumpobjfile = codecs.open(dumpobj, 'w', encoding='utf-8')
    # L = find_L(all)
    # with open('length_all.pkl', 'w') as dump:
    # pickle.dump(L, dump)
    # Corpus length is read from a cache produced by an earlier run.
    L = pickle.load(open('length_all.pkl'))
    #Lr = find_L(decile_one)
    Lr = find_L(decile_one)
    #pickle.dump(Lr, open('length_relevant.pkl', 'w'))
    key_R = get_keywords(decile_one, dump_path='key_R_fixed.pkl')
    key_reverse = get_keywords(reverse, dump_path='key_reverse_fixed.pkl')
    key_2_10 = get_keywords(deciles2_10, dump_path='key_2_10_fixed.pkl')
    key_IR = {}
    # key_R = pickle.load(open('key_R_fin.pkl'))
    # key_IR = pickle.load(open('key_IR_fin.pkl'))
    # Merge the two "irrelevant" keyword sources (reverse + deciles 2-10).
    for key in key_reverse:
        if key in key_2_10:
            key_IR[key] = (key_reverse[key][0] + key_2_10[key][0], key_reverse[key][1] + key_2_10[key][1])
        else:
            key_IR[key] = key_reverse[key]
    pickle.dump(key_IR, open('key_IR_fixed.pkl', 'w'))
    keyword_probs = process_keyword_probabilities(key_R, key_IR, L, Lr, 'key_probs_last.pkl')
    # Sort keywords by their log-binomial-tail score, ascending (best first).
    keys_sorted = sorted([(x, y[0]) for x,y in keyword_probs.items()], key=operator.itemgetter(1))
    for key, prob in keys_sorted:
        try:
            # NOTE(review): key_IR[key][0] is written twice with no comma
            # between the fields -- presumably the second occurrence should
            # be ',' + str(key_IR[key][1]); confirm before relying on the
            # dump's column layout.
            dumpkeyfile.write(key + ',' + str(prob) + ',' + str(key_R[key][0]) + ',' + str(key_R[key][1]) +
                              ',' + str(key_IR[key][0]) + str(key_IR[key][0]) + ',' + str(keyword_probs[key][1]))
            dumpkeyfile.write('\n')
        except:
            # Keyword absent from key_IR: write smoothed placeholders (1, 0).
            dumpkeyfile.write(key + ',' + str(prob) + ',' + str(key_R[key][0]) + ',' + str(key_R[key][1])
                              + ',' + str(1) + ',' + str(0)
                              + ',' + str(keyword_probs[key][1]))
            dumpkeyfile.write('\n')
    objdict_R = get_object_dictionary(decile_one, dump_path='objdict_R_fixed.pkl')
    objdict_reverse = get_object_dictionary(reverse, dump_path='objdict_rev_fixed.pkl') # [FIX: SHOULD INCLUDE DEC 2-10]DONE
    objdict2_10 = get_object_dictionary(deciles2_10, dump_path='objdict2_10_fixed.pkl')
    objdict_IR = {}
    # Same merge as for keywords, now over object counts.
    for obj in objdict_reverse:
        if obj in objdict2_10:
            objdict_IR[obj] = [objdict_reverse[obj][0] + objdict2_10[obj][0], objdict_reverse[obj][1] + objdict2_10[obj][1]]
        else:
            objdict_IR[obj] = objdict_reverse[obj]
    pickle.dump(objdict_IR, open('objdict_IR_fixed.pkl', 'w'))
    objects_with_P = get_objects_with_P(objdict_R, objdict_IR, L, dump_path='objects_with_P_fixed.pkl')
    lbt_objects = log_binomial_tail_objs(objects_with_P, objdict_R, Lr, dump_path='lbt_objects_fixed.pkl')
    lbt_sorted = sorted([(x, y[0]) for x,y in lbt_objects.items()], key=operator.itemgetter(1))
    for x in lbt_sorted:
        try:
            dumpobjfile.write(x[0] + '|' + str(x[1]) + '|' + str(objdict_R[x[0]][0]) + '|'+ str(objdict_R[x[0]][1]) +
                              '|' + str(objdict_IR[x[0]][0]) + '|' + str(objdict_IR[x[0]][1])
                              + '|' + str(lbt_objects[x[0]][1]))
        except KeyError:
            # Object absent from objdict_IR: smoothed placeholders (1, 0).
            dumpobjfile.write(x[0] + '|' + str(x[1]) + '|' + str(objdict_R[x[0]][0]) + '|'+ str(objdict_R[x[0]][1])
                              + '|' + str(1) + '|' + str(0)
                              + '|' + str(lbt_objects[x[0]][1]))
        dumpobjfile.write('\n')
    dumpkeyfile.close()
    dumpobjfile.close()
def do_iteration_2(objects, keywords):
    """Build a follow-up search from previously pickled object/keyword scores.

    Loads the pickled score dicts, keeps entries whose score is exactly 0
    (the best possible log-tail score), ORs the top 50 of each into a new
    index query, and returns the first twelfth of the resulting hits.
    """
    # 'rb': pickle files are binary (required on Python 3, harmless on 2).
    lnerfed = pickle.load(open(objects, 'rb'))
    probs_ml = {k:v for k,v in lnerfed.items() if v == 0}
    search_string_objs = ' '.join(['\"' + x + '\"' for x in probs_ml.keys()][:50]).encode('utf-8')
    key_probs = pickle.load(open(keywords, 'rb'))
    keys_ml = {k:v for k,v in key_probs.items() if v == 0}
    search_string_keys = ' '.join(['\"' + x + '\"' for x in keys_ml.keys()][:50]).encode('utf-8')
    fin_hits = indexer.search('stemmed_text:(целеваядат девян "90-е" "90-х" "90-ым") objects:({0}) keywords:({1})'.format(search_string_objs, search_string_keys))
    # Floor division keeps the slice index an int on Python 3 as well
    # (identical to '/' for ints on Python 2).
    to_process = fin_hits[:(len(fin_hits) // 12)]
    return to_process
def do_iteration_2_csv(objects, keywords=''):
    """Re-query the index using object (and optionally keyword) names read
    from CSV files (first column of each row is the name).

    Returns (nhits, nreverse): hits matching the expanded query and hits
    matching its negation.

    NOTE(review): the assertions compare against len(all) -- `all` here
    must be a module-level collection of every hit (shadowing the builtin
    all()); confirm it is populated before calling this.
    """
    line = []
    with open(objects, 'r') as csvfile:
        objs = csv.reader(csvfile)
        for row in objs:
            # Quote each name so multi-word names survive in the query.
            line.append('\"'+row[0]+'\"')
    line = ' '.join(line)
    if keywords:
        keywordline = []
        with open(keywords, 'r') as csvfile:
            objs = csv.reader(csvfile)
            for row in objs:
                keywordline.append('\"'+row[0]+'\"')
        keywordline = ' '.join(keywordline)
        print(keywordline)
        nhits = indexer.search('stemmed_text:(целеваядат девян "90-е" "90-х" "90-ым") objects:({0}) keywords:({1})'.format(line, keywordline))
        nreverse = indexer.search('*:* NOT stemmed_text:(целеваядат девян "90-е" "90-х" "90-ым") NOT objects:({0}) NOT keywords:({1})'.format(line, keywordline))
        assert len(nhits) + len(nreverse) == len(all)
        return nhits, nreverse
    # No keyword file: query on objects alone.
    nhits = indexer.search('stemmed_text:(целеваядат девян "90-е" "90-х" "90-ым") objects:({0})'.format(line))
    nreverse = indexer.search('*:* NOT stemmed_text:(целеваядат девян "90-е" "90-х" "90-ым") NOT objects:({0})'.format(line))
    assert len(nhits) + len(nreverse) == len(all)
    return nhits, nreverse
def draw_graph(nx_graph, show=True):
    """Render the graph with a spring layout; display it when `show` is True."""
    positions = nx.spring_layout(nx_graph)
    nx.draw_networkx_nodes(nx_graph, pos=positions, nodelist=nx_graph.nodes())
    nx.draw_networkx_edges(nx_graph, pos=positions, edgelist=nx_graph.edges())
    nx.draw_networkx_labels(nx_graph, pos=positions)
    if show:
        plt.show()
def filter(w):
    """Return True when any character after the first is uppercase.

    Used as a collocation word filter to drop camel-case tokens.

    NOTE: the original kept a `counter` that was never incremented, so
    `counter != 0` could never be true and the function always returned
    False; this implements the evident intent.  (The name shadows the
    builtin `filter`; kept unchanged for existing callers.)
    """
    return any(ch.isupper() for ch in w[1:])
def network_shenanigans(hits_obj, n):
    """Build a collocation graph from the hits' texts.

    Finds PMI-scored bigram collocations, filters out rare bigrams,
    stopwords, digit-bearing and Latin-script words, and camel-case
    tokens, keeps bigrams scoring above `n`, and returns the resulting
    graph as node-link JSON.
    """
    bigram_measures = nltk.collocations.BigramAssocMeasures()
    words = get_words(hits_obj)
    finder = BigramCollocationFinder.from_words(words)
    # Drop bigrams seen fewer than 3 times before scoring.
    finder.apply_freq_filter(3)
    finder.apply_word_filter(lambda w: w in stopwords)
    finder.apply_word_filter(lambda w: re.search('[0-9]', w))
    finder.apply_word_filter(lambda w: re.search('[a-zA-Z]', w))
    finder.apply_word_filter(lambda w: w.startswith(u'Ъ'))
    # `filter` is the module-level camel-case filter above, not the builtin.
    finder.apply_word_filter(lambda w: filter(w))
    scored = finder.score_ngrams(bigram_measures.pmi)
    print('Total bigrams:')
    print(len(scored))
    # NOTE(review): raises IndexError when fewer than 5001 bigrams survive.
    print(scored[5000])
    print('Total collocations:')
    collocations = [x[0] for x in scored if x[1] > n]
    print(len(collocations))
    graph = make_colloc_network(collocations)
    print('Connected components:')
    print(nx.number_connected_components(graph))
    return json_graph.node_link_data(graph)
|
def CountPoints(word):
    """Return the Scrabble score of `word`, case-insensitively.

    Characters without a letter score (digits, apostrophes, other
    punctuation) are now ignored instead of raising KeyError.
    """
    points = {"e": 1, "a": 1, "i": 1, "o": 1, "n": 1, "r": 1, "t": 1, "l": 1, "s": 1, "u": 1,
              "d": 2, "g": 2,
              "b": 3, "c": 3, "m": 3, "p": 3,
              "f": 4, "h": 4, "v": 4, "w": 4, "y": 4,
              "k": 5,
              "j": 8, "x": 8,
              "q": 10, "z": 10}
    return sum(points.get(ch, 0) for ch in word.lower())
def FindWord(txt):
    """Return the highest-scoring word of `txt`.

    Words longer than 10 characters are skipped; on ties the earlier
    word wins.  (The original shadowed the builtin `max` with a local;
    renamed to `best_score`.)
    """
    best_score = 0
    best_word = ""
    for candidate in txt.split():
        if len(candidate) > 10:
            continue
        score = CountPoints(candidate)
        if best_score < score:
            best_score = score
            best_word = candidate
    return best_word
if __name__ == '__main__':
    # Demo moved under the main guard so importing this module no longer
    # runs it (the guard previously contained only `pass` while the demo
    # executed unconditionally at import time).
    sample_text = "I have a Question"
    print("The word is: ", FindWord(sample_text))
from firebase_admin import db
from datetime import date, timedelta
from .follow import get_user_following_uid_list
# NEWSFEED 데이터베이스 구조
"""
'NEWSFEED':
{
'uid':
{
'nickname': '닉네임',
'snapshot': [timestamp1, timestamp2, ...]
},
...
}
"""
# follow 목록을 가져와서
# 내가 follow하는 사람만 다 가져와서
# timestamp로 정렬해서 앞에서부터 짤라서 주기
def make_newsfeed(uid, nickname):
    """Register `uid` in NEWSFEED with its nickname (no snapshots yet)."""
    feed_ref = db.reference('NEWSFEED')
    feed_ref.update({uid: {'nickname': nickname}})
def get_all_newsfeed():
    """Return the whole NEWSFEED subtree."""
    feed_ref = db.reference('NEWSFEED')
    return feed_ref.get()
def get_newsfeed_one_uid(uid):
    """Return the NEWSFEED entry stored under `uid`."""
    entry_ref = db.reference('NEWSFEED').child(str(uid))
    return entry_ref.get()
def get_newsfeed_uid(uid):
    """Collect the newsfeed for `uid`: every snapshot timestamp of every
    followed user, newest first.

    Returns a list of {'timestamp', 'uid', 'nickname'} dicts, or None
    when the user follows nobody.  (A leftover debug print of the follow
    list was removed.)
    """
    following = get_user_following_uid_list(uid)
    if not following:
        return None
    entries = []
    for follow in following:
        feed = get_newsfeed_one_uid(follow)
        if feed and 'snapshot' in feed:
            for stamp in feed['snapshot']:
                entries.append({
                    'timestamp': stamp,
                    'uid': follow,
                    'nickname': feed['nickname'],
                })
    return sorted(entries, key=lambda e: e['timestamp'], reverse=True)
def add_snap(uid, timestamp):
    """Append `timestamp` to the user's snapshot list, creating it if absent.

    (Local renamed from `dir`, which shadowed the builtin.)
    """
    snap_ref = db.reference('NEWSFEED').child(str(uid)).child('snapshot')
    snapshots = snap_ref.get()
    if snapshots is None:
        snapshots = [timestamp]
    else:
        snapshots.append(timestamp)
    # Write the full list back on the parent node.
    db.reference('NEWSFEED').child(str(uid)).update({'snapshot': snapshots})
def del_snap(uid, timestamp):
    """Remove `timestamp` from the user's snapshots.

    Returns True when it was present and removed, False otherwise --
    including when the user has no snapshot list at all (the original
    raised TypeError on the membership test against None).
    """
    snap_ref = db.reference('NEWSFEED').child(str(uid)).child('snapshot')
    snapshots = snap_ref.get()
    if snapshots and timestamp in snapshots:
        snapshots.remove(timestamp)
        db.reference('NEWSFEED').child(str(uid)).update({'snapshot': snapshots})
        return True
    return False
def mod_nick(uid, nickname):
    """Update the stored nickname for `uid`; return False if no entry exists.

    (Local renamed from `dir`, which shadowed the builtin.)
    """
    user_ref = db.reference('NEWSFEED').child(str(uid))
    if user_ref.get() is None:
        return False
    user_ref.update({'nickname': nickname})
    return True
def remove_old_newsfeed():
    """Drop snapshot timestamps older than 6 days from every NEWSFEED entry.

    NOTE(review): removal counts old entries anywhere in the list but
    trims from the front, which assumes each 'snapshot' list is sorted
    oldest-first (add_snap appends, so this holds for append-only data);
    timestamps are assumed to start with YYYYMMDD -- confirm with writers.
    """
    feed_ref = db.reference('NEWSFEED')
    all_news = feed_ref.get()
    if not all_news:
        # Nothing stored yet; iterating None would raise TypeError.
        return
    last_day = (date.today() - timedelta(6)).strftime('%Y%m%d')
    for uid in all_news:
        if 'snapshot' not in all_news[uid]:
            continue
        snapshots = all_news[uid]['snapshot']
        stale = 0
        for stamp in snapshots:
            if stamp[:8] < last_day:
                stale += 1
        feed_ref.child(uid).update({'snapshot': snapshots[stale:]})
from bs4 import BeautifulSoup
from urllib2 import urlopen
import csv
# NOTE(review): Python 2 script (print statement, urllib2 import above).
base_url = ("http://espn.go.com/college-football/rankings")
soup = BeautifulSoup(urlopen(base_url).read())
# Each ranked team's table cell links to its detail page.
teams = soup.find_all("td", "align-left team")
team_urls = [td.a["href"] for td in teams]
with open("data/src-ESPN_NCAAF_teams.tsv", "w") as f:
    fieldnames = ("rank", "team", "record", "points", "trend")
    output = csv.writer(f, delimiter="\t")
    output.writerow(fieldnames)
    for url in team_urls:
        url = url.replace("http://espn.go.com", "") # inconsistent URL
        page = urlopen("http://espn.go.com{0}".format(url))
        team1 = BeautifulSoup(page.read()).find("td", {"id": "team"})
        rank = team1.find("td", {"id": "rank"}).encode_contents().strip()
        team2 = team1.h1.encode_contents().strip().split("<br/>")[0]
        # NOTE(review): `restaurant`/`description` look copy-pasted from a
        # different (restaurant) scraper and are never used afterwards.
        restaurant = team1.h1.span.encode_contents()
        description = team1.p.encode_contents().strip()
        # NOTE(review): BUG -- `team`, `record`, `points` and `trend` are
        # never assigned (only `team2` is), so this line raises NameError
        # on the first iteration; the row fields still need extracting.
        output.writerow([rank, team, record, points, trend])
print "Done writing file"
__author__ = 'sujunfeng'
# Seed data for a simple shopping/login exercise.
# NOTE(review): passwords are stored in plain text and the dict is
# serialised with str() -- acceptable only for a toy script; real code
# should hash passwords and use a structured format such as json.
user_info_dict = {
    'jfsu':{
        'password':'abc123!',
        'salary':30000,
        'buying_list':[]
    },
    'mmm':{
        'password':'123123',
        'salary':50000,
        'buying_list':[]
    }
}
# Write the initial user database as a literal dict dump.
with open('user_info_list.txt','w',encoding="utf-8") as f_init:
    f_init.write(str(user_info_dict))
import Ntreefunctions as nt
import matplotlib.pyplot as plt
import U_Eigenvalues as egn
import Regression_Fits as rg
from numpy import *
'''
This code is for generating A(M),B(M),Rho,Gamma for the function epsilon(N,M)
The user specifies the min/max values for N and M
The code generates the matrix U's, finds their eigenvalues/eigenvectors via U_Eigenvalues functions,
caluclates the leading order eigenangles, and uses them in best fits
The code calls on the module Regression_Fits.py for fitting / plotting
Final constants for epsilon(N,M) are printed
'''
#-------------- Setting the Size --------------------------------
N_max = 14
N_min = 2 #Define the min / max graph sizes (not recommended going over N=10 / M=15)
M_max = 8 #Minimum N/M that will work is 2
M_min = 2
#Creating Arrays / Matrices to store values
# Rows index N, columns index M (inclusive ranges, hence the +1).
Angles = zeros(shape=(N_max-N_min+1,M_max-M_min+1))
Betas = zeros(shape=(N_max-N_min+1,M_max-M_min+1))
N_axis = zeros(N_max-N_min+1) #arrays for plotting
M_axis = zeros(M_max-M_min+1)
#-------------- Running The Code --------------------------------
for i in arange(N_min,N_max+1):
    N = int(i)
    N_axis[i-N_min] = N
    for j in arange(M_min,M_max+1):
        #-----------Generating U Matrices--------------
        M = int(j)
        M_axis[j-M_min] = M
        Int,eigen = egn.Find_Eigen(N,M)
        #-------------Extracting Info-----------------
        B,B_r,B_angle = egn.Overlaps(Int,eigen) #stores the overlaps of each eiegenstate with the initial state of the system
        Estates_r,Estates_angle = egn.Eigenstates_Polar(eigen[1]) #stores all the eiegenstates as polar values
        Evalues_angle = egn.Eigenvalues_Polar(eigen[0]) #stores all the eigenvalues as polar values
        top2 = egn.Find_Largest(B_r) #finds the lcoation of the largest two Beta's
        index = int(top2[0])
        #------------
        # Record the eigenangle and overlap of the dominant eigenstate.
        Angles[i-N_min,j-M_min] = abs(Evalues_angle[index])
        Betas[i-N_min,j-M_min] = B_r[index]
#----------- Power Fits for Constant M ----------------------------
Am = zeros( len(M_axis))
Bm = zeros( len(M_axis))
rm = zeros( len(M_axis))
plot_choices = array([2,3,4,8]) #select which values of M to plot
# One power-law fit of Angle vs N per fixed M.
for i in arange( len( M_axis) ):
    Am[i],Bm[i],rm[i] = rg.Power_Regression(N_axis,Angles[:,i]) #calculates the A,B constants as well as r (correlation coefficient)
#-----------------Best Fits for A(M) and B(M)---------------------
alpha,beta,ra = rg.Power_Regression(M_axis,Am)
gamma,rho,rb = rg.Linear_Regression(M_axis,Bm)
#-----------------Plotting The Results ---------------------------
rg.Power_Fit_Plot(M_axis,N_axis,Angles,Am,Bm,rm,plot_choices) #comment this out out if you do not wish to plot
print("_____Constants for epsilon(N,M) (in degrees)_____")
print("Alpha: ",round(alpha,5)," Beta: ",round(beta,5))
print("Gamma: ",round(gamma,5)," Rho: ",round(rho,5))
print("Welcome to the Love Calculator!")
name1 = input("What is your name? \n")
name2 = input("What is their name? \n")
# Count the letters of TRUE and LOVE across both names combined.
names = (name1 + name2).lower()
true_total = sum(names.count(ch) for ch in "true")
love_total = sum(names.count(ch) for ch in "love")
# Concatenate the two counts into the final two-part score.
love_less = int(str(true_total) + str(love_total))
if love_less < 10 or love_less > 90:
    print(f"Your score is {love_less}, you go together like coke and mentos.")
elif 40 <= love_less <= 50:
    print(f"Your score is {love_less}, you are alright together.")
else:
    print(f"Your score is {love_less}")
# Source: https://www.reddit.com/r/dailyprogrammer/comments/65vgkh/20170417_challenge_311_easy_jolly_jumper/
# Not currently done
def jumper(jolly):
    """Return True when `jolly` is a jolly jumper.

    A sequence of n > 0 integers is a jolly jumper when the absolute
    differences between successive elements take on every value from 1
    through n - 1; a single-element sequence is trivially jolly.

    The original draft (marked "not currently done") started the loop at
    jolly[1]'s *value*, indexed one past the end of the list, and
    compared the wrong quantities; this is the completed implementation.
    """
    diffs = [abs(jolly[i] - jolly[i + 1]) for i in range(len(jolly) - 1)]
    return set(diffs) == set(range(1, len(jolly)))
# NOTE(review): [4, 1, 4, 2, 3] is not a jolly jumper (its absolute
# differences are 3, 3, 2, 1, not the full set 1..4); as originally
# written this call also raised IndexError inside jumper().
jumper([4 ,1 ,4 ,2 ,3])
|
from django.core.cache import cache
from django.db import models
# Create your models here.
from main.utils.custom_fields import ContentTypeRestrictedFileField
class Course(models.Model):
    """
    Represents a Course that Students registered for. Related to :model:`Lab Group`.
    """
    # unique=True: a course code identifies exactly one Course row.
    course_code = models.CharField(max_length=128, unique=True, help_text="Course Code (Eg. CZ3002)") # need to set unique
    course_name = models.CharField(max_length=128, help_text="Course Name (Eg. Software Engineering)")
    # Object name for display in admin panel
    def __str__(self):
        return self.course_code
class LabGroup(models.Model):
    """
    Represents a Lab Group belonging to a specific Course. Related to :model:'Course`.
    """
    # Deleting a Course cascades to its lab groups.
    course = models.ForeignKey(Course, on_delete=models.CASCADE, help_text="Course id")
    lab_group_name = models.CharField(max_length=64, help_text="Lab Group Name (Eg. FSP3)")
    # Object name for display in admin panel
    def __str__(self):
        return "%s - %s" % (self.lab_group_name, self.course.course_code)
class LabSession(models.Model):
    """
    Represents a Lab Session belonging to a specific Lab Group. Related to :model:'Lab Group`.
    """
    session_name = models.CharField(max_length=64, help_text="Session Name (Eg. Lab 1, Lab 2)")
    # Deleting a LabGroup cascades to its sessions.
    lab_group = models.ForeignKey(LabGroup, on_delete=models.CASCADE, help_text="Lab group id")
    # Session start/end; set explicitly by the scheduler, not auto-filled.
    date_time_start = models.DateTimeField(null=False, auto_now=False)
    date_time_end = models.DateTimeField(null=False, auto_now=False)
    # Object name for display in admin panel
    def __str__(self):
        return "%s - %s" % (self.session_name, self.lab_group.lab_group_name)
class Student(models.Model):
    """
    Represents a student. Related to :model:'Lab Group`.
    """
    name = models.CharField(max_length=64, help_text="Student Name")
    # Matric numbers are fixed-length (9 chars) and unique per student.
    matric = models.CharField(max_length=9, unique=True, help_text="Student Matric Number")
    photo = ContentTypeRestrictedFileField(
        upload_to=ContentTypeRestrictedFileField.update_student_photo_filename,
        # NOTE(review): 'image/jpg' is not the standard MIME type
        # (browsers generally send 'image/jpeg') -- confirm what the
        # upload actually reports before relying on this whitelist.
        content_types=['image/jpg', 'image/png', 'image/gif'],
        max_upload_size=1500000,
        help_text="Photo of student, there must be exactly 1 face in the photo."
    )
    # Object name for display in admin panel
    def __str__(self):
        return self.name + " - " + self.matric
    def delete_face_recognition_cache(self):
        # Invalidate the per-session cache entries for every lab session
        # of every lab group this student belongs to.
        results = LabGroupStudentPair.objects.filter(student=self)
        for result in results:
            sessions = LabSession.objects.filter(lab_group=result.lab_group)
            for session in sessions:
                cache_key = f"session_id_{session.id}"
                cache.delete(cache_key)
class AttendanceRecord(models.Model):
    """
    Represents a student's attendance for a specific lab session. Related to :model:`student`, :model:`labSession`.
    """
    student = models.ForeignKey(Student, on_delete=models.CASCADE, help_text="Student id")
    lab_session = models.ForeignKey(LabSession, on_delete=models.CASCADE, help_text="Lab session id")
    # (db value, human label) pairs for the status field below.
    ATTENDANCE_STATUS = (
        ("1", "Present"),
        ("2", "Absent"),
        ("3", "Late"),
        ("4", "Absent with valid reason"),
    )
    status = models.CharField(
        max_length=1,
        choices=ATTENDANCE_STATUS,
        help_text="Attendance status. Available options: 1 - Present, 2 - Absent, 3 - Late, 4 - Absent with valid "
                  "reason"
    )
    is_taken_with_facial_recognition = models.BooleanField(
        default=False,
        help_text="Check if this record is created with facial recognition module"
    )
    # Null until the attendance is actually captured.
    date_time_captured = models.DateTimeField(null=True, help_text="Date Time of Attendance Capture")
    date_time_modified = models.DateTimeField(auto_now=True, help_text="Date Time Last Modified")
    remarks = models.CharField(
        max_length=256,
        null=True,
        blank=True,
        help_text="Special Remarks (Eg. Late due to car breakdown)"
    )
    class Meta:
        # One attendance record per student per session.
        unique_together = ('student', 'lab_session',)
    def __str__(self):
        return "%s | %s | %s" % (self.student.matric, self.lab_session.lab_group, self.status)
class LabGroupStudentPair(models.Model):
    """
    Join table associating a Student with a Lab Group (membership).
    Related to :model:`LabGroup` and :model:`Student`.
    """
    lab_group = models.ForeignKey(LabGroup, on_delete=models.CASCADE, help_text="Lab Group id")
    student = models.ForeignKey(Student, on_delete=models.CASCADE, help_text="Student id")
    # Object name for display in admin panel
    def __str__(self):
        return "%s - %s ( %s )" % (self.student.matric, self.lab_group.course, self.lab_group.lab_group_name)
# class AccountProfile(models.Model):
# """
# Represents a account's profile. Related to :model:`auth.User` and :model:`main.Class`.
# """
# account_User = models.OneToOneField(User, on_delete=models.CASCADE, related_name="student_profile")
# privilege_level = (
# "Admin",
# "Course Coord",
# "TA",
# )
# status = models.CharField(
# max_length=1,
# choices=privilege_level,
# help_text="Admin, Course Coord, TA"
# )
# userName = models.CharField(max_length=64, null=False, unique=True)
# email = models.CharField(max_length=64, null=False, unique=True)
# has_reset_password = models.BooleanField(default=False)
#
# def __str__(self):
# return self.email + " : " + self.userName + " (" + self.status + ")"
|
import sys
from scrapy import cmdline
# Make the parent directory importable so the crawler package resolves.
sys.path.append("../")
# cmdline.execute(["scrapy", "crawl", "douban_spider"])
# Run the spider and export scraped items to mingyan.json.
cmdline.execute(["scrapy", "crawl", "douban_spider", "-o", "mingyan.json"])
#!/usr/bin/env python
from os import path, makedirs
import errno
from time import sleep
from argparse import ArgumentParser
server_default_path = '/usr/local/bin/'
file_extensions = ['csv','txt']
parser = ArgumentParser(description='Automatizacao dos script de cria_lista em shell')
parser.add_argument('-e','--extension', help='A extensao do arquivo. Por exemplo: csv', required=False, default='csv')
parser.add_argument('-n','--name', help='Nome do mailing', required=True, default='mailing_sem_nome')
parser.add_argument('-p','--path', help='Diretorio de destino do arquivo', required=False, default='/usr/local/bin/')
parser.add_argument('-f','--format', help='Formato da fila. Por exemplo: 103', required=False)
parser.add_argument('-s','--password', help='Senha do banco', required=False, default='atlanta@121')
args = parser.parse_args()
def number_format(num_format):
    """Build a 5-digit character-class pattern like '[1][2][3][4][1-9]'.

    None            -> default pattern '[1-9][0-9][0-9][0-9][1-9]'
    3 digits 'abc'  -> '[a][b][c][0][1-9]'
    4 digits 'abcd' -> '[a][b][c][d][1-9]'
    5+ digits       -> one bracket per digit (extra digits ignored)
    Non-digit input is returned unchanged.

    NOTE(review): 1- or 2-digit input still raises IndexError, as in the
    original -- confirm whether such input should be rejected explicitly.
    """
    end_format = []
    if num_format is None:
        return '[1-9][0-9][0-9][0-9][1-9]'
    if num_format.isdigit():
        # (the original called num_format.split() here and discarded the
        # result -- a no-op, removed)
        for digit in num_format:
            end_format.append(digit)
        # Pad short patterns out to five character classes.
        if len(end_format) == 4:
            end_format.append('1-9')
        elif len(end_format) == 3:
            end_format.append('0')
            end_format.append('1-9')
        num_format = '[' + end_format[0] + ']' + '[' + end_format[1] + ']' + '[' + end_format[2] + ']' + '[' + end_format[3] + ']' + '[' + end_format[4] + ']'
    return num_format
def filepath_validation(file_path):
    """Normalise `file_path` under the server default dir and ensure it exists.

    Returns the directory path, always ending in '/'; missing directories
    are created (EEXIST races are tolerated).
    """
    global server_default_path
    full_path = server_default_path + file_path
    # The caller may pass the default path itself; collapse the doubled prefix.
    if full_path == '/usr/local/bin//usr/local/bin/':
        full_path = server_default_path
    # '!=' instead of 'is not': identity comparison against a string
    # literal is implementation-dependent (and a SyntaxWarning on 3.8+).
    if full_path[-1:] != '/':
        full_path = full_path + '/'
    if not path.exists(full_path):
        try:
            makedirs(full_path)
            print("Exporting to => ", full_path)
            sleep(1)
        except OSError as exc:  # Guard against race condition
            if exc.errno != errno.EEXIST:
                print(errno.EEXIST)
                raise
    return full_path
def file_extension_validation(file_extension):
    """Return a supported, dot-free file extension, defaulting to 'csv'.

    The original tested membership in `file_extensions` *before*
    stripping the leading dot, so '.txt' was wrongly rejected and reset
    to 'csv'; the dot is now stripped first.
    """
    if '.' in file_extension:
        file_extension = file_extension.replace('.', '')
    if file_extension not in file_extensions:
        file_extension = 'csv'
    return file_extension
def cria_lista_shell():
    """Generate a cria_lista_<name>.sh shell script from the CLI args.

    The generated script watches /tmp for mailing files matching the
    configured number pattern, sanitises them, rewrites the list id into
    the SQL template and imports it into MySQL, then marks the file as
    imported.

    NOTE(review): the script embeds the database password in plain text
    -- confirm this is acceptable for the deployment.
    """
    file_extension = args.extension
    name_mailling = args.name
    file_path = args.path
    num_format = args.format
    password = args.password
    full_path = filepath_validation(file_path)
    file_extension = file_extension_validation(file_extension)
    file_name = '{0}/cria_lista_{1}'.format(full_path, name_mailling)
    num_formatting = number_format(num_format)
    # Template placeholders: {0}=mailing name, {1}=extension,
    # {2}=number pattern, {3}=path, {4}=db password.
    shell = '''
    #Conteudo para Cron
    #* * * * * {3}cria_lista_{0}.sh > /var/log/contact/cria_lista_{0}.log 2> /var/log/contact/cria_lista_{0}.log
    #cria a lista santander parcela
    if [ `find /tmp/{2}_{0}.{1} | wc -l` -ge 1 ]
    then
    for V_LISTA in `find /tmp/{2}_{0}.{1} | cut -c6-10`
    do
    SERVICE='listawork_{0}.sql'
    if ps ax | grep -v grep | grep $SERVICE > /dev/null
    then
    echo "$SERVICE service running, do not rerum me"
    exit
    else
    echo "$SERVICE is not running run me"
    fi
    sed $'s/[^[:print:]\\t]//g' /tmp/${{V_LISTA}}_{0}.{1} > /tmp/${{V_LISTA}}_{0}.out
    mv /tmp/${{V_LISTA}}_{0}.out /tmp/${{V_LISTA}}_{0}.{1}
    sed -e "s/9999/$V_LISTA/" {3}cria_lista_{0}.sql > {3}listawork_{0}.sql
    mysql --user=root --password={4} iogurte < {3}listawork_{0}.sql > /var/log/contact/listawork_{0}.log 2> /var/log/contact/listawork{0}.log
    mv /tmp/${{V_LISTA}}_{0}.{1} /tmp/${{V_LISTA}}_{0}.{1}.importado
    done
    fi
    '''.format(name_mailling, file_extension, num_formatting, full_path, password)
    with open (file_name + '.sh', 'w') as shell_file:
        shell_file.write(shell)
if __name__ == '__main__':
    # Build the shell script only when invoked directly.
    cria_lista_shell()
# 6kyu - Number , number ... wait LETTER !
""" Your task is to write a function named do_math that receives a single argument.
This argument is a string that contains multiple whitespace delimited numbers.
Each number has a single alphabet letter somewhere within it.
Example : "24z6 1x23 y369 89a 900b"
As shown above, this alphabet letter can appear anywhere within the number.
You have to extract the letters and sort the numbers according to their corresponding letters.
Example : "24z6 1x23 y369 89a 900b" will become 89 900 123 369 246 (ordered according to the alphabet letter)
Here comes the difficult part, now you have to do a series of computations on the numbers you have extracted.
The sequence of computations are + - * /.
Basic math rules do NOT apply, you have to do each computation in exactly this order.
This has to work for any size of numbers sent in (after division, go back to addition, etc).
In the case of duplicate alphabet letters, you have to arrange them according to the number
that appeared first in the input string.
Remember to also round the final answer to the nearest integer.
Examples :
"24z6 1x23 y369 89a 900b" = 89 + 900 - 123 * 369 / 246 = 1299
"24z6 1z23 y369 89z 900b" = 900 + 369 - 246 * 123 / 89 = 1414
"10a 90x 14b 78u 45a 7b 34y" = 10 + 45 - 14 * 7 / 78 + 90 - 34 = 60 """
# import re
# def do_math(s):
# sep = [(re.findall('\D', x)[0], re.sub('\D', '', x)) for x in s.split()]
# nums = [x[1] for x in sorted(sep, key=lambda xs: xs[0])]
# acc = nums[0]
# operations = '+-*/'
# for i in range(1, len(nums)):
# acc = eval(f'{acc}{operations[(i-1)%4]}{nums[i]}')
# return round(float(acc))
from re import fullmatch
from itertools import cycle
from functools import reduce
from operator import add, sub, mul, truediv as div
def do_math(s):
    """Sort the letter-tagged numbers by their letter, then fold them
    together cycling through + - * / and round the final result."""
    tagged = []
    for token in s.split():
        before, letters, after = fullmatch('(\d*)([a-z]+)(\d*)', token).groups()
        tagged.append((letters, int(before + after)))
    # Stable sort: duplicate letters keep their input order.
    ordered = [value for _, value in sorted(tagged, key=lambda pair: pair[0])]
    ops = cycle([add, sub, mul, div])
    total = ordered[0]
    for value in ordered[1:]:
        total = next(ops)(total, value)
    return round(total)
# The bare `q` lines originally only echoed values in a REPL; as a
# script they were no-ops that never compared anything.  Turn the
# (result, expected) pairs into real assertions.
assert do_math("24z6 1z23 y369 89z 900b") == 1414
assert do_math("24z6 1x23 y369 89a 900b") == 1299
assert do_math("10a 90x 14b 78u 45a 7b 34y") == 60
assert do_math("111a 222c 444y 777u 999a 888p") == 1459
assert do_math("1z 2t 3q 5x 6u 8a 7b") == 8
|
{
"targets": [{
"target_name": "lwip_encoder",
"sources": [
# LWIP:
#######
"src/encoder/init.cpp",
"src/encoder/jpeg_worker.cpp",
"src/encoder/png_worker.cpp",
"src/encoder/gif_worker.cpp",
# LIB JPEG:
###########
"src/lib/jpeg/jdatadst.c",
"src/lib/jpeg/jmemnobs.c",
"src/lib/jpeg/jcomapi.c",
"src/lib/jpeg/jerror.c",
"src/lib/jpeg/jfdctflt.c",
"src/lib/jpeg/jfdctfst.c",
"src/lib/jpeg/jfdctint.c",
"src/lib/jpeg/jidctflt.c",
"src/lib/jpeg/jidctfst.c",
"src/lib/jpeg/jidctint.c",
"src/lib/jpeg/jutils.c",
"src/lib/jpeg/jmemmgr.c",
"src/lib/jpeg/jaricom.c",
"src/lib/jpeg/jquant1.c",
"src/lib/jpeg/jquant2.c",
"src/lib/jpeg/jcapimin.c",
"src/lib/jpeg/jcapistd.c",
"src/lib/jpeg/jccoefct.c",
"src/lib/jpeg/jccolor.c",
"src/lib/jpeg/jcdctmgr.c",
"src/lib/jpeg/jchuff.c",
"src/lib/jpeg/jcinit.c",
"src/lib/jpeg/jcmainct.c",
"src/lib/jpeg/jcmarker.c",
"src/lib/jpeg/jcmaster.c",
"src/lib/jpeg/jcparam.c",
"src/lib/jpeg/jcprepct.c",
"src/lib/jpeg/jcsample.c",
"src/lib/jpeg/jcarith.c",
# LIB PNG:
##########
"src/lib/png/png.c",
"src/lib/png/pngset.c",
"src/lib/png/pngget.c",
"src/lib/png/pngtrans.c",
"src/lib/png/pngmem.c",
"src/lib/png/pngerror.c",
"src/lib/png/pngwrite.c",
"src/lib/png/pngwutil.c",
"src/lib/png/pngwio.c",
"src/lib/png/pngwtran.c",
# ZLIB:
#######
"src/lib/zlib/adler32.c",
"src/lib/zlib/crc32.c",
"src/lib/zlib/gzlib.c",
"src/lib/zlib/zutil.c",
"src/lib/zlib/gzwrite.c",
"src/lib/zlib/compress.c",
"src/lib/zlib/deflate.c",
"src/lib/zlib/trees.c",
# LIB GIF:
##########
"src/lib/gif/egif_lib.c",
"src/lib/gif/gif_err.c",
"src/lib/gif/gifalloc.c",
"src/lib/gif/gif_hash.c",
"src/lib/gif/quantize.c"
],
'include_dirs': [
'<!(node -e "require(\'nan\')")',
'src/encoder',
'src/lib/zlib',
'src/lib/jpeg',
'src/lib/cimg',
'src/lib/png',
'src/lib/gif'
],
'conditions': [
['OS=="freebsd"', {
'cflags!': ['-fno-exceptions'],
'cflags_cc!': ['-fno-exceptions'],
}],
['OS=="solaris"', {
'cflags!': ['-fno-exceptions'],
'cflags_cc!': ['-fno-exceptions'],
}],
['OS=="linux"', {
'cflags!': ['-fno-exceptions'],
'cflags_cc!': ['-fno-exceptions'],
}],
['OS=="mac"', {
'xcode_settings': {
'GCC_ENABLE_CPP_EXCEPTIONS': 'YES'
},
'include_dirs': ['/usr/include/malloc']
}],
['OS=="win"', {
'configurations': {
'Release': {
'msvs_settings': {
'VCCLCompilerTool': {
'ExceptionHandling': 1
}
}
}
},
'include_dirs': ['src/win']
}]
]
},
{
"target_name": "action_after_build",
"type": "none",
"dependencies": [ "<(module_name)" ],
"copies": [
{
"files": [ "<(PRODUCT_DIR)/<(module_name).node" ],
"destination": "<(module_path)"
}
]
}]
}
|
from o3seespy.command.nd_material.base_material import NDMaterialBase
class FluidSolidPorous(NDMaterialBase):
    """
    The FluidSolidPorous NDMaterial Class
    FluidSolidPorous material couples the responses of two phases: fluid and solid. The fluid phase response is only
    volumetric and linear elastic. The solid phase can be any NDMaterial. This material is developed to simulate the
    response of saturated porous media under fully undrained condition.
    """
    op_type = 'FluidSolidPorous'
    def __init__(self, osi, nd, soil_mat, combined_bulk_modul, pa=101.0):
        r"""
        Initial method for FluidSolidPorous
        Parameters
        ----------
        osi: o3seespy.OpenSeesInstance
        nd: float
            Number of dimensions, 2 for plane-strain, and 3 for 3d analysis.
        soil_mat: obj
            The material number for the solid phase material (previously defined).
        combined_bulk_modul: float
            Combined undrained bulk modulus :math:`b_c` relating changes in pore pressure and volumetric strain, may be
            approximated by: :math:`b_c \approx b_f /n` where :math:`b_f` is the bulk modulus of fluid phase (2.2x106 kpa (or
            3.191x105 psi) for water), and :math:`n` the initial porosity.
        pa: float, optional
            Optional atmospheric pressure for normalization (typically 101 kpa in si units, or 14.65 psi in english
            units)
        """
        self.osi = osi
        self.nd = float(nd)
        self.soil_mat = soil_mat
        self.combined_bulk_modul = float(combined_bulk_modul)
        self.pa = float(pa)
        if osi is not None:
            # Register with the OpenSees instance and take the next material tag.
            osi.n_mat += 1
            self._tag = osi.n_mat
        # NOTE(review): when osi is None, self._tag is only available if the
        # base class provides a default -- confirm NDMaterialBase sets one.
        self._parameters = [self.op_type, self._tag, self.nd, self.soil_mat.tag, self.combined_bulk_modul, self.pa]
        if osi is None:
            # Deferred build: command is issued later, once an instance exists.
            self.built = 0
        if osi is not None:
            self.to_process(osi)
    def set_update_material_stage(self, value, ele=None, eles=None):
        # Forward the 'updateMaterialStage' parameter update to OpenSees.
        self.set_parameter(self.osi, 'updateMaterialStage', value, ele, eles)
    def set_combined_bulk_modulus(self, value, ele=None, eles=None):
        # Forward the 'combinedBulkModulus' parameter update to OpenSees.
        self.set_parameter(self.osi, 'combinedBulkModulus', value, ele, eles)
|
import urllib3
from manga_py.provider import Provider
from .helpers.std import Std
class MangaTownCom(Provider, Std):
    """manga_py provider for mangatown.com."""

    def get_archive_name(self) -> str:
        # Chapter index looks like "<volume>-<chapter...>"; zero-pad the volume.
        volume, *rest = self.get_chapter_index().split('-')
        return 'vol_{:0>3}-{}'.format(volume, *rest)

    def get_chapter_index(self) -> str:
        match = self.re.search('/manga/[^/]+/c([^/]+)', self.chapter)
        # e.g. "10.5" -> "10-5"
        return match.group(1).replace('.', '-')

    def get_main_content(self):
        return self._get_content('{}/manga/{}/')

    def get_manga_name(self) -> str:
        return self._get_name('/manga/([^/]+)/?')

    def get_chapters(self):
        return self.document_fromstring(self.content, '.chapter_list a')

    def prepare_cookies(self):
        # The site's certificate cannot be verified: turn verification off
        # and silence urllib3's warning about it.
        urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
        self._http_kwargs['verify'] = False

    def get_files(self):
        """Collect image urls from the chapter's first page and every
        additional page listed in the page selector."""
        img_selector = 'img#image'
        first_page = self.html_fromstring(self.http().normalize_uri(self.chapter))
        images = self._images_helper(first_page, img_selector)
        # Every <option> after the first one is another page of the chapter.
        page_selector = '#top_chapter_list + .page_select select option + option'
        for option in first_page.cssselect(page_selector):
            page_url = self.http().normalize_uri(option.get('value'))
            page = self.html_fromstring(page_url)
            images += self._images_helper(page, img_selector)
        return images

    def get_cover(self):
        return self._cover_from_content('.detail_info > img')

    def book_meta(self) -> dict:
        # todo meta
        pass
# Module-level alias; presumably the entry point the manga_py plugin loader
# looks up by the name `main` -- confirm against the loader.
main = MangaTownCom
|
'''
Why does a destructor in a base class need to be declared virtual?
Hints: 421, 460
'''
def virt_baseclass(s):
    """Answer stub for the quiz question in the module docstring.

    Currently unimplemented; returns None.
    """
    pass


if __name__ == '__main__':
    # Fix: the original lines were `assert virt_baseclass('') ==` with no
    # right-hand side -- a SyntaxError.  Until expected answers are filled
    # in, just verify the stub runs and returns None.
    assert virt_baseclass('') is None
|
from tkinter import *
# ---------------------------- PASSWORD GENERATOR ------------------------------- #
# ---------------------------- SAVE PASSWORD ------------------------------- #
# ---------------------------- UI SETUP ------------------------------- #
window = Tk()
window.title("Password Manager")
window.config(padx=50, pady=50)

# Logo canvas.  Keep a module-level reference to the PhotoImage so Tkinter
# does not garbage-collect it while the window is alive.
canvas = Canvas(width=200, height=200)
lock_img = PhotoImage(file="logo.png")
canvas.create_image(102, 100, image=lock_img)
canvas.grid(row=0, column=1)

# Row 1: website entry spans two columns.
website_label = Label(text="Website:")
website_label.grid(row=1, column=0)
website_text = Entry(width=35)
website_text.grid(row=1, column=1, columnspan=2)

# Row 2: email / username entry.
email_label = Label(text="Email/Username:")
email_label.grid(row=2, column=0)
email_text = Entry(width=35)
email_text.grid(row=2, column=1, columnspan=2)

# Row 3: password entry plus generator button (no command wired up yet).
password_label = Label(text="Password:")
password_label.grid(row=3, column=0)
password_text = Entry(width=16)
password_text.grid(row=3, column=1)
password_btn = Button(text="Generate Password")
password_btn.grid(row=3, column=2)

# Row 4: add/save button (no command wired up yet).
add_btn = Button(text="Add", width=45)
add_btn.grid(row=4, column=1, columnspan=2)

window.mainloop()
import payconiq
import requests
from .exceptions import PayconiqError
# NOTE(review): process-wide setting -- raises the connection retry count
# for every requests adapter created after this import, not just this module.
requests.adapters.DEFAULT_RETRIES = 5
class Transaction:
    """Client-side helper for Payconiq's /transactions REST resource."""

    @classmethod
    def get_base_url(cls):
        # Collection endpoint under the configured Payconiq API root.
        return '{base_url}/transactions'.format(
            base_url=payconiq.get_base_url())

    @classmethod
    def get_url(cls, id):
        # Single-transaction endpoint.
        return '{base_url}/{id}'.format(base_url=cls.get_base_url(), id=id)

    @classmethod
    def request(cls, *args, **kwargs):
        """Forward to requests.request; raise PayconiqError on any non-2xx."""
        response = requests.request(*args, **kwargs)
        if response.status_code < 200 or response.status_code > 299:
            raise PayconiqError.from_response(response=response)
        return response

    @classmethod
    def start(cls, amount, webhook_url, currency='EUR', merchant_token=None):
        """Create a transaction and return its id."""
        if merchant_token is None:
            merchant_token = payconiq.merchant_token
        response = cls.request(
            method='POST',
            url=cls.get_base_url(),
            headers={'Authorization': merchant_token},
            json={
                'amount': amount,
                'currency': currency,
                'callbackUrl': webhook_url,
            },
        )
        return response.json()['transactionId']

    @classmethod
    def get(cls, id, merchant_token=None):
        """Fetch a transaction by id and return the decoded JSON body."""
        if merchant_token is None:
            merchant_token = payconiq.merchant_token
        response = cls.request(
            method='GET',
            url=cls.get_url(id),
            headers={'Authorization': merchant_token},
        )
        return response.json()
|
import cv2
import random
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import pickle
from glob import glob
import imgaug as ia
from config import *
import os
import glob
import pandas as pd
import xml.etree.ElementTree as ET
def display_img(img, polygons=[], channels="bgr", size=9):
    """
    Display an inline image and draw optional polygons (bounding boxes,
    convex hulls) on it.  Use channels="bgr" for an image coming from the
    OpenCV world.
    """
    if not isinstance(polygons, list):
        polygons = [polygons]
    if channels == "bgr":
        # OpenCV images are BGR(A); matplotlib wants RGB(A).
        if img.shape[2] == 4:
            img = cv2.cvtColor(img, cv2.COLOR_BGRA2RGBA)
        else:
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    fig, ax = plt.subplots(figsize=(size, size))
    ax.set_facecolor((0, 0, 0))
    ax.imshow(img)
    for poly in polygons:
        # cv2 contours (e.g. convex hulls) come as (n, 1, 2);
        # flatten them to (n, 2) before drawing.
        if len(poly.shape) == 3:
            poly = poly.reshape(-1, 2)
        ax.add_patch(patches.Polygon(poly, linewidth=1,
                                     edgecolor='g', facecolor='none'))
def give_me_filename(dirname, suffixes, prefix=""):
    """
    Return a filename (or list of filenames) in directory 'dirname' that does
    not exist yet, one per suffix in 'suffixes', all sharing the same random
    9-digit stem:

        filename = dirname + "/" + prefix + random number + "." + suffix

    Ex:
        > give_me_filename("dir", "jpg", prefix="prefix")
        'dir/prefix408290659.jpg'
        > give_me_filename("dir", ["jpg", "xml"])
        ['dir/877739594.jpg', 'dir/877739594.xml']
    """
    if not isinstance(suffixes, list):
        suffixes = [suffixes]
    # Normalize every suffix to start with a dot.
    suffixes = [s if s[0] == '.' else '.' + s for s in suffixes]
    while True:
        stem = "%09d" % random.randint(0, 999999999)
        candidates = [os.path.join(dirname, prefix + stem + s) for s in suffixes]
        # Retry with a new stem if any candidate collides with an existing file.
        if not any(os.path.isfile(c) for c in candidates):
            break
    return candidates[0] if len(candidates) == 1 else candidates
class Backgrounds():
    """Pool of background images loaded from a pickle file."""

    def __init__(self, backgrounds_pck_fn):
        """Load the pickled list of background images.

        Fix: the original passed open(...) straight to pickle.load and
        never closed the file handle; use a context manager.
        """
        with open(backgrounds_pck_fn, 'rb') as f:
            self._images = pickle.load(f)
        self._nb_images = len(self._images)
        print("Nb of images loaded :", self._nb_images)

    def get_random(self, display=False):
        """Return a uniformly random background; optionally imshow it."""
        bg = self._images[random.randint(0, self._nb_images - 1)]
        if display:
            plt.imshow(bg)
        return bg
class Cards():
    """Pool of extracted card images loaded from a pickle file.

    self._cards is a dictionary where keys are card names (ex: 'Kc') and
    values are lists of (img, hullHL, hullLR).
    """

    def __init__(self, cards_pck_fn):
        # Fix: close the pickle file (the original leaked the handle).
        with open(cards_pck_fn, 'rb') as f:
            self._cards = pickle.load(f)
        self._nb_cards_by_value = {k: len(self._cards[k]) for k in self._cards}
        print("Nb of cards loaded per name :", self._nb_cards_by_value)

    def get_random(self, card_name=None, display=False):
        """Return (card_image, card_name) for a random (or given) card name."""
        if card_name is None:
            card_name = random.choice(list(self._cards.keys()))
        card = self._cards[card_name][
            random.randint(0, self._nb_cards_by_value[card_name] - 1)]
        if display:
            # Fix: the original called display_img(card, "rgb"), which passed
            # the whole (img, hull, hull) tuple as the image and "rgb" as the
            # *polygons* argument; show the image with the right keyword.
            display_img(card[0], channels="rgb")
        return card[0], card_name
def corner_to_kps(corner, decalX=decalX, decalY=decalY):
    """
    Transform a corner point array into imgaug keypoints, shifted by
    (decalX, decalY) to sit on the (imgH, imgW) work image.
    """
    points = [ia.Keypoint(x=p[0] + decalX, y=p[1] + decalY) for p in corner]
    return ia.KeypointsOnImage(points, shape=(imgH, imgW, 3))
def make_bb_patch(x1, y1, x2, y2):
    """Build an unfilled blue matplotlib Rectangle patch for the bounding
    box with corners (x1, y1) and (x2, y2).

    NOTE(review): the original lines were a bare `rect = ...` / `return rect`
    with no enclosing `def` (a SyntaxError -- the function header was lost).
    Reconstructed with the four coordinates as parameters; confirm against
    the intended original signature.
    """
    rect = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=1,
                             edgecolor='b', facecolor='none')
    return rect
# Pre-computed keypoint sets for the four card corners.  corner1..corner4 are
# presumably provided by the star import from `config` -- confirm.
kps1 = corner_to_kps(corner1)
kps2 = corner_to_kps(corner2)
kps3 = corner_to_kps(corner3)
kps4 = corner_to_kps(corner4)
list_kps = [kps1, kps2, kps3, kps4]
def kps_rectangle(kps):
    """
    Given a keypoint set, return its axis-aligned bounding box as
    (x1, x2, y1, y2) = (min x, max x, min y, max y).
    """
    xs = [kp.x for kp in kps.keypoints]
    ys = [kp.y for kp in kps.keypoints]
    return min(xs), max(xs), min(ys), max(ys)
def superposed(img, list_kps):
    """
    Remove from list_kps (in place) every keypoint set whose bounding box
    region in img contains any non-zero pixel, i.e. is covered by img.
    Returns the (mutated) list.
    """
    margin = 0  # optional shrink of the tested region (unused: 0)
    kept = []
    for kps in list_kps:
        x1, x2, y1, y2 = kps_rectangle(kps)
        region = img[int(y1 + margin):int(y2 - margin),
                     int(x1 + margin):int(x2 - margin), :]
        if not region.any():
            kept.append(kps)
    list_kps[:] = kept
    return list_kps
class BBA:  # Bounding box + annotations
    """Integer-pixel bounding box plus its class label.

    Note the constructor argument order is (x1, x2, y1, y2); coordinates
    are rounded to the nearest integer pixel.
    """

    def __init__(self, x1, x2, y1, y2, classname):
        self.x1, self.x2 = int(round(x1)), int(round(x2))
        self.y1, self.y2 = int(round(y1)), int(round(y2))
        self.classname = classname
def create_image(cards, backgrounds, transf, scaleBg):
    """
    Create an image composed of 2 images from cards and 1 background from background
    using the imgaug transformation transf.
    Returns image array, bba (bounding box + annotations) and list of kps for both cards.

    NOTE(review): relies on module-level globals -- np, imgH, imgW, cardH,
    cardW, decalX, decalY (presumably from `config` via the star import) and
    `list_kps` defined above -- confirm.
    """
    # First card: paste onto a transparent RGBA canvas, then augment.
    im1, card_val1 = cards.get_random()
    img1 = np.zeros((imgH, imgW, 4), dtype=np.uint8)
    img1[decalY:decalY + cardH, decalX:decalX + cardW, :] = im1
    # to_deterministic() freezes the random transform so the image and its
    # keypoints receive the same augmentation.
    seq = transf.to_deterministic()
    im1 = seq.augment_images([img1])[0]
    list_kps_aug_1 = []
    [list_kps_aug_1.append(seq.augment_keypoints([kps])[0]) for kps in list_kps]
    # Second card with an independent augmentation.
    im2, card_val2 = cards.get_random()
    img2 = np.zeros((imgH, imgW, 4), dtype=np.uint8)
    img2[decalY:decalY + cardH, decalX:decalX + cardW, :] = im2
    seq = transf.to_deterministic()
    im2 = seq.augment_images([img2])[0]
    list_kps_aug_2 = []
    [list_kps_aug_2.append(seq.augment_keypoints([kps])[0]) for kps in list_kps]
    # Remove im2 boxes behind im1
    list_kps_aug_1 = superposed(im2, list_kps_aug_1)
    # Convert the surviving keypoint groups to annotated bounding boxes.
    bba = []
    for kps in list_kps_aug_1:
        xmin, xmax, ymin, ymax = kps_rectangle(kps)
        bba.append(BBA(xmin, xmax, ymin, ymax, card_val1))
    for kps in list_kps_aug_2:
        xmin, xmax, ymin, ymax = kps_rectangle(kps)
        bba.append(BBA(xmin, xmax, ymin, ymax, card_val2))
    # Composite card 2 over card 1 over a scaled random background, using
    # each card's alpha channel (replicated to 3 channels) as mask.
    bg = backgrounds.get_random()
    bg = scaleBg.augment_image(bg)
    mask1 = im1[:, :, 3]
    mask1 = np.stack([mask1] * 3, -1)
    final = np.where(mask1, im1[:, :, 0:3], bg)
    mask2 = im2[:, :, 3]
    mask2 = np.stack([mask2] * 3, -1)
    final = np.where(mask2, im2[:, :, 0:3], final)
    return final, bba, list_kps_aug_1, list_kps_aug_2
xml_body_1 = """<annotation>
<folder>FOLDER</folder>
<filename>{FILENAME}</filename>
<path>{PATH}</path>
<source>
<database>Unknown</database>
</source>
<size>
<width>{WIDTH}</width>
<height>{HEIGHT}</height>
<depth>3</depth>
</size>
"""
xml_object = """ <object>
<name>{CLASS}</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<bndbox>
<xmin>{XMIN}</xmin>
<ymin>{YMIN}</ymin>
<xmax>{XMAX}</xmax>
<ymax>{YMAX}</ymax>
</bndbox>
</object>
"""
xml_body_2 = """</annotation>
"""
def create_voc_xml(xml_file, img_file, listbba, display=False):
    """Write a Pascal VOC annotation file for img_file from a list of BBA."""
    with open(xml_file, "w") as out:
        out.write(xml_body_1.format(FILENAME=os.path.basename(img_file),
                                    PATH=img_file, WIDTH=imgW, HEIGHT=imgH))
        for bba in listbba:
            out.write(xml_object.format(CLASS=bba.classname,
                                        XMIN=bba.x1, YMIN=bba.y1,
                                        XMAX=bba.x2, YMAX=bba.y2))
        out.write(xml_body_2)
    if display:
        print("New xml", xml_file)
def write_files(final, save_dir, listbba, display=False):
    """Save a generated scene image as JPEG plus its VOC XML annotation,
    under a fresh shared random filename stem inside save_dir."""
    jpg_fn, xml_fn = give_me_filename(save_dir, ["jpg", "xml"])
    plt.imsave(jpg_fn, final)
    if display: print("New image saved in", jpg_fn)
    create_voc_xml(xml_fn, jpg_fn, listbba, display=display)
def xml_to_csv(path):
    """Collect every VOC <object> from path/*.xml into one flat DataFrame
    with columns filename, width, height, class, xmin, ymin, xmax, ymax."""
    columns = ['filename', 'width', 'height', 'class', 'xmin', 'ymin', 'xmax', 'ymax']
    rows = []
    for xml_file in glob.glob(path + '/*.xml'):
        root = ET.parse(xml_file).getroot()
        filename = root.find('filename').text
        size = root.find('size')
        width, height = int(size[0].text), int(size[1].text)
        for obj in root.findall('object'):
            # Child order inside <object>: name, pose, truncated, difficult, bndbox.
            bndbox = obj[4]
            rows.append((filename, width, height, obj[0].text,
                         int(bndbox[0].text), int(bndbox[1].text),
                         int(bndbox[2].text), int(bndbox[3].text)))
    return pd.DataFrame(rows, columns=columns)
|
from tetpyclient import RestClient
import json
import urllib3
# Access vars
API_ENDPOINT = "https://plx.cisco.com"
CREDENTIALS_FILE = './api_credentials.json'

# The endpoint uses an unverifiable certificate: skip TLS verification
# and mute urllib3's warnings about it.
urllib3.disable_warnings()
rest_client = RestClient(API_ENDPOINT,
                         credentials_file=CREDENTIALS_FILE,
                         verify=False)
response = rest_client.get('/openapi/v1/applications/')

# Print all applications
if response.status_code == 200:
    parsed = json.loads(response.content)
    print(json.dumps(parsed, indent=4, sort_keys=True))
class Solution(object):
    def maxAreaOfIsland(self, grid):
        """
        :type grid: List[List[int]]
        :rtype: int

        Scan every cell and flood-fill each unvisited land cell, tracking
        the largest island area.  NOTE: the grid is mutated in place
        (visited land cells are zeroed).
        """
        best = 0
        for x in range(len(grid)):
            for y in range(len(grid[0])):
                if grid[x][y]:
                    best = max(best, self.bfs(grid, x, y))
        return best

    def bfs(self, grid, x, y):
        """Flood-fill the island containing (x, y) and return its area.

        Marks visited cells by zeroing them.  Fix: the original used
        list.pop(0), which is O(n) per dequeue; a deque gives O(1) popleft.
        """
        from collections import deque
        queue = deque([(x, y)])
        grid[x][y] = 0
        area = 0
        while queue:
            x, y = queue.popleft()
            area += 1
            for dx, dy in ((1, 0), (0, 1), (-1, 0), (0, -1)):
                nx, ny = x + dx, y + dy
                if 0 <= nx < len(grid) and 0 <= ny < len(grid[0]):
                    if grid[nx][ny]:
                        grid[nx][ny] = 0
                        queue.append((nx, ny))
        return area
# 1249. Minimum Remove to Make Valid Parentheses
# Given a string s of '(', ')' and lowercase English characters.
# Your task is to remove the minimum number of parentheses('(' or ')', in any positions) so that the resulting parentheses string is valid and return any valid string.
# Formally, a parentheses string is valid if and only if:
# It is the empty string, contains only lowercase characters, or
# It can be written as AB(A concatenated with B), where A and B are valid strings, or
# It can be written as (A), where A is a valid string.
# Example 1:
# Input: s = "lee(t(c)o)de)"
# Output: "lee(t(c)o)de"
# Explanation: "lee(t(co)de)", "lee(t(c)ode)" would also be accepted.
# Example 2:
# Input: s = "a)b(c)d"
# Output: "ab(c)d"
# Example 3:
# Input: s = "))(("
# Output: ""
# Explanation: An empty string is also valid.
# Example 4:
# Input: s = "(a(b(c)d)"
# Output: "a(b(c)d)"
class Solution(object):
    def minRemoveToMakeValid(self, s):
        """Remove the minimum number of parentheses so the result is valid.

        One pass with a stack of open-paren indices: unmatched ')' are
        dropped immediately, and any '(' left on the stack at the end are
        dropped too.
        """
        drop = set()
        open_idx = []
        for i, ch in enumerate(s):
            if ch == '(':
                open_idx.append(i)
            elif ch == ')':
                if open_idx:
                    open_idx.pop()
                else:
                    drop.add(i)
        drop.update(open_idx)
        return ''.join(ch for i, ch in enumerate(s) if i not in drop)
|
import matplotlib.pyplot as plt
def row_map(row):
    """Convert one row of string pixel values to a list of ints."""
    return [int(value) for value in row]
# Load a text-encoded intensity grid and render it as an image.
with open("image-output/img-4000-40.txt", "r") as f:
    d = f.readlines()
# The whole grid sits on the first line: rows separated by ';', pixels within
# a row separated by ','.  [:-1] drops the trailing character (presumably a
# final ';' or newline -- confirm against the generator).
data = [row.split(',') for row in d[0][:-1].split(';')]
#print(data)
int_data = list(map(row_map, data))
plt.imshow(int_data, cmap="inferno")
plt.axis("off")
plt.savefig("img.pdf")
plt.show()
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class UserProfile(models.Model):
    """One-to-one profile extension of the built-in Django User."""

    # Fix: on_delete is a required argument from Django 2.0 on; the original
    # `OneToOneField(User)` raises TypeError at import time.
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    # Creation timestamp: set once when the row is first saved.
    timestamp = models.DateTimeField(auto_now=False, auto_now_add=True)
    # Fix: last-modified timestamp must use auto_now=True so it refreshes on
    # every save(); the original used auto_now_add, freezing it at creation.
    updated = models.DateTimeField(auto_now=True)

    def __str__(self):
        return self.user.username
|
# Total number of conserved variables carried by the solver.
nvar = 4

# Conserved-variable indices -- we set these when we initialize, for they
# match the ccData2d object's ordering; -1 means "not yet assigned".
idens = -1   # density
ixmom = -1   # x-momentum
iymom = -1   # y-momentum
iener = -1   # energy

# Primitive-variable indices (fixed ordering).
irho = 0
iu = 1
iv = 2
ip = 3
|
import pyblish.api
from avalon import io
from reveries.maya import utils
from maya import cmds
class _ValidateModelConsistencyOnLook(pyblish.api.InstancePlugin):
    """(Deprecated) Ensure model UUID consistent and unchanged in LookDev
    """

    order = pyblish.api.ValidatorOrder
    hosts = ["maya"]
    label = "LookDev Model Consistency"
    families = [
        "reveries.look",
    ]

    model_family = "reveries.model"
    rig_family = "reveries.rig"

    def process(self, instance):
        """Validate that the look's target geometry matches a previously
        published model/rig (matched by Avalon UUID), warning when mesh
        content has changed.

        Fix: removed the unreachable `return` that followed the
        `raise Exception("No UUID required nodes.")` statement.
        """
        if "xgen" in instance.data["subset"].lower():
            self.log.info("No need to check on XGen look.")
            return

        elif "rig" in instance.data["subset"].lower():
            # rig's look
            self.log.info("Checking on rig and model.")
            FAMILIES = [
                self.model_family,
                self.rig_family,
            ]
            repr_name = "mayaBinary"

        else:
            # model's look
            self.log.info("Checking on model.")
            FAMILIES = [
                self.model_family,
            ]
            repr_name = "mayaBinary"

        collected_profiles = dict()

        asset = instance.context.data["assetDoc"]
        assert asset["name"] == instance.data["asset"], "Not the same asset."
        self.log.info("Asset: %s" % asset["name"])

        # Collect the latest published "modelProfile" of every subset whose
        # families intersect the ones we care about.
        for subset in io.find({"type": "subset", "parent": asset["_id"]}):
            latest = io.find_one({"type": "version", "parent": subset["_id"]},
                                 sort=[("name", -1)])
            if latest is None:
                continue
            if not any(family in latest["data"]["families"]
                       for family in FAMILIES):
                continue
            # Get representation
            representation = io.find_one({"type": "representation",
                                          "parent": latest["_id"],
                                          "name": repr_name})
            profile = representation["data"]["modelProfile"]
            collected_profiles[subset["name"]] = profile

        if not collected_profiles:
            # Model not even published before, this is not right.
            self.log.error("No model been found.")
            raise Exception("No model for this look has been published "
                            "before, please publish model first.")

        # Expand the instance's UUID-required roots to every descendant
        # mesh's parent transform.
        hierarchy = cmds.ls(instance.data["requireAvalonUUID"], long=True)
        hierarchy += cmds.listRelatives(hierarchy,
                                        allDescendents=True,
                                        fullPath=True) or []
        meshes = cmds.ls(hierarchy, type="mesh", long=True)
        uuid_required_geos = cmds.listRelatives(meshes,
                                                parent=True,
                                                fullPath=True)
        if not uuid_required_geos:
            raise Exception("No UUID required nodes.")

        # Hash current model and collect Avalon UUID
        geo_id_and_hash = dict()
        hasher = utils.MeshHasher()
        warned = False
        for transform in uuid_required_geos:
            # It must be one mesh paring to one transform.
            mesh = cmds.listRelatives(transform,
                                      shapes=True,
                                      noIntermediate=True,
                                      fullPath=True)[0]
            id = utils.get_id(transform)
            if id is None:
                if not warned:
                    self.log.warning("Some mesh has no Avalon UUID.")
                    warned = True
                continue

            hasher.set_mesh(mesh)
            hasher.update_points()
            hasher.update_normals()
            hasher.update_uvmap()
            # id must be unique, no other should have same id.
            geo_id_and_hash[id] = hasher.digest()
            hasher.clear()

        # Find matched model/rig subsets
        matched = list()
        for name, profile in collected_profiles.items():
            current_ids = set(geo_id_and_hash.keys())
            previous_ids = set(profile.keys())
            if current_ids.issuperset(previous_ids):
                self.log.info("Match found: %s" % name)
                matched.append(name)
            elif (self.rig_family in FAMILIES and
                    current_ids.issubset(previous_ids)):
                # In current pipeline, the look for rig is only for preview,
                # no need to be strict on this.
                self.log.info("Partial match found: %s" % name)
                matched.append(name)
            else:
                self.log.debug("Not matched: %s" % name)

        # Is current model/rig that this look applied to being published ?
        being_published = False
        staged_instances = [i for i in instance.context
                            if (i.data["family"] in FAMILIES and
                                i.data.get("publish", True))]
        for inst in staged_instances:
            nodes = cmds.ls(inst, long=True)
            if set(nodes).issuperset(set(uuid_required_geos)):
                self.log.info("Model/Rig is being published.")
                being_published = True
                break
            else:
                self.log.debug("Instance not match: %s" % inst.name)

        # If it's not being published, check on match state
        if not being_published:
            if not matched:
                raise Exception("Current models UUID is not consistent "
                                "with previous published version.\n"
                                "Please update your loaded model/rig, or "
                                "publish it if you are the author.")
            else:
                # Checking on mesh changes, and pop warning if changed.
                changed_on = list()
                for match in matched:
                    for id, hash in geo_id_and_hash.items():
                        if id not in collected_profiles[match]:
                            continue
                        if not collected_profiles[match][id] == hash:
                            changed_on.append(match)
                            break
                if changed_on:
                    self.log.warning("Some model has been modified, the look "
                                     "may not apply correctly on these "
                                     "subsets:")
                    for changed in changed_on:
                        self.log.warning(changed)
|
import pyttsx3
# Text-to-speech smoke test: print the letter sequence, then speak it.
engine = pyttsx3.init()
print("A B XC D E F G H I J ")
engine.say('A B XC D E F G H I J')
# runAndWait blocks until the queued utterance has finished playing.
engine.runAndWait()
# Generated by Django 2.0.7 on 2018-07-31 15:18
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (makemigrations) schema change: adds the optional
    `address` CharField to the yomarket Shop model.  Generated migrations
    should generally not be hand-edited once applied.
    """

    dependencies = [
        ('yomarket', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='shop',
            name='address',
            field=models.CharField(blank=True, max_length=200),
        ),
    ]
|
# Standard library imports
import json
import threading
import logging
import time
# Third party imports
from kafka import KafkaConsumer
# Local application imports
from Config.config import NiktoAgents, KafkaTopicNames, KafkaConfig, KafkaGroupIds
from Module.AgentCaller.niktoCaller import NiktoCaller
class NiktoScanConsumers(threading.Thread):
    """Kafka consumer thread that fans Nikto scan jobs out to a pool of agents
    in round-robin order, respecting each agent's concurrent-scan limit.
    """

    def __init__(self):
        threading.Thread.__init__(self)
        self.consumer = KafkaConsumer(
            KafkaTopicNames.NIKTOSCAN,
            bootstrap_servers = KafkaConfig.BOOTSTRAPSERVER,
            auto_offset_reset = 'earliest',
            enable_auto_commit = False,
            session_timeout_ms = NiktoAgents.SESSION_TIMEOUT_MS,
            max_poll_records = 1,
            max_poll_interval_ms = NiktoAgents.MAX_POLL_INTERVAL_MS,
            group_id = KafkaGroupIds.NIKTOSCAN,
            value_deserializer = lambda x: json.loads(x.decode('utf-8')))
        self.niktoAgentsAddress = NiktoAgents.NIKTO_AGENT_ADDRESS
        self.totalAgent = len(self.niktoAgentsAddress)
        self.curAgentIndex = 0
        self.logger = logging.getLogger(__name__)
        # One set of live NiktoCaller threads per agent, used for capacity checks.
        self.currentRunningThread = [set() for _ in range(self.totalAgent)]

    def run(self):
        """Poll scan requests and dispatch one Nikto scan per open port.

        Fixes: the http/https dispatch loops were duplicated verbatim --
        factored into _dispatch_ports/_assign_to_agent; iterate
        msg_pack.values() instead of indexing .items(); catch Exception
        rather than a bare except (which also swallowed SystemExit).
        """
        try:
            while True:
                msg_pack = self.consumer.poll(timeout_ms=2000)
                for consumer_records in msg_pack.values():
                    for consumer_record in consumer_records:
                        data = consumer_record.value
                        self.logger.info('Recieved {}'.format(data))
                        openports_http = data.get('openports_http')
                        openports_https = data.get('openports_https')
                        # Strip the port lists before forwarding the payload.
                        niktoData = data
                        if 'openports_http' in niktoData:
                            del niktoData['openports_http']
                        if 'openports_https' in niktoData:
                            del niktoData['openports_https']
                        self._dispatch_ports(niktoData, data, openports_http, "http")
                        self._dispatch_ports(niktoData, data, openports_https, "https")
        except Exception:
            self.logger.exception("Thread " + __name__ + " terminated")

    def _dispatch_ports(self, niktoData, data, ports, scheme):
        """Queue one scan per port, building the scheme://target:port url."""
        for port in ports:
            niktoData['target_url'] = scheme + "://" + niktoData.get('target') + ":" + port
            niktoData['portScanned'] = port
            self._assign_to_agent(niktoData, data)

    def _assign_to_agent(self, niktoData, data):
        """Round-robin the job onto the first agent with spare capacity,
        sleeping when every agent is busy; commit the offset once handled."""
        while True:
            niktoAgentAddress = self.niktoAgentsAddress[self.curAgentIndex]
            isMessageHandled = False
            assignedIndex = -1
            if self.isAgentCanScan(self.curAgentIndex):
                niktoCaller = NiktoCaller(niktoAgentAddress.get('HOST'),
                                          niktoAgentAddress.get('PORT'),
                                          niktoData)
                niktoCaller.start()
                self.currentRunningThread[self.curAgentIndex].add(niktoCaller)
                isMessageHandled = True
                assignedIndex = self.curAgentIndex
            self.curAgentIndex = (self.curAgentIndex + 1) % self.totalAgent
            if isMessageHandled:
                self.consumer.commit()
                self.logger.info('{} is sent to agent [{}] -- its current running [{}] scans'.format(
                    data, niktoAgentAddress, len(self.currentRunningThread[assignedIndex])))
                return
            if self.curAgentIndex == 0:
                self.logger.info('All Nikto_Agent are busy.. sleep for {} seconds'.format(
                    NiktoAgents.THREAD_SLEEP_SECOND))
                time.sleep(NiktoAgents.THREAD_SLEEP_SECOND)

    def isAgentCanScan(self, curAgentIndex):
        """Return True when the agent runs fewer than MAX_SCAN_PER_AGENT
        live scans; dead caller threads are pruned as a side effect."""
        # TODO: check if we can ping to the host or not.
        try:
            MAX_THREAD_PER_AGENT = NiktoAgents.MAX_SCAN_PER_AGENT
            total = 0
            for thread in self.currentRunningThread[curAgentIndex].copy():
                if thread.is_alive():
                    total += 1
                else:
                    self.currentRunningThread[curAgentIndex].remove(thread)
            return total < MAX_THREAD_PER_AGENT
        except Exception:
            self.logger.exception("Thread " + __name__ + " have some error")
            return False
|
"""
The DESDM single-CCD image masking module.
"""
import os
# Package metadata.
__author__ = "Felipe Menanteau, Alex Drlica-Wagner, Eli Rykoff"
__version__ = '3.0.3'
__revision__= '0'
# Legacy alias kept for callers that read `version` instead of `__version__`.
version = __version__
# Re-export the masking library and its commonly used helpers.
from . import immasklib
from .immasklib import cmdline
from .immasklib import elapsed_time
|
def etabar(t, per, A=1):
    """Sine wave of amplitude A and period per, evaluated at t."""
    return A * numpy.sin(2 * numpy.pi * t / per)
def noisify(arr):
    """Add Gaussian noise (sigma = 4% of the array maximum) to arr.

    Returns (noisy_array, noise) so the noise itself can be analysed.
    """
    amplitude = numpy.max(arr)
    noise = numpy.random.normal(0, 0.04 * amplitude, len(arr))
    return arr + noise, noise
def der(arr, t):
    """Central-difference first derivative of arr with respect to t.

    Returns an array two elements shorter than the input (endpoints dropped).
    Fix: the central difference is (f[i+1]-f[i-1])/(t[i+1]-t[i-1]); the span
    t[i+1]-t[i-1] already equals 2h, but the original divided by
    2*(t[i+1]-t[i-1]) = 4h, halving every derivative.
    """
    d = (arr[2:] - arr[:-2]) / (t[2:] - t[:-2])
    return d
def d2(arr, t):
    """Central-difference second derivative of arr with respect to t.

    Returns an array two elements shorter than the input.
    Fix: the divisor must be h^2, i.e. (t[i+1]-t[i])*(t[i]-t[i-1]) on a
    uniform grid; the original used (t[i+1]-t[i-1])^2 = 4h^2, making every
    second derivative four times too small.
    """
    d = (arr[2:] - 2 * arr[1:-1] + arr[:-2]) / ((t[2:] - t[1:-1]) * (t[1:-1] - t[:-2]))
    return d
#Main program
import numpy
import matplotlib.pyplot as plt
# Build a clean sine over five periods, then a noisy copy of it.
p=2*numpy.pi
t=numpy.linspace(0,5*p,201)
# NOTE(review): this rebinds the name `etabar` from the function above to
# its result array -- the function is no longer callable after this line.
etabar=etabar(t,p)
eta,noise=noisify(etabar)
# Noise statistics: mean and standard deviation.
meanE=numpy.sum(noise)/len(noise)
sdE=numpy.sqrt(numpy.sum(noise*noise)/len(noise)-meanE**2)
print("Mean noise: %7.4g, noise standard deviation: %7.4g"%(meanE,sdE))
# First derivatives of the clean signal and of the noise alone.
detabar=der(etabar,t)
#deta=der(eta,t)
dE=der(noise,t)
meandE=numpy.sum(dE)/len(dE)
sddE=numpy.sqrt(numpy.sum(dE*dE)/len(dE)-meandE**2)
print("Mean noise derivative: %7.4g, noise derivative standard deviation: %7.4g"%(meandE,sddE))
# Second derivatives.
d2etabar=d2(etabar,t)
d2E=d2(noise,t)
meand2E=numpy.sum(d2E)/len(d2E)
sdd2E=numpy.sqrt(numpy.sum(d2E*d2E)/len(d2E)-meand2E**2)
print("Mean noise second derivative: %7.4g, noise second derivative standard deviation: %7.4g"%(meand2E,sdd2E))
print("Conclusion: When derivatives are calculated, the standar deviation of the noise increases, making such derivatives more noisy and innacurate than the original values")
# Plot the signal, its noisy version and the noise.
plt.figure()
plt.plot(t,etabar,"r-",label="etabar")
plt.plot(t,eta,"bo",label="eta")
plt.plot(t,noise,"g--",label="noise")
plt.legend()
plt.savefig("ej8.45eta.png")
# Plot the first derivatives (derivative arrays are 2 samples shorter).
plt.figure()
plt.plot(t[1:-1],detabar,"r-",label="d(etabar)/dt")
#plt.plot(t[1:-1],deta,"co",label="d(eta)/dt")
plt.plot(t[1:-1],(detabar+dE),"bo",label="d(eta)/dt")
plt.plot(t[1:-1],dE,"g--",label="d(noise)/dt")
plt.legend()
plt.savefig("ej8.45deta.png")
# Plot the second derivatives.
plt.figure()
plt.plot(t[1:-1],d2etabar,"r-",label="d2(etabar)/dt2")
#plt.plot(t[1:-1],deta,"co",label="d(eta)/dt")
plt.plot(t[1:-1],(d2etabar+d2E),"bo",label="d2(eta)/dt2")
plt.plot(t[1:-1],d2E,"g--",label="d2(noise)/dt2")
plt.legend()
plt.savefig("ej8.45d2eta.png")
#plt.show()
|
import cv2
# Load the source frame (absolute path from the paper workspace).
a = cv2.imread("D:/paper/IEEE model/0 2019 paper/picture/3shatian6_frame_10_1.png")
# Downscale to 1080x720; INTER_AREA is the interpolation suited to shrinking.
b = cv2.resize(a,(1080,720),interpolation=cv2.INTER_AREA)
cv2.imshow("b",b)
cv2.imwrite("3shatian6_frame_10_2.png", b)
cv2.waitKey(0) # wait for a key press before closing the window
#!/usr/bin/env python3
#
# Evaluate an assembly file
# (not the same as running the virtual machine)
#
import sys
from meta import register_names, flag_names
from utils import remove_comment
import core
def get_operation_by_name(name):
    """Resolve an opcode name like 'mov' to core.op_mov on the already
    imported `core` module."""
    core_module = sys.modules['core']
    return getattr(core_module, 'op_' + name)
def parse_argument(arg):
    """Parse one instruction operand.

    * decimal literal  -> int (must fit in a signed byte)
    * 'c' char literal -> its ordinal value
    * anything else    -> a register/flag name, validated against meta

    Fix: the digit branch converted and range-checked the value but never
    returned it, so numeric operands silently became None.
    """
    if arg.isdigit():
        value = int(arg)
        assert value < 128
        assert value >= -128
        return value
    if arg.startswith("'"):
        assert len(arg) == 3
        return ord(arg[1])
    assert arg in (register_names + flag_names)
    return arg
def parse_instruction(line):
    """Split 'op arg1 arg2 ...' into ('op', [parsed arguments])."""
    op, *raw_args = line.split(' ')
    assert op
    return op, [parse_argument(a) for a in raw_args]
def eval_line(line):
    """Parse a single source line (comments stripped) and execute its op."""
    op, args = parse_instruction(remove_comment(line).strip())
    handler = get_operation_by_name(op)
    handler(*args)
def eval_code(code):
    """Execute every non-blank, non-comment line of an assembly listing,
    echoing each line first when core.trace is enabled."""
    for line in code.split('\n'):
        if not remove_comment(line).strip():
            continue
        if core.trace:
            print('> ', line.strip())
        eval_line(line)
# Import-time side effects: run core's self-test and switch tracing on.
core.test()
core.trace = True

if __name__ == '__main__':
    core.reset()
    # Demo program: prints "hi" one character at a time.
    eval_code("""
    mov 'h' a
    putc
    mov 'i' a
    putc
    """)
|
from django.conf.urls import url
from ..views import introduction
# Introduction endpoints.  The `name=` values are presumably reversed
# elsewhere (templates/redirects), so they must not change.
urlpatterns = [
    url(r'^new$', introduction.new, name='introduction_new'),
    url(r'^(?P<pk>\d+)/update$', introduction.update, name='introduction_update'),
    url(r'^get$', introduction.get, name='introduction_get'),
]
from imports import *
from data_loading import *
def make_rand_attacks(name, x, y, model, method, step_size, chunk=50,
                      epsilon=0.1, n_steps=1):
    """Generate adversarial examples for (x, y) chunk by chunk and save the
    stacked result to Google Drive as '<name>.npy'.

    Processing `chunk` samples at a time bounds memory use; the else branch
    handles the final partial chunk.  Returns the full attacked array.
    NOTE(review): `np` presumably comes from the star import of `imports`
    -- confirm.
    """
    x_aug = []
    idx = 0
    while idx < len(x):
        if (idx + chunk < len(x)):
            x_aug.extend(gen_rand_attack(x[idx:idx + chunk],
                                         y[idx:idx + chunk],
                                         model, method, step_size,
                                         epsilon, n_steps))
        else:
            x_aug.extend(gen_rand_attack(x[idx:len(x)],
                                         y[idx:len(y)],
                                         model, method, step_size,
                                         epsilon, n_steps))
        idx += chunk
        print(idx)  # progress indicator
    output = np.array(x_aug)
    PATH = "/content/drive/My Drive/ML Final Project Files/"  # Randy's Path
    with open(PATH + name + '.npy', 'wb') as f:
        np.save(f, output)
    return output
def gen_rp(x, epsilon):
    """Random-perturbation attack: add uniform noise in [-epsilon, epsilon]
    and clip back into the valid pixel range [0, 1]."""
    perturbation = rn.uniform(-epsilon, epsilon, x.shape)
    return np.clip(x + perturbation, 0, 1)  # Pixel range
####################################################################
# Guo, C., Gardner, J., You, Y., Wilson, A. and Weinberger, K., 2019.
# Simple Black-Box Adversarial Attacks
def get_probs(model, x, y):
    """Return softmax class probabilities of model(x).

    Inputs with fewer than 4 dims get a channel dimension inserted at
    position 1.  `y` is accepted for signature compatibility but unused.
    """
    if len(x.shape) < 4:
        x = torch.unsqueeze(x, 1)
    logits = model(x.float())
    return torch.nn.functional.softmax(logits, dim=1)
# (untargeted) SimBA for batch
def simba_single(model, x, y, num_iters=10000, step_size=0.2, epsilon=0.2):
    """Batched untargeted SimBA attack (Guo et al., 2019).

    Each iteration perturbs one random pixel per image by +/-step_size and
    keeps whichever direction lowers the model's probability for that
    image's true label y[j].  NOTE(review): `device` and `rn` are
    presumably module-level objects from the `imports` star import
    (a torch.device and numpy.random) -- confirm.
    """
    im = torch.clone(
        torch.from_numpy(x)).detach().requires_grad_(True).to(device)
    im_orig = torch.clone(im).detach().to("cpu")
    for i in range(num_iters):
        last_prob = get_probs(model, im, y)
        # Left/right candidates: same pixel nudged down/up by step_size.
        x_L = torch.clone(im).detach().to(device)
        x_R = torch.clone(im).detach().to(device)
        for j in range(im.shape[0]):
            # One random spatial coordinate per image in the batch.
            dim1 = rn.randint(im[j].shape[0])
            dim2 = rn.randint(im[j].shape[1])
            x_L[j, dim1, dim2] = x_L[j, dim1, dim2] - step_size
            x_R[j, dim1, dim2] = x_R[j, dim1, dim2] + step_size
        # Project the left candidate into the epsilon-ball, then into [0, 1].
        x_L = torch.clone(x_L).detach().to("cpu")
        x_L = np.clip(x_L, im_orig - epsilon, im_orig + epsilon)
        x_L = torch.clone(x_L).detach().to(device)
        x_L = x_L.clamp(0, 1)
        #################################################
        # Same projection for the right candidate.
        x_R = torch.clone(x_R).detach().to("cpu")
        x_R = np.clip(x_R, im_orig - epsilon, im_orig + epsilon)
        x_R = torch.clone(x_R).detach().to(device)
        x_R = x_R.clamp(0, 1)
        left_prob = get_probs(model, x_L, y)
        right_prob = get_probs(model, x_R, y)
        for j in range(im.shape[0]):
            # Keep whichever single-pixel step reduced the true-class prob.
            if left_prob[j, y[j]] < last_prob[j, y[j]]:
                im[j, :, :] = x_L[j, :, :]
            elif right_prob[j, y[j]] < last_prob[j, y[j]]:
                im[j, :, :] = x_R[j, :, :]
    ret = im.detach().to("cpu").numpy()
    return ret
####################################################################
# Method will either be "RP" or "SimBA"
def gen_rand_attack(x, y, model, method, step_size, epsilon, n_steps):
    """Dispatch one attack chunk to RP or SimBA and sanitize the result.

    NOTE(review): this local `device` does not reach simba_single, which
    reads a module-level `device` (presumably from `imports`) -- confirm
    they agree.  `clean_images` presumably comes from data_loading.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    if (model):
        model.to(device)
        model.eval()
    if method == "RP":
        augment_x = gen_rp(x, epsilon)
    elif method == "SimBA":
        augment_x = simba_single(model=model, x=x, y=y, num_iters=n_steps,
                                 step_size=step_size, epsilon=epsilon)
    else:
        print("Unsupported TYPE (RP or SimBA)")
        return  # implicitly returns None on an unknown method
    # Final projection into the epsilon-ball around the clean inputs.
    augment_x = np.clip(augment_x, x - epsilon, x + epsilon)
    return clean_images(augment_x)
|
#!/usr/bin/env python 2.7
# -*- coding: utf-8 -*-
import env
import detect_engines.pdf.lib.classifier as classifier
import detect_engines.pdf.lib.get_pdf_features as gpf
def detect(data, filename):
    """Classify a PDF: extract its feature vector and run the classifier.

    :param data: raw PDF bytes/content handed to the feature extractor
    :param filename: file name passed alongside the data
    :return: the classifier's verdict from classifier.identify()

    Any extraction/classification error propagates to the caller.
    (The original wrapped the body in ``try/except: raise``, which is a
    no-op — exceptions propagated anyway — so the wrapper was removed.)
    """
    vector = gpf.get_pdf_features(data, filename)
    clf = classifier.get_pdf_classifier()
    return classifier.identify(clf, vector)
# Feature indices whose exact value triggers a note: (index, value, message).
# Order matters: notes are emitted in this sequence, matching the original.
EXACT_VALUE_NOTES = (
    (296, 2, 'Long Annotation Length\n'),
    (298, 2, 'Long Confused String\n'),
    (299, 2, 'Multi Confused Strings\n'),
    (299, 1, 'Few Confused Strings\n'),
    (300, 1, 'Function Name in Strings\n'),
    (301, 1, 'Out Reference\n'),
    (302, 2, 'Multi Long Name Functions\n'),
    (302, 1, 'Few Long Name Functions\n'),
    (303, 2, 'Multi Long Strings\n'),
    (303, 1, 'Few Long Strings\n'),
    (304, 2, 'Multi Long Name Var\n'),
    (304, 1, 'Few Long Name Var\n'),
)

# Suspicious token counters: (index, kind, token); noted whenever count > 0.
SUSPICIOUS_TOKEN_NOTES = (
    (306, 'Api', 'app'),
    (307, 'Api', 'apply'),
    (308, 'Api', 'charCodeAt'),
    (309, 'Api', 'concat'),
    (310, 'Api', 'Date'),
    (311, 'Api', 'doc'),
    (312, 'Api', 'escape'),
    (313, 'Api', 'eval'),
    (314, 'Api', 'execInitializ'),
    (315, 'Key Word', 'for'),
    (316, 'Api', 'fromCharCode'),
    (317, 'Api', 'function'),
    (318, 'Api', 'join'),
    (319, 'Api', 'getAnnots'),
    (320, 'Api', 'length'),
    (321, 'Api', 'new'),
    (322, 'Api', 'push'),
    (323, 'Api', 'rawValue'),
    (324, 'Api', 'replace'),
    (325, 'Api', 'search'),
    (326, 'Api', 'String'),
    (327, 'Api', 'substr'),
    (328, 'Api', 'splite'),
    (329, 'Api', 'syncAnnotScan'),
    (330, 'Api', 'target'),
    (331, 'Key Word', 'this'),
    (332, 'Api', 'toLowerCase'),
    (333, 'Api', 'toUpperCase'),
    (334, 'Api', 'util.printf'),
    (335, 'Api', 'unescape'),
    (336, 'Key Word', 'while'),
)


def describe_vector(vector):
    """Render the human-readable notes for an extracted PDF feature vector.

    :param vector: indexable sequence of numeric feature values
    :return: newline-terminated notes concatenated in feature-index order
    """
    notes = []
    for index, value, message in EXACT_VALUE_NOTES:
        if int(vector[index]) == value:
            notes.append(message)
    # Special case kept from the original: value 1 or 2 yields the same note.
    if int(vector[305]) in (1, 2):
        notes.append('Multi \'+\' in Strings\n')
    for index, kind, token in SUSPICIOUS_TOKEN_NOTES:
        if int(vector[index]) > 0:
            notes.append("Suspicious %s '%s'\n" % (kind, token))
    return ''.join(notes)


def describe(data, filename):
    """Extract features from a PDF and return the human-readable notes.

    Errors from feature extraction propagate (the original's
    ``try/except: raise`` wrapper was a no-op and was removed).
    """
    vector = gpf.get_pdf_features(data, filename)
    return describe_vector(vector)
|
# -*- coding: utf-8 -*-
"""
biosppy.signals.bvp
-------------------
This module provides methods to process Blood Volume Pulse (BVP) signals.
-------- DEPRECATED --------
PLEASE, USE THE PPG MODULE
This module was left for compatibility
----------------------------
:copyright: (c) 2015-2018 by Instituto de Telecomunicacoes
:license: BSD 3-clause, see LICENSE for more details.
"""
# Imports
# compat
from __future__ import absolute_import, division, print_function
from six.moves import range
# 3rd party
import numpy as np
# local
from . import tools as st
from . import ppg
from .. import plotting, utils
def bvp(signal=None, sampling_rate=1000., path=None, show=True):
    """Process a raw BVP signal and extract its main features.

    DEPRECATED — please use the ppg module; kept for compatibility.

    Parameters
    ----------
    signal : array
        Raw BVP signal.
    sampling_rate : int, float, optional
        Sampling frequency (Hz).
    path : str, optional
        If provided, the plot will be saved to the specified file.
    show : bool, optional
        If True, show a summary plot.

    Returns
    -------
    ts, filtered, onsets, heart_rate_ts, heart_rate : ReturnTuple
        Time axis (s), band-pass filtered signal, pulse-onset indices,
        heart-rate time axis (s), and instantaneous heart rate (bpm).
    """
    if signal is None:
        raise TypeError("Please specify an input signal.")

    raw = np.array(signal)
    fs = float(sampling_rate)

    # Band-pass filter in the typical pulse band (1-8 Hz).
    filtered, _, _ = st.filter_signal(signal=raw,
                                      ftype='butter',
                                      band='bandpass',
                                      order=4,
                                      frequency=[1, 8],
                                      sampling_rate=fs)

    # Pulse onsets via the Elgendi (2013) detector from the ppg module.
    onsets, _ = ppg.find_onsets_elgendi2013(signal=filtered, sampling_rate=fs)

    # Smoothed instantaneous heart rate from the onset train.
    hr_idx, hr = st.get_heart_rate(beats=onsets,
                                   sampling_rate=fs,
                                   smooth=True,
                                   size=3)

    # Time axes for the raw signal and the heart-rate samples.
    n_samples = len(raw)
    ts = np.linspace(0, (n_samples - 1) / fs, n_samples, endpoint=False)
    ts_hr = ts[hr_idx]

    if show:
        plotting.plot_bvp(ts=ts,
                          raw=raw,
                          filtered=filtered,
                          onsets=onsets,
                          heart_rate_ts=ts_hr,
                          heart_rate=hr,
                          path=path,
                          show=True)

    names = ('ts', 'filtered', 'onsets', 'heart_rate_ts', 'heart_rate')
    return utils.ReturnTuple((ts, filtered, onsets, ts_hr, hr), names)
|
import numpy as np
import pandas as pd
from contextlib import contextmanager
import warnings
from scipy.spatial.distance import pdist, squareform
from enum import Enum
class SymbolType(Enum):
    """Kind of tradable instrument."""
    # BUG FIX: the original wrote `Stock=1,` — the trailing comma silently
    # made the member's value the tuple (1,) instead of the int 1.
    Stock = 1
    ETF = 2
# General purpose utility functions for the simulator, attached to no particular class.
# Available to any agent or other module/utility. Should not require references to
# any simulator object (kernel, agent, etc).
# Module level variable that can be changed by config files.
# Module-level flag, changeable by config files; when True all log_print
# output is suppressed (and the format call is skipped entirely).
silent_mode = False


def log_print(fmt, *args):
    """Print ``fmt.format(*args)`` to stdout unless silent mode is active.

    Formatting is deferred until after the silent check, so disabled log
    statements cost almost nothing.  (The parameter was originally named
    ``str``, shadowing the builtin; renamed to ``fmt`` — all known call
    sites pass it positionally.)
    """
    if not silent_mode:
        print(fmt.format(*args))


def be_silent():
    """Accessor for the global silent_mode flag."""
    return silent_mode


def delist(list_of_lists):
    """Flatten one level of nesting: [[a, b], [c]] -> [a, b, c]."""
    return [item for inner in list_of_lists for item in inner]
# Utility function to get agent wake up times to follow a U-quadratic distribution.
def get_wake_time(open_time, close_time, a=0, b=1):
    """Draw a wake time between open_time and close_time, U-quadratically
    distributed over the interval.

    See https://en.wikipedia.org/wiki/U-quadratic_distribution for the
    distribution; sampling uses the inverse-transform method.
    """
    def real_cbrt(value):
        """Real-valued cube root (handles negative inputs)."""
        return -((-value) ** (1.0 / 3.0)) if value < 0 else value ** (1.0 / 3.0)

    def inverse_cdf(u):
        """Inverse CDF of the U-quadratic distribution on [a, b]."""
        alpha = 12 / ((b - a) ** 3)
        beta = (b + a) / 2
        return real_cbrt((3 / alpha) * u - (beta - a) ** 3) + beta

    multiplier = inverse_cdf(np.random.rand())
    return open_time + multiplier * (close_time - open_time)
def numeric(s):
    """Coerce *s* to int, else float, after stripping trailing commas.

    Falls back to returning the comma-stripped string itself when neither
    conversion succeeds.  Adapted from https://stackoverflow.com/a/379966.
    """
    stripped = s.rstrip(',')
    for convert in (int, float):
        try:
            return convert(stripped)
        except ValueError:
            continue
    return stripped
def get_value_from_timestamp(s, ts):
    """Get the value of s at the index entry closest in time to ts.

    :param s: pandas Series with pd.DatetimeIndex
    :type s: pd.Series
    :param ts: timestamp at which to retrieve data
    :type ts: pd.Timestamp
    """
    # Truncate the lookup timestamp to second resolution, matching the
    # original behaviour of formatting through strftime.
    target = pd.Timestamp(ts.strftime('%Y-%m-%d %H:%M:%S'))
    # Keep only the last row for any duplicated index entries.
    s = s.loc[~s.index.duplicated(keep='last')]
    # BUG FIX: Index.get_loc(..., method='nearest') was deprecated in
    # pandas 1.4 and removed in 2.0; get_indexer is the supported API.
    pos = s.index.get_indexer([target], method='nearest')[0]
    out = s.iloc[pos]
    # Defensive unwrap of array-like cells, as the original did.
    if isinstance(out, (np.ndarray, pd.Series)):
        out = out[0]
    return out
@contextmanager
def ignored(warning_str, *exceptions):
    """Context manager that wraps the code block in a try/except statement,
    catching the specified exceptions and printing a warning supplied by
    the user instead of propagating them.

    :param warning_str: Warning statement printed when exception encountered
    :param exceptions: one or more exception types, e.g. ValueError
    https://stackoverflow.com/a/15573313
    """
    try:
        yield
    except exceptions:
        # Surface the problem as a warning rather than propagating it.
        warnings.warn(warning_str, UserWarning, stacklevel=1)
        # Also echo to stdout unless the module-level silent flag is set.
        if not silent_mode:
            print(warning_str)
def generate_uniform_random_pairwise_dist_on_line(left, right, num_points, random_state=None):
    """Uniformly generate points on an interval and return the square matrix
    of pairwise Euclidean distances between them.

    :param left: left endpoint of interval
    :param right: right endpoint of interval
    :param num_points: number of points to use
    :param random_state: np.random.RandomState; when None the global numpy
        RNG is used (the original crashed with the documented default).
    :return: (num_points, num_points) symmetric distance matrix
    """
    rng = random_state if random_state is not None else np.random
    x_coords = rng.uniform(low=left, high=right, size=num_points)
    x_coords = x_coords.reshape((x_coords.size, 1))
    return squareform(pdist(x_coords, 'euclidean'))
def meters_to_light_ns(x):
    """Convert *x* in meters to light-nanoseconds (truncated to int).

    Light travels 299792458e-9 m (~0.3 m) per nanosecond, hence the divisor.
    Generalized to accept scalars and array-likes: the original required a
    numpy array because it called .astype directly on the quotient.

    :param x: distance(s) in meters (scalar or array-like)
    :return: integer ndarray of light-nanoseconds
    """
    x_lns = np.asarray(x) / 299792458e-9
    return x_lns.astype(int)
def validate_window_size(s):
    """Coerce *s* to an int, or accept the string 'adaptive' (any case).

    Raises ValueError for anything else.
    """
    try:
        return int(s)
    except ValueError:
        lowered = s.lower()
        if lowered != 'adaptive':
            raise ValueError(f'String {s} must be integer or string "adaptive".')
        return lowered
def sigmoid(x, beta):
    """Numerically stable logistic function 1 / (1 + exp(-beta * x)).

    Splits on the sign of x so the exponential argument is never positive,
    avoiding overflow.  Adapted from
    https://timvieira.github.io/blog/post/2014/02/11/exp-normalize-trick/
    """
    if x < 0:
        # exp(beta*x) is small here; denominator 1+z can never be zero.
        z = np.exp(beta * x)
        return z / (1 + z)
    z = np.exp(-beta * x)
    return 1 / (1 + z)
|
# Zero Matrix
# Write an algorithm such that, if an element in an M x N matrix is 0, its entire row and column are set to 0.
|
#!/usr/bin/env python
# coding=utf-8
# Demonstration of try / except / finally flow control around division.
a = int(input());
try:
    print('start trying...')
    r = 10 / a
    print('result:',r)
except ZeroDivisionError as e:
    print('except:',e)
finally:
    # NOTE(review): a finally block always runs, so 'error!' is printed
    # even when the division succeeds — confirm the message is intentional.
    print('error!')
print('END')
|
import logging
import statistics
import time
from math import ceil
from typing import List, Tuple, Optional, Dict
from gensim.models import KeyedVectors
class SimDimClusterWorker:
    """Per-dimension cluster extractor for word-embedding models.

    For a given embedding dimension, sorts the vocabulary by its value in
    that dimension and searches for the longest contiguous run whose value
    spread stays within a data-derived tolerance.  Clusters are half-open
    (start, end) index ranges into the sorted lists.
    """

    def __init__(self, model: KeyedVectors):
        self._model: KeyedVectors = model
        # Heuristic floor on a reportable cluster, scaled to vocabulary size
        # relative to dimensionality (never below 10).
        self._minimum_cluster_size: int = max(10, len(self._model.vectors) // (self._model.vector_size * 10))
        # state for cluster extraction
        self._dimension: int = 0
        self._sorted_values: List[float] = []
        self._sorted_labels: List[str] = []
        self._tolerance: float = 0.0
        # (start_index, end_index) ranges into the sorted lists.
        self._biggest_cluster: Tuple[int, int] = (0, 0)
        self._current_cluster: Tuple[int, int] = (0, 0)
        self._current_start_index: int = 0
        self._current_end_index: int = 0

    def __call__(self, dimension: int):
        # Lets instances be handed directly to e.g. multiprocessing pools.
        return self.extract_cluster(dimension)

    def extract_cluster(self, dimension: int) -> Optional[Dict[int, List[str]]]:
        """Return {dimension: [labels]} for the biggest cluster found along
        *dimension*, or None if it is smaller than the minimum size."""
        self._dimension = dimension
        logging.info(f"[DIMENSION-{self._dimension}] begin")
        vector_values: List[float] = [vector[self._dimension].item() for vector in self._model.vectors]
        # NOTE(review): KeyedVectors.vocab is the gensim<4 API (renamed to
        # key_to_index in gensim 4) — confirm the pinned gensim version.
        vector_labels: List[str] = list(self._model.vocab.keys())
        # Sort labels by their value in this dimension.
        sorted_tuples: List[Tuple[float, str]] = sorted(zip(vector_values, vector_labels), key=lambda x: x[0])
        self._sorted_values = [x[0] for x in sorted_tuples]
        self._sorted_labels = [x[1] for x in sorted_tuples]
        # NOTE(review): this first assignment is dead code — it is
        # immediately overwritten by _calculate_tolerance() below.
        self._tolerance = statistics.mean(self._sorted_values) / 2
        self._tolerance = self._calculate_tolerance()
        self._create_biggest_cluster()
        logging.info(f"[DIMENSION-{self._dimension}] tolerance: {self._tolerance}")
        logging.info(f"[DIMENSION-{self._dimension}] mean: {statistics.mean(self._sorted_values)}")
        logging.info(f"[DIMENSION-{self._dimension}] done")
        if self._len(self._biggest_cluster) < self._minimum_cluster_size:
            return None
        return {self._dimension: [self._sorted_labels[label_index]
                                  for label_index in range(self._biggest_cluster[0], self._biggest_cluster[1])]}

    def _calculate_tolerance(self):
        """Tolerance = mean gap between consecutive sorted values, scaled by
        (minimum cluster size / 10)."""
        tolerance: float = 0
        for i in range(1, len(self._sorted_values)):
            tolerance += abs(self._sorted_values[i] - self._sorted_values[i - 1])
        return (tolerance / (len(self._sorted_values) - 1)) * (self._minimum_cluster_size / 10)

    def _create_biggest_cluster(self) -> None:
        """Slide a start index over the sorted values, growing a cluster from
        each start, and remember the longest one seen."""
        self._biggest_cluster = (0, 0)
        self._current_start_index = 0
        execution_times: List[float] = []
        log_interval: int = len(self._sorted_values) // 10
        while self._current_start_index < len(self._sorted_values) - self._min_len():
            start_time: float = time.perf_counter()
            self._create_current_cluster()
            if self._len(self._current_cluster) > self._len(self._biggest_cluster):
                self._biggest_cluster = self._current_cluster
            execution_times.append(time.perf_counter() - start_time)
            # Progress logging roughly every 10% of the vocabulary.
            if len(execution_times) % log_interval == 0:
                progression: float = self._current_start_index / len(self._sorted_values)
                total_time: float = sum(execution_times)
                logging.info(f"[DIMENSION-{self._dimension}]\t{'%06.2f%%' % (progression * 100.0)} "
                             f"avg: {'%06.4fs' % (total_time / len(execution_times))}; "
                             f"tot: {'%06.4fs' % total_time}; "
                             f"<<_biggest_cluster.len>>: {self._len(self._biggest_cluster)}")
                execution_times.clear()

    def _create_current_cluster(self) -> None:
        """Grow a cluster from _current_start_index in _min_len() strides,
        then binary-search the exact boundary once the tolerance is exceeded.
        Also advances _current_start_index for the next round."""
        self._current_cluster = (0, 0)
        self._current_end_index = min(len(self._sorted_values) - 1,
                                      self._current_start_index + self._min_len())
        # Bail out early if even the minimum-length window is too spread out.
        if self._end() - self._start() > self._tolerance:
            self._current_start_index = min(len(self._sorted_values) - 1,
                                            self._current_start_index + 1)
            return
        while self._current_end_index < len(self._sorted_values) - 1:
            last_confirmed_start_index: int = self._current_end_index
            self._current_end_index = min(len(self._sorted_values) - 1,
                                          self._current_end_index + self._min_len())
            difference: float = self._end() - self._start()
            if difference < self._tolerance:
                continue
            if difference == self._tolerance:
                # Walk forward while the spread sits exactly on the tolerance.
                while difference == self._tolerance and self._current_end_index < len(self._sorted_values) - 1:
                    # NOTE(review): `+=` adds an index to an index here; the
                    # analogous walk in _find_last_conditional_index_in_range
                    # uses a plain +1 step — confirm `=` wasn't intended.
                    self._current_end_index += min(len(self._sorted_values) - 1,
                                                   self._current_end_index + 1)
                    difference = self._end() - self._start()
            else:  # difference > self._tolerance
                self._current_end_index = self._find_last_conditional_index_in_range(last_confirmed_start_index)
                break
        self._current_cluster = (self._current_start_index, self._current_end_index)
        self._find_next_start_index()

    def _find_next_start_index(self) -> None:
        """Advance the start index past the current cluster (or to the first
        index whose spread-to-end exceeds the tolerance)."""
        if self._current_end_index - self._current_start_index < 2:
            self._current_start_index = self._current_end_index
            return
        self._current_start_index = self._find_first_conditional_index_in_range(self._current_start_index + 1)

    def _find_last_conditional_index_in_range(self, start: int) -> int:
        """Binary search in (start, _current_end_index] for the last end
        index whose spread from the current start exceeds the tolerance."""
        end: int = self._current_end_index
        while end - start > 1:
            pivot: int = start + ceil((end - start) / 2)
            difference: float = self._difference(end_index=pivot)
            if difference > self._tolerance:
                end = pivot
                continue
            if difference == self._tolerance:
                # Walk forward over the plateau of exactly-tolerance spreads.
                while difference == self._tolerance and pivot < self._current_end_index:
                    pivot += 1
                    difference = self._difference(end_index=pivot)
                return pivot
            # Difference < self.tolerance
            start = pivot
        return start if self._difference(end_index=start) > self._tolerance else end

    def _find_first_conditional_index_in_range(self, start: int) -> int:
        """Binary search in [start, _current_end_index) for the first start
        index whose spread to the current end is within the tolerance."""
        end: int = self._current_end_index
        while end - start > 1:
            pivot: int = start + ceil((end - start) / 2)
            difference: float = self._difference(start_index=pivot)
            if difference > self._tolerance:
                start = pivot
                continue
            if difference == self._tolerance:
                # Walk backward over the plateau of exactly-tolerance spreads.
                while difference == self._tolerance:
                    pivot -= 1
                    difference = self._difference(start_index=pivot)
                return pivot + 1
            # Difference < self.tolerance
            end = pivot
        return start if self._difference(start_index=start) <= self._tolerance else end

    def _difference(self, start_index: int = None, end_index: int = None) -> float:
        """Value spread between two sorted positions (defaults to the current
        start/end indices)."""
        start_index = start_index if start_index is not None else self._current_start_index
        end_index = end_index if end_index is not None else self._current_end_index
        return self._sorted_values[end_index] - self._sorted_values[start_index]

    def _start(self) -> float:
        # Value at the current window start.
        return self._sorted_values[self._current_start_index]

    def _end(self) -> float:
        # Value at the current window end.
        return self._sorted_values[self._current_end_index]

    @staticmethod
    def _len(cluster: Tuple[int, int]) -> int:
        # Length of a half-open (start, end) cluster.
        return cluster[1] - cluster[0]

    def _min_len(self) -> int:
        # Growth stride: never smaller than the minimum cluster size, and
        # never smaller than the best cluster found so far.
        return max(self._minimum_cluster_size, self._len(self._biggest_cluster))
|
import os
import pygame
from pygame import surface
class ActorCharacter:
    """This class manages the actor character."""

    def __init__(self, actor_controller, actor_image=None, twitter_word='Default',
                 twitter_sentiment_colour=(0, 0, 230), starting_position=(600, 600, 100, 100)):
        """Initialize the actor character and set its starting position.

        :param actor_controller: object exposing the pygame display `screen`
        :param actor_image: path to the sprite image; defaults to the
            'test_image' environment variable.  Resolved lazily here —
            the original evaluated os.environ['test_image'] as the default
            argument, so merely importing the module raised KeyError when
            the variable was unset.
        :param twitter_word: caption text drawn over the character
        :param twitter_sentiment_colour: RGB colour of the caption
        :param starting_position: (x, y, w, h) rect of the start position
        """
        if actor_image is None:
            actor_image = os.environ['test_image']
        self.screen = actor_controller.screen
        self.screen_rect = actor_controller.screen.get_rect()
        # load the actor character image and get its rect.
        self.image = pygame.image.load(actor_image)
        # create a rectangle around image
        self.rect = self.image.get_rect()
        # set the font of the text drawn on the player character
        self.font_title = pygame.font.SysFont("impact", 20)
        # render the text using the variables provided
        self.text_title = self.font_title.render(twitter_word, True, twitter_sentiment_colour)
        # creates a rectangle surface for text
        self.rect_title = self.text_title.get_rect()
        # starting position rect
        self.start_ps = pygame.Rect(starting_position)

    def blit_me(self):
        """Draw the actor image and its caption; return the caption's rect.

        BUG FIX: the original called `s1.pygame.surface.blit(self.rect,
        self.rect_title)` — a Rect has no `.pygame` attribute, so blit_me
        always raised AttributeError.  The caption surface is now blitted
        onto the screen like the sprite.
        """
        self.screen.blit(self.image, self.rect)
        return self.screen.blit(self.text_title, self.rect_title)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
####
# 10/2010 Bernd Schlapsi <brot@gmx.info>
#
# This script is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Dependencies:
# * python-eyed3 (eyeD3 python library - http://eyed3.nicfit.net/)
# * steghide (steganography program - http://steghide.sourceforge.net/)
#
# The script extract the shownotes from the "Tin Foil Hat" podcast
# You can find the instructions how to extract shownotes for the
# "Tin Foil Hat" podcast here:
# http://cafeninja.blogspot.com/2010/10/tin-foil-hat-show-episode-001.html
import gpodder
import os
import subprocess
import tempfile
from gpodder.liblogger import log
try:
    import eyeD3
except ImportError:
    # Only a missing module should be tolerated here; the original bare
    # `except:` also hid unrelated errors raised while importing eyeD3.
    log('(tfh shownotes hook) Could not find eyeD3')

TFH_TITLE = 'Tin Foil Hat'
class gPodderHooks(object):
    """gPodder extension for the "Tin Foil Hat" podcast: shownotes are
    steganographically embedded (via steghide) in the episode's cover
    image; this hook extracts them after download and appends them to the
    episode description."""

    def __init__(self):
        log('"Tin Foil Hat" shownote extractor extension is initializing.')

    def __extract_image(self, filename):
        """
        extract image from the podcast file

        Returns the path of the cover image written to the temp dir, or
        None when the file is not an MP3, has no embedded image, or any
        error occurs (best-effort by design).
        """
        imagefile = None
        try:
            if eyeD3.isMp3File(filename):
                tag = eyeD3.Mp3AudioFile(filename).getTag()
                images = tag.getImages()
                if images:
                    tempdir = tempfile.gettempdir()
                    img = images[0]
                    imagefile = img.getDefaultFileName()
                    img.writeFile(path=tempdir, name=imagefile)
                    imagefile = "%s/%s" % (tempdir, imagefile)
                else:
                    log(u'No image found in %s' % filename)
        except:
            # NOTE(review): bare except deliberately swallows everything
            # (including eyeD3 being absent) so a bad file never breaks
            # the download pipeline.
            pass
        return imagefile

    def __extract_shownotes(self, imagefile):
        """
        extract shownotes from the FRONT_COVER.jpeg

        Runs steghide on *imagefile* and returns the extracted text, or
        None on failure.  The image file is always removed.
        """
        shownotes = None
        password = 'tinfoilhat'
        shownotes_file = '/tmp/shownotes.txt'
        myprocess = subprocess.Popen(['steghide', 'extract', '-f', '-p', password,
                                      '-sf', imagefile, '-xf', shownotes_file],
                                     stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        (stdout, stderr) = myprocess.communicate()
        os.remove(imagefile)
        # NOTE(review): on Python 3 communicate() returns bytes, so this
        # startswith against a str would never match — script targets the
        # Python 2 gpodder codebase; confirm before porting.
        if stderr.startswith('wrote extracted data to'):
            #read shownote file
            f = open(shownotes_file)
            shownotes = f.read()
            f.close()
        else:
            log(u'Error extracting shownotes from the image file %s' % imagefile)
        return shownotes

    def on_episode_downloaded(self, episode):
        """gPodder hook: for Tin Foil Hat episodes, extract the hidden
        shownotes and append them (once) to the episode description."""
        if episode.channel.title == TFH_TITLE:
            filename = episode.local_filename(create=False, check_only=True)
            if filename is None:
                return
            imagefile = self.__extract_image(filename)
            if imagefile is None:
                return
            shownotes = self.__extract_shownotes(imagefile)
            if shownotes is None:
                return
            # save shownotes in the database, but only if not already there
            if episode.description.find(shownotes) == -1:
                episode.description = "%s\n\n<pre>%s</pre>" % (episode.description, shownotes)
                episode.save()
                episode.db.commit()
                log(u'updated shownotes for podcast: (%s/%s)' % (episode.channel.title, episode.title))
|
import numpy as np
import scipy as sp
def read_xray_image():
    """Placeholder: load an X-ray image from disk (not yet implemented)."""
    # NOTE(review): the originally suggested sp.misc.imread was removed in
    # SciPy 1.2 — use imageio.imread or PIL when implementing.
    pass
def write_xray_image():
    """Placeholder: write an X-ray image to disk (not yet implemented)."""
    pass
|
#!/usr/bin/python
import sys
# Command line: <snpmer keys file> <min edge coverage> <stride>
snpmer_keys_file = sys.argv[1]
min_edge_coverage = int(sys.argv[2])
# NOTE(review): stride is parsed but never used below — confirm intent.
stride = int(sys.argv[3])
# read snp paths from stdin
# graph to stdout
snpmer_coverages = {}
snpmer_name_mapping = {}
# Load the snpmer key table: maps a tuple of path nodes to a joined name.
with open(snpmer_keys_file) as f:
    for l in f:
        parts = l.strip().split(',')
        # NOTE(review): start_pos is computed but never used.
        start_pos = int(parts[0].split('_')[0])
        key = "-".join(tuple(l.strip().split(',')))
        snpmer_name_mapping[tuple(parts[1:-1])] = key
        snpmer_coverages[key] = 0
edge_coverages = {}
read_paths = []
# Each stdin line: "<readname>\t<comma-separated node path>".
for l in sys.stdin:
    path = l.strip().split('\t')[1].split(',')
    readname = l.strip().split('\t')[0]
    snpmer_path = []
    # Scan every window of the read's path (with wrap-around, treating the
    # path as circular) for windows that name a known snpmer.  O(n^2).
    for i in range(0, len(path)):
        for j in range(0, len(path)):
            key = path[i:i+j]
            if i+j > len(path):
                key += path[:i+j-len(path)]
            key = tuple(key)
            if key in snpmer_name_mapping:
                snpmer_path.append(snpmer_name_mapping[key])
    # Node coverage: count each snpmer occurrence on this read.
    for i in range(0, len(snpmer_path)):
        assert snpmer_path[i] in snpmer_coverages
        snpmer_coverages[snpmer_path[i]] += 1
    # Edge coverage between consecutive snpmers on this read.
    for i in range(1, len(snpmer_path)):
        fromkey = snpmer_path[i-1]
        tokey = snpmer_path[i]
        if (fromkey, tokey) not in edge_coverages: edge_coverages[(fromkey, tokey)] = 0
        edge_coverages[(fromkey, tokey)] += 1
    read_paths.append((readname, snpmer_path))
# Emit GFA-style records: S (segments with coverage tags), L (links that
# pass the coverage filter), P (per-read paths).
for key in snpmer_coverages:
    print("S\t" + key + "\t*\tLN:i:1\tKC:i:" + str(snpmer_coverages[key]))
for edge in edge_coverages:
    if edge_coverages[edge] < min_edge_coverage: continue
    print("L\t" + edge[0] + "\t+\t" + edge[1] + "\t+\t0M")
for (read, path) in read_paths:
    print("P\t" + read + "\t" + ",".join(key + "+" for key in path) + "\t" + ",".join("*" for key in path))
|
# -*- coding: utf-8 -*-
"""
Course: CS 2302
Author: Wenro Osaretin
Instructor: Diego Aguirre
T.A.: Anindita Nath
Date of Last Modication: November 4, 2018
"""
###############################################################################
# Binary Search Tree #
###############################################################################
class Node:
    """Binary-search-tree node holding a word and left/right children."""

    def __init__(self, english_words):
        # BUG FIX: the original defined `_init_` (single underscores), which
        # Python never calls as a constructor, so Node("w") raised TypeError.
        self.words = english_words
        self.left = None
        self.right = None
def insertNode(r, node):
    """Insert *node* into the BST rooted at *r*; return the resulting root.

    BUG FIX: the original handled an empty tree with `r = node`, which only
    rebound the local name — inserting into an empty tree silently did
    nothing.  The root is now returned so callers can write
    ``root = insertNode(root, node)``; existing callers that ignore the
    return value are unaffected for non-empty trees.
    """
    if r is None:
        return node
    if r.words > node.words:
        if r.left is None:
            r.left = node
        else:
            insertNode(r.left, node)
    else:
        if r.right is None:
            r.right = node
        else:
            insertNode(r.right, node)
    return r
def printBST(r):
    """Print the subtree rooted at *r* in pre-order (node, left, right)."""
    if r is None:
        return
    print(r.words)
    printBST(r.left)
    printBST(r.right)
# Build a BST from words.txt and print it.
# NOTE(review): `bst` is rebound to a brand-new one-node tree on every line,
# so only the last line's two-node tree survives to printBST; Node(line[0])
# also seeds each tree with just the first character.  Additionally, Node's
# constructor is misnamed `_init_`, so Node(...) raises TypeError as written.
file_words = open("words.txt")
for line in file_words:
    bst = Node(line[0])
    insertNode(bst, Node(line))
printBST(bst)
###############################################################################
# AVL Tree #
###############################################################################
class Node2:
    """AVL-tree node: key, parent/left/right links, and cached height."""

    def __init__(self, english_words):
        self.key = english_words
        self.parent = None
        self.left = None
        self.right = None
        self.height = 0

    def get_balance(self):
        """Balance factor: height(left subtree) - height(right subtree).

        A missing child counts as height -1.
        """
        left_height = self.left.height if self.left is not None else -1
        right_height = self.right.height if self.right is not None else -1
        return left_height - right_height

    def update_height(self):
        """Recompute this node's cached height from its children.

        Call after the subtree below this node has been modified.
        """
        left_height = self.left.height if self.left is not None else -1
        right_height = self.right.height if self.right is not None else -1
        self.height = max(left_height, right_height) + 1

    def set_child(self, which_child, child):
        """Attach *child* on the "left" or "right" and refresh bookkeeping.

        Returns False for any other which_child value, True on success.
        Updates the child's parent pointer and this node's height.
        """
        if which_child not in ("left", "right"):
            return False
        if which_child == "left":
            self.left = child
        else:
            self.right = child
        if child is not None:
            child.parent = self
        # The subtree shape may have changed; keep the cached height fresh.
        self.update_height()
        return True

    def replace_child(self, current_child, new_child):
        """Swap *current_child* (matched by identity) for *new_child*.

        Returns True if the swap happened, False when current_child is not
        actually a child of this node.
        """
        if self.left is current_child:
            return self.set_child("left", new_child)
        if self.right is current_child:
            return self.set_child("right", new_child)
        return False
class AVLTree:
    """Self-balancing AVL tree of Node2 nodes.

    The only data member is the tree's root node, which starts out as None.
    """

    def __init__(self):
        self.root = None

    def rotate_left(self, node):
        """Perform a left rotation at *node*; return the subtree's new root."""
        # Define a convenience pointer to the right child of the
        # left child.
        right_left_child = node.right.left
        # Step 1 - the right child moves up to the node's position.
        # This detaches node from the tree, but it will be reattached
        # later.
        if node.parent is not None:
            node.parent.replace_child(node, node.right)
        else: # node is root
            self.root = node.right
            self.root.parent = None
        # Step 2 - the node becomes the left child of what used
        # to be its right child, but is now its parent. This will
        # detach right_left_child from the tree.
        node.right.set_child('left', node)
        # Step 3 - reattach right_left_child as the right child of node.
        node.set_child('right', right_left_child)
        return node.parent

    def rotate_right(self, node):
        """Perform a right rotation at *node*; return the subtree's new root."""
        # Define a convenience pointer to the left child of the
        # right child.
        left_right_child = node.left.right
        # Step 1 - the left child moves up to the node's position.
        # This detaches node from the tree, but it will be reattached
        # later.
        if node.parent is not None:
            node.parent.replace_child(node, node.left)
        else: # node is root
            self.root = node.left
            self.root.parent = None
        # Step 2 - the node becomes the right child of what used
        # to be its left child, but is now its parent. This will
        # detach left_right_child from the tree.
        node.left.set_child('right', node)
        # Step 3 - reattach left_right_child as the left child of node.
        node.set_child('left', left_right_child)
        return node.parent

    def rebalance(self, node):
        """Update *node*'s height and rotate if its balance factor hits ±2.

        Returns the subtree's new root if a rotation occurred, or *node*
        when no rebalancing was required.
        """
        # First update the height of this node.
        node.update_height()
        # Check for an imbalance.
        if node.get_balance() == -2:
            # The subtree is too big to the right.
            if node.right.get_balance() == 1:
                # Double rotation case. First do a right rotation
                # on the right child.
                self.rotate_right(node.right)
            # A left rotation will now make the subtree balanced.
            return self.rotate_left(node)
        elif node.get_balance() == 2:
            # The subtree is too big to the left
            if node.left.get_balance() == -1:
                # Double rotation case. First do a left rotation
                # on the left child.
                self.rotate_left(node.left)
            # A right rotation will now make the subtree balanced.
            return self.rotate_right(node)
        # No imbalance, so just return the original node.
        return node

    def insert(self, node):
        """Insert *node* by standard BST descent, then rebalance every
        ancestor from the new node's parent up to the root."""
        # Special case: if the tree is empty, just set the root to
        # the new node.
        if self.root is None:
            self.root = node
            node.parent = None
        else:
            # Step 1 - do a regular binary search tree insert.
            current_node = self.root
            while current_node is not None:
                # Choose to go left or right
                if node.key < current_node.key:
                    # Go left. If left child is None, insert the new
                    # node here.
                    if current_node.left is None:
                        current_node.left = node
                        node.parent = current_node
                        current_node = None
                    else:
                        # Go left and do the loop again.
                        current_node = current_node.left
                else:
                    # Go right. If the right child is None, insert the
                    # new node here.
                    if current_node.right is None:
                        current_node.right = node
                        node.parent = current_node
                        current_node = None
                    else:
                        # Go right and do the loop again.
                        current_node = current_node.right
            # Step 2 - Rebalance along a path from the new node's parent up
            # to the root.
            node = node.parent
            while node is not None:
                self.rebalance(node)
                node = node.parent
def printAVL(r):
    """Pre-order print of the subtree rooted at *r*."""
    # NOTE(review): this prints r.words, but the AVL nodes defined above
    # (Node2) store their value in .key — confirm which node type this is
    # actually meant to traverse; as written it fails on Node2 trees.
    if not r:
        return
    print (r.words)
    printAVL(r.left)
    printAVL(r.right)
###############################################################################
# Red-Black Tree #
###############################################################################
# RBTNode class - represents a node in a red-black tree
# RBTNode class - represents a node in a red-black tree
class RBTNode:
    """Red-black tree node: key, family links, and a "red"/"black" color."""

    def __init__(self, english_words, parent, is_red = False, left = None, right = None):
        self.key = english_words
        self.left = left
        self.right = right
        self.parent = parent
        self.color = "red" if is_red else "black"

    def are_both_children_black(self):
        """True when neither child is red; a None child counts as black."""
        left_is_red = self.left != None and self.left.is_red()
        right_is_red = self.right != None and self.right.is_red()
        return not (left_is_red or right_is_red)

    def count(self):
        """Number of nodes in the subtree rooted here, including self."""
        total = 1
        for child in (self.left, self.right):
            if child != None:
                total = total + child.count()
        return total

    def get_grandparent(self):
        """Parent's parent, or None when closer than two levels to the root."""
        return None if self.parent is None else self.parent.parent

    def get_predecessor(self):
        """Rightmost node of the left subtree.

        Precondition: this node's left child is not None.
        """
        node = self.left
        while node.right is not None:
            node = node.right
        return node

    def get_sibling(self):
        """The parent's other child, or None when this node has no parent."""
        if self.parent is None:
            return None
        return self.parent.right if self is self.parent.left else self.parent.left

    def get_uncle(self):
        """The parent's sibling, or None when there is no grandparent."""
        grandparent = self.get_grandparent()
        if grandparent is None:
            return None
        return grandparent.right if grandparent.left is self.parent else grandparent.left

    def is_black(self):
        """True when this node's color is black."""
        return self.color == "black"

    def is_red(self):
        """True when this node's color is red."""
        return self.color == "red"

    def replace_child(self, current_child, new_child):
        """Swap current_child (matched by identity) for new_child; returns
        False when current_child is not a child of this node."""
        if self.left is current_child:
            return self.set_child("left", new_child)
        if self.right is current_child:
            return self.set_child("right", new_child)
        return False

    def set_child(self, which_child, child):
        """Attach child on the "left" or "right" and fix its parent link.

        Returns False for any other which_child value.
        """
        if which_child not in ("left", "right"):
            return False
        if which_child == "left":
            self.left = child
        else:
            self.right = child
        if child != None:
            child.parent = self
        return True
class RedBlackTree:
    """Red-black tree over RBTNode nodes.

    Insertion is standard BST insertion followed by recoloring/rotations
    (insertion_balance) to restore the red-black invariants.
    """
    def __init__(self):
        # Empty tree: no root yet.
        self.root = None
    def __len__(self):
        # Number of nodes currently stored.
        if self.root is None:
            return 0
        return self.root.count()
    def insert(self, key):
        """Insert *key* as a new (initially red) node and rebalance."""
        new_node = RBTNode(key, None, True, None, None)
        self.insert_node(new_node)
    def insert_node(self, node):
        """BST-insert *node*, color it red, then rebalance the tree."""
        # Begin with normal BST insertion
        if self.root is None:
            # Special case for root
            self.root = node
        else:
            current_node = self.root
            while current_node is not None:
                if node.key < current_node.key:
                    if current_node.left is None:
                        current_node.set_child("left", node)
                        break
                    else:
                        current_node = current_node.left
                else:
                    # Duplicate/greater keys go to the right subtree.
                    if current_node.right is None:
                        current_node.set_child("right", node)
                        break
                    else:
                        current_node = current_node.right
        # Color the node red
        node.color = "red"
        # Balance
        self.insertion_balance(node)
    def insertion_balance(self, node):
        """Restore red-black properties after inserting the red *node*."""
        # If node is the tree's root, then color node black and return
        if node.parent is None:
            node.color = "black"
            return
        # If parent is black, then return without any alterations
        if node.parent.is_black():
            return
        # References to parent, grandparent, and uncle are needed for remaining operations
        parent = node.parent
        grandparent = node.get_grandparent()
        uncle = node.get_uncle()
        # If parent and uncle are both red, then color parent and uncle black, color grandparent
        # red, recursively balance grandparent, then return
        if uncle is not None and uncle.is_red():
            parent.color = uncle.color = "black"
            grandparent.color = "red"
            self.insertion_balance(grandparent)
            return
        # If node is parent's right child and parent is grandparent's left child, then rotate left
        # at parent, update node and parent to point to parent and grandparent, respectively
        if node is parent.right and parent is grandparent.left:
            self.rotate_left(parent)
            node = parent
            parent = node.parent
        # Else if node is parent's left child and parent is grandparent's right child, then rotate
        # right at parent, update node and parent to point to parent and grandparent, respectively
        elif node is parent.left and parent is grandparent.right:
            self.rotate_right(parent)
            node = parent
            parent = node.parent
        # Color parent black and grandparent red
        parent.color = "black"
        grandparent.color = "red"
        # If node is parent's left child, then rotate right at grandparent, otherwise rotate left
        # at grandparent
        if node is parent.left:
            self.rotate_right(grandparent)
        else:
            self.rotate_left(grandparent)
    def rotate_left(self, node):
        """Left-rotate around *node*. Precondition: node.right is not None."""
        right_left_child = node.right.left
        if node.parent != None:
            node.parent.replace_child(node, node.right)
        else: # node is root
            self.root = node.right
            self.root.parent = None
        node.right.set_child("left", node)
        node.set_child("right", right_left_child)
    def rotate_right(self, node):
        """Right-rotate around *node*. Precondition: node.left is not None."""
        left_right_child = node.left.right
        if node.parent != None:
            node.parent.replace_child(node, node.left)
        else: # node is root
            self.root = node.left
            self.root.parent = None
        node.left.set_child("right", node)
        node.set_child("left", left_right_child)
def printRB(r):
    """Print the keys of a red-black (sub)tree rooted at *r* in pre-order.

    Accepts None (empty subtree) and simply returns.
    """
    if not r:
        return
    # BUG FIX: RBTNode stores the word in .key (see RBTNode.__init__);
    # the original accessed a non-existent .words attribute.
    print(r.key)
    printRB(r.left)
    printRB(r.right)
###############################################################################
# Count Anagrams #
###############################################################################
file = open("words.txt")
for line in file:
def count_anagrams(line, prefix=""):
count = 0
if len(line) <= 1:
str = prefix + line
if str in file:
prefix + line
return count
else:
for i in range(len(line)):
cur = line[i: i + 1]
before = line[0: i] # letters before cur
after = line[i + 1:] # letters after cur
if cur not in before: # Check if permutations of cur have not been generated.
count = count + 1
return count_anagrams(before + after, prefix + cur)
###############################################################################
# Greatest Number of Anagrams #
###############################################################################
def greatest_anagrams(word, prefix=""):
    """Return the number of distinct permutations of *word*.

    NOTE(review): the original's intent ("greatest number of anagrams") is
    unclear -- its max-tracking variables were dead code and it returned the
    result of count_anagrams from the first loop iteration. This version is
    self-contained and returns the distinct-permutation count, which is the
    only value the original could ever produce on a working path; confirm
    against the caller at the bottom of the file.
    """
    if len(word) <= 1:
        # One letter (or empty) has a single arrangement.
        return 1
    total = 0
    for i in range(len(word)):
        cur = word[i]
        before = word[:i]   # letters before cur
        after = word[i + 1:]  # letters after cur
        # Only branch on the first occurrence of each letter.
        if cur not in before:
            total += greatest_anagrams(before + after, prefix + cur)
    return total
# Driver: read words and insert them into the chosen tree implementation.
file2 = open("words.txt")
for line in file2:
    choice = input("AVL or RedBlack?")
    if choice == "AVL":
        # NOTE(review): Node2/printAVL are defined elsewhere in this file.
        avl = Node2(line[0])
        avl.insert(avl, Node2(line))
        printAVL(avl)
    if choice == "RedBlack":
        # NOTE(review): RBTNode.__init__ takes (key, parent, ...), so line[2]
        # is passed as the *parent*, and RBTNode defines no insert() --
        # insertion lives on RedBlackTree. Confirm the intended API here.
        RedBlack = RBTNode(line[0], line[2])
        RedBlack.insert(line)
        printRB(RedBlack)
    print(count_anagrams(line, ""))
file_words2 = open("anotherfile.txt")
for line in file_words2:
    print(greatest_anagrams(line, ""))
import logging
import colorgram
logging.basicConfig(level = logging.DEBUG)
def main():
    """Extract the 5 dominant colors of hirst-image.jpeg with colorgram and
    print them as a ready-to-paste Python literal (per-channel values,
    tuple, and proportion), followed by the collected list of RGB tuples.
    """
    extracted = colorgram.extract('hirst-image.jpeg', 5)
    rgb_tuples = []
    print('colors = \\\n[')
    for swatch in extracted:
        r, g, b = swatch.rgb.r, swatch.rgb.g, swatch.rgb.b
        print(f" {{\n 'red':' {r}")
        print(f" 'green': {g}")
        print(f" 'blue': {b}")
        print(f" 'tuple': ({r}, {g}, {b})")
        print(f" 'proportion': {swatch.proportion * 100:0.2f}%\n }}")
        rgb_tuples.append((r, g, b))
    print(f"]\ntuple_list = {rgb_tuples}")
# Script entry point.
if __name__ == '__main__':
    main()
# logging.debug(stuff)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 10 15:53:44 2019
@author: Dian, Aubert-Kato
This is a smart shoes program.
Start from reading the calibrated data
Filtration, normalizing, window cutting, feature extraction, ML program
"""
import scipy
from scipy import signal
import scipy.signal as sig
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import pickle
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn.utils.multiclass import unique_labels
import heapq
import time
import seaborn as sns
from multiprocessing import Pool
import psutil
import sys
import os
import glob
'''Identify functions'''
def instepfeature(window,availableSensors):
    "The average difference in one step for all steps in a window"
    "...."
    # window: 2-D array (samples x 14 sensor columns) -- assumed from the
    # column indexing below, TODO confirm. availableSensors: array of usable
    # column indices (left foot 0-6, right foot 7-13).
    def stepcounting(x):
        "sumyl dosen't mean any about LEFT, it is just a sum of halfwindow, it could be L or R"
        # Segment x into individual steps: a step is a run of samples whose
        # per-sample max pressure is at least 1, lasting more than 10 samples.
        A = x.max(axis = 1)
        A[A<1] = 0
        count = 1
        start = 0
        step = []
        for i in range(len(A)-1):
            if A[i] == 0:
                if A[i+1] == 0:
                    continue
                else:
                    # Rising edge: a step begins at i.
                    count = count + 1
                    start = i
            else:
                if A[i+1] != 0:
                    continue
                else:
                    # Falling edge: the step ends at i.
                    stop = i
                    t = stop - start
                    if A[start] == 0:
                        if t > 10:
                            step.append(x[start:stop+1])
        return step
    def avepeakdiff(step):
        # Mean (last peak - first peak) pressure difference over the steps;
        # steps with fewer than two peaks contribute 0.
        if step != []:
            peakdiff = []
            for each in step:
                maxinstep = each.max(axis=1)
                peaks, properties = scipy.signal.find_peaks(maxinstep,distance = 10, width = 5)
                peaknum = len(peaks)
                if peaknum > 1:
                    peakdiff.append(maxinstep[peaks[peaknum-1]]-maxinstep[peaks[0]])
                else:
                    peakdiff.append(0)
        else:
            peakdiff = 0
        return np.mean(peakdiff)
    # Split the available sensor indices into left (0-6) and right (7-13).
    avL = availableSensors[[i in list(range(7)) for i in availableSensors]]
    avR = availableSensors[[i in list(range(7,14)) for i in availableSensors]]
    yl = np.array(window[:,avL])
    yr = np.array(window[:,avR])
    Lstep = stepcounting(yl)
    Rstep = stepcounting(yr)
    # NOTE(review): truthiness test -- any nonzero (even negative) average
    # passes, while 0.0 falls through to the same value anyway. Confirm intent.
    if (avepeakdiff(Lstep)+avepeakdiff(Rstep))/2 :
        inst = (avepeakdiff(Lstep)+avepeakdiff(Rstep))/2
    else:
        inst = 0
    return inst
def inerpeakinfo(window):
    """Per-sensor peak statistics for every column of *window*.

    For each sensor column returns a 7-element list:
    [peak count, mean and std of peak-to-peak distance,
     mean and std of peak prominence, mean and std of peak width].
    Columns with no detected peak yield all zeros; a single peak yields
    zero distance statistics.
    """
    def inersensorpeakinfo(single):
        peaks, properties = scipy.signal.find_peaks(single, distance = 30,prominence=2*np.std(single),width=0,rel_height=0.7)
        # Total number of peaks.
        ft_pk_no = len(peaks)
        if ft_pk_no <= 0:
            return [0,0,0,0,0,0,0]
        # Average/std of prominences (magnitudes).
        # (The original recomputed ft_mg_ave a second time further down;
        # that duplicate has been removed.)
        ft_mg_ave = np.average(properties['prominences'])
        ft_mg_std = np.std(properties['prominences'])
        # Average/std of widths.
        ft_wd_ave = np.average(properties['widths'])
        ft_wd_std = np.std(properties['widths'])
        if ft_pk_no > 1:
            # Average distance between consecutive peaks.
            ft_pk2pk_dst = np.average(np.diff(peaks))
            ft_pk2pk_std = np.std(np.diff(peaks))
            return [ft_pk_no,ft_pk2pk_dst,ft_pk2pk_std,ft_mg_ave,ft_mg_std,ft_wd_ave,ft_wd_std]
        # Exactly one peak: distance features are undefined, report 0.
        # (The original's final else branch was unreachable -- the zero-peak
        # case already returned above.)
        return [ft_pk_no,0,0,ft_mg_ave,ft_mg_std,ft_wd_ave,ft_wd_std]
    # Renamed the accumulator: the original shadowed the function's own name.
    features = []
    for column in window.T:
        features.append(inersensorpeakinfo(column))
    return features
def sensorgeneral(window):
    """Per-column mean/std/median/max of *window*, ignoring zero samples.

    A column that is entirely zero is treated as the single sample [0].
    Returns [means, stds, medians, maxima], one value per sensor column.
    """
    means, stds, medians, maxima = [], [], [], []
    for col in window.T:
        nonzero = [v for v in col if v != 0.0]  # drop zero (no-pressure) samples
        if not nonzero:
            nonzero = [0]
        means.append(np.mean(nonzero))
        stds.append(np.std(nonzero))
        medians.append(np.median(nonzero))
        maxima.append(np.max(nonzero))
    return [means, stds, medians, maxima]
'''The difference Anterior & Posterior mean of max in one step'''
def anterposterNAKDR(window,availableSensors):
    # Anterior/posterior pressure difference and correlation per foot.
    # Sensors 0,2,3,4 (left) / 7,9,10,11 (right) are used as the anterior
    # group and sensor 6 / 13 as the posterior (heel) -- assumed from the
    # indexing; confirm against the sensor map in get_available_features.
    avL = availableSensors[[i in [0,2,3,4] for i in availableSensors]]
    avR = availableSensors[[i in [7,9,10,11] for i in availableSensors]]
    # Feature is undefined unless at least one foot has both an anterior
    # sensor and its heel sensor available.
    if (avL.size == 0 or not 6 in availableSensors) and (avR.size ==0 or not 13 in availableSensors):
        return ()
    if (avL.size > 0 and 6 in availableSensors):
        anterL = window[:,avL].max(axis=1)
        posterL = window[:,6]
        diffl = np.mean(anterL)-np.mean(posterL)
        correlationL = np.corrcoef(anterL,posterL)[0][1]
    else:
        diffl = 0
        correlationL = 0
    if (avR.size >0 and 13 in availableSensors):
        anterR = window[:,avR].max(axis=1)
        posterR = window[:,13]
        diffr = np.mean(anterR)-np.mean(posterR)
        correlationR = np.corrcoef(anterR,posterR)[0][1]
    else:
        # NOTE(review): falls back to the left diff when the right foot is
        # unavailable, so the average below is unchanged -- confirm intent.
        diffr = diffl
        correlationR = 0
    # NOTE(review): averages only when diffl > 0, otherwise reports diffr.
    avediff = (diffl+diffr)/2 if diffl > 0 else diffr
    return (avediff,correlationL,correlationR)
'''The difference latterior & middle mean of max in one step'''
def lattomidNAKDR(window,availableSensors):
    # Lateral vs. mid-foot pressure difference and correlation per foot.
    # Lateral sensors: 4,5 (left) / 11,12 (right); mid sensor: 2 / 9 --
    # assumed from the indexing, confirm against the sensor map.
    avL = availableSensors[[i in [4,5] for i in availableSensors]]
    avR = availableSensors[[i in [11,12] for i in availableSensors]]
    # Undefined unless at least one foot has a lateral and its mid sensor.
    if (avL.size == 0 or not 2 in availableSensors) and (avR.size ==0 or not 9 in availableSensors):
        return ()
    if (avL.size > 0 and 2 in availableSensors):
        latteriorL = window[:,avL].max(axis=1)
        midL = window[:,2]
        diffl = np.mean(latteriorL)-np.mean(midL)
        correlationL = np.corrcoef(latteriorL,midL)[0][1]
    else:
        diffl = 0
        correlationL = 0
    if (avR.size >0 and 9 in availableSensors):
        # NOTE(review): variable names say anterior/posterior but this is the
        # right foot's lateral/mid pair (copied from anterposterNAKDR).
        anterR = window[:,avR].max(axis=1)
        posterR = window[:,9]
        diffr = np.mean(anterR)-np.mean(posterR)
        correlationR = np.corrcoef(anterR,posterR)[0][1]
    else:
        # Fall back to the left diff when the right foot is unavailable.
        diffr = diffl
        correlationR = 0
    avediff = (diffl+diffr)/2 if diffl > 0 else diffr
    return (avediff,correlationL,correlationR)
def overlappingrate(window, availableSensors, threshold = 5):
    """Fraction of samples in which both feet register pressure.

    Values below *threshold* are zeroed first; a sample counts when at least
    one available left-foot (0-6) and one available right-foot (7-13)
    sensor are simultaneously non-zero.
    """
    copyWin = window.copy()
    copyWin[copyWin < threshold] = 0
    avL = availableSensors[[i in list(range(7)) for i in availableSensors]]
    avR = availableSensors[[i in list(range(7, 14)) for i in availableSensors]]
    count = 0
    for sample in copyWin:
        # Was `any(...) != 0`: any() already returns a bool, so the
        # comparison against 0 was redundant.
        if any(sample[avL]) and any(sample[avR]):
            count += 1
    return count / len(window)
def fft(window):
    # Frequency-domain features of the per-sample sensor sum.
    # NOTE(review): relies on scipy.stats, which is never imported at the
    # top of this file ('import scipy' does not guarantee the stats
    # submodule is loaded) -- confirm it resolves at runtime.
    # The index constants (25/30/150, divisor 15) assume a fixed window
    # length/sampling rate; fft_RE below is the length-independent version.
    t = window.sum(axis = 1)
    # Single-sided amplitude spectrum (DC bin dropped).
    fft_abs_amp = np.abs(np.fft.fft(t))*2/len(t)
    freq_spectrum = fft_abs_amp[1:int(np.floor(len(t) * 1.0 / 2)) + 1]
    skewness = scipy.stats.skew(freq_spectrum[0:150])
    # entropy = scipy.stats.entropy(freq_spectrum)
    s=0
    for i in range(25,int(np.floor(len(t) * 1.0 / 2))):
        s+=i*freq_spectrum[i]
    # (mean amplitude, std amplitude, weighted mean frequency, power, skewness)
    return (np.mean(freq_spectrum[30:150]),np.std(freq_spectrum[30:150]), (s/np.sum(freq_spectrum))/15, np.sum(freq_spectrum ** 2) / len(freq_spectrum), skewness)
def fft_RE(window,availableSensors):
    # Window-length-independent frequency features (mean/std amplitude,
    # mean frequency, power, skewness) of the summed available-sensor
    # signal. Bin indices are derived as len(t)/100 samples per Hz, i.e.
    # a 100 Hz sampling rate is assumed -- TODO confirm.
    t = window[:,availableSensors].sum(axis = 1)
    TENhz = int(10*len(t)/100)#Identify 10Hz index for all window size
    TWOhz = int(2*len(t)/100) #Identify 2Hz index for all window size
    ONESIXSEVENhz = int(5/3*len(t)/100) #Identify 1.67Hz index for all window size
    fft_abs_amp = np.abs(np.fft.fft(t))*2/len(t) #Calculate the amptitude
    freq_spectrum = fft_abs_amp[1:int(np.floor(len(t) * 1.0 / 2)) + 1]
    # NOTE(review): scipy.stats is not explicitly imported in this file.
    skewness = scipy.stats.skew(freq_spectrum[0:TENhz]) #skewness from 0-10Hz
    s=0
    for i in range(ONESIXSEVENhz,len(freq_spectrum)): # This is the step to modify the range of frequency of MeanFrequency. int(5/3*len(t)/100) means 1.67Hz
        s+=i*freq_spectrum[i]
    MeanFreq = (s/np.sum(freq_spectrum[ONESIXSEVENhz:len(freq_spectrum)])/(len(t)/100))
    power = np.sum(freq_spectrum ** 2)/len(freq_spectrum)
    return (np.mean(freq_spectrum[TWOhz:TENhz]),np.std(freq_spectrum[TWOhz:TENhz]), MeanFreq, power, skewness)
'''Functions to read files'''
def read_files(path, extention, labels = None):
    """Unpickle one data file per activity label.

    Each file is expected at ``path + label + extention``. Returns the
    unpickled objects in label order.

    FIX: the original used a mutable list as the default for *labels*;
    None is now the sentinel and the same default is built per call.
    """
    if labels is None:
        labels = ['upstairs', 'downstairs', 'jog', 'nonlocal', 'run', 'sit', 'stand', 'walkF', 'walkN', 'upslop', 'cycling']
    files = []
    for label in labels:
        filename = path + label + extention
        with open(filename, 'rb') as f:
            files.append(pickle.load(f))
    return files
def read_csvs(path):
    """Load every ``*.csv`` directly under *path* into pandas DataFrames."""
    frames = []
    pattern = path + os.path.sep + '*.csv'
    for csv_path in glob.glob(pattern):
        with open(csv_path, 'r') as handle:
            frames.append(pd.read_csv(handle))
    return frames
'''Filtration'''
def filtdata(calibdata_list, order_filter=2, critical_frequency=0.2):
    """Zero-phase low-pass filter the 14 sensor columns of each DataFrame.

    Applies a Butterworth filter (via filtfilt, so no phase shift) to the
    L1..L7/R1..R7 columns of every frame and clips negative outputs to 0.
    Returns one (len(frame) x 14) numpy array per input frame.
    """
    b, a = sig.butter(order_filter, critical_frequency)
    sensor_cols = ['L1', 'L2', 'L3', 'L4', 'L5', 'L6', 'L7',
                   'R1', 'R2', 'R3', 'R4', 'R5', 'R6', 'R7']
    filtdata_list = []
    for frame in calibdata_list:
        raw = np.array(frame[sensor_cols])
        filtered = np.zeros((len(frame), 14))
        for col in range(14):
            filtered[:, col] = sig.filtfilt(b, a, raw[:, col])
        # Pressure cannot be negative; remove filter undershoot.
        filtered[filtered < 0] = 0
        filtdata_list.append(filtered)
    return filtdata_list
'''Window cutting'''
def cutWindow(data, wl, step):
    """Slice *data* into overlapping windows of length *wl*, advancing by
    *step* samples each time.

    Mirrors the original bound: a window starting exactly at
    ``len(data) - wl`` is not produced.
    """
    windows = []
    start = 0
    limit = len(data) - wl
    while start < limit:
        windows.append(data[start:start + wl])
        start += step
    return windows
'''Hardcoded list of features; should be moved to a config file'''
def init_feature_names():
    """Build the base feature column names and the list of features that
    must be recomputed for each sensor configuration.

    The order must match comp_base_features: first the four sensorgeneral
    statistics (ave/std/mid/max), then the seven per-sensor peak statistics,
    each iterated L7..L1 followed by R7..R1. The names are generated from
    that regular pattern instead of being written out literally.
    """
    sensors = [side + str(num) for side in ('L', 'R') for num in range(7, 0, -1)]
    columnNames = [stat + s for stat in ('ave', 'std', 'mid', 'max') for s in sensors]
    peak_stats = ('peaknum', 'peakdisave', 'peakdisstd', 'peakmgave',
                  'peakmgstd', 'peakwthave', 'peakwthstd')
    columnNames += [stat + s for s in sensors for stat in peak_stats]
    to_recomp = ['APdiff', 'APcoraL', 'APcoraR', 'LMdiff', 'LMcoraL', 'LMcoraR',
                 'fftmean', 'fftstd', 'fftweight', 'fftennergy', 'fftskewness',
                 'overlap', 'inerstepinterval']
    return columnNames, to_recomp
'''Make a feature list'''
def comp_base_features(windowtotal):
    """Compute the base feature vector for every window in *windowtotal*.

    Each vector is the flattened output of sensorgeneral followed by the
    flattened output of inerpeakinfo, matching init_feature_names order.
    """
    feature_all = []
    for window in windowtotal:
        row = [value for group in sensorgeneral(window) for value in group]
        row += [value for sensor in inerpeakinfo(window) for value in sensor]
        feature_all.append(row)
    return feature_all
def recomp(windowtotal,availableSensors,availableFeatureNames):
    # Recompute the configuration-dependent features (anterior/posterior,
    # lateral/mid, FFT, gait phase) for the given sensor subset; returns a
    # DataFrame with one row per window.
    df_featureplus = pd.DataFrame()
    if 'APdiff' not in availableFeatureNames:
        featuretotAP = []
        for window in windowtotal:
            featureforone = []
            temp = anterposterNAKDR(window,availableSensors)
            for i in temp:
                featureforone.append(i)
            featuretotAP.append(featureforone)
        # NOTE(review): only the *last* window's feature list is checked to
        # decide whether the AP columns apply at all -- confirm intent.
        if len(featureforone)!= 0:
            df_AP = pd.DataFrame(featuretotAP,columns = ['APdiff','APcoraL','APcoraR'])
            df_featureplus = pd.concat([df_featureplus, df_AP], axis=1, sort=False)
    if 'LMdiff' not in availableFeatureNames:
        featuretotLM = []
        for window in windowtotal:
            featureforone = []
            temp = lattomidNAKDR(window,availableSensors)
            for i in temp:
                featureforone.append(i)
            featuretotLM.append(featureforone)
        # NOTE(review): same last-window-only check as above.
        if len(featureforone)!= 0:
            df_LM = pd.DataFrame(featuretotLM,columns = ['LMdiff','LMcoraL','LMcoraR'])
            df_featureplus = pd.concat([df_featureplus,df_LM], axis=1, sort=False)
    # FFT features are always recomputed for the current sensor subset.
    featuretotFFT = []
    for window in windowtotal:
        featureforone = []
        temp = fft_RE(window,availableSensors)
        for i in temp:
            featureforone.append(i)
        featuretotFFT.append(featureforone)
    df_FFT = pd.DataFrame(featuretotFFT,columns = ['fftmean','fftstd','fftweight','fftennergy','fftskewness'])
    df_featureplus = pd.concat([df_featureplus,df_FFT], axis=1, sort=False)
    # Gait-phase features: double-support overlap rate, in-step peak diff.
    featuregatephase = []
    for window in windowtotal:
        featuregatephase.append([overlappingrate(window,availableSensors),instepfeature(window,availableSensors)])
    df_gatephase = pd.DataFrame(featuregatephase,columns=['overlap','inerstepinterval'])
    df_featureplus = pd.concat([df_featureplus,df_gatephase], axis = 1, sort=False)
    return df_featureplus
'''Get list of available features'''
def get_available_features(availableSensors, columnNames, nSensors = 14, totsinglesensordep = 4, totsinglesensorcol = 7):
    '''
    Return the subset of columnNames computable from availableSensors.

    !!NOTICE!!
    when input the intrest configurations follow this:
    L1 -- 6     R1 -- 13
    L2 -- 5     R2 -- 12
    L3 -- 4     R3 -- 11
    L4 -- 3     R4 -- 10
    L5 -- 2     R5 -- 9
    L6 -- 1     R6 -- 8
    L7 -- 0     R7 -- 7
    '''
    # requirements[i] lists the sensors needed by columnNames[i]:
    # totsinglesensordep stat groups over all sensors, then
    # totsinglesensorcol peak features per individual sensor.
    requirements = [[i] for _ in range(totsinglesensordep) for i in range(nSensors)]
    requirements += [[i] for i in range(nSensors) for _ in range(totsinglesensorcol)]
    return [columnNames[i] for i in range(len(requirements))
            if all(sensor in availableSensors for sensor in requirements[i])]
#================================#
#===== High level functions =====#
#================================#
def base_train_evaluate(dfsample, training_subjects_assignments,nRepeats = 100, interval = 5, nTrees = 100):
    """Train and evaluate a random forest for each train-subject assignment.

    dfsample: DataFrame whose last two columns are 'Subject' and 'Activity';
    everything before them is a feature. training_subjects_assignments:
    iterable of subject-id lists; each list forms one training split, and
    all remaining subjects form the corresponding test set. Each split is
    fitted nRepeats times with seeds 0+interval, 0+2*interval, ...

    Returns (accuracy per fitted forest, predictions, true labels,
    mean feature importances). NOTE(review): the importance accumulator is
    reset per assignment, so the returned importances reflect only the last
    assignment -- confirm whether that is intended.
    """
    n_ft = dfsample.shape[1] #Get the size of sample list: The result here is feature number + label number
    score_rf = []
    pred_all = []
    test_all = []
    # BUG FIX: iterate the *parameter*. The original looped over the
    # misspelled module-level global `training_subjects_assigments`, which
    # only worked when this file was run as the __main__ script.
    for training_subject in training_subjects_assignments:
        X_train = np.array([], dtype=np.int64).reshape(0,n_ft-2)
        X_test = X_train.copy() #Empty, same size, for test sample feature vector
        y_train = [] #To fill with training labels
        y_test = [] #To fill with test labels
        trn = dfsample.loc[dfsample['Subject'].isin (training_subject)] #Select training and testing samples according to subjects
        other_indexes = [i for i in unique_labels(dfsample['Subject']) if i not in training_subject]
        tst = dfsample.loc[dfsample['Subject'].isin (other_indexes)]
        X_train = np.concatenate((X_train, trn.iloc[:,0:-2].values), axis=0)
        X_test = np.concatenate((X_test, tst.iloc[:,0:-2].values), axis=0)
        y_train = np.concatenate((y_train, trn.iloc[:,-1].values), axis=0)
        y_test = np.concatenate((y_test, tst.iloc[:,-1].values), axis=0)
        repeats = nRepeats
        seed = 0
        featureimportance = np.zeros(len(dfsample.keys())-2)
        for index in range(repeats):
            seed += interval
            rf = RandomForestClassifier(n_estimators=nTrees, random_state=seed, verbose=0,
                                        min_samples_split=2, class_weight="balanced", n_jobs = 1)
            rf.fit(X_train, y_train)
            featureimportance += rf.feature_importances_
            # print(rf.feature_importances_)
            y_pred = rf.predict(X_test)
            score_rf.append(accuracy_score(y_test, y_pred))
            pred_all.append(y_pred)
            test_all.append(y_test)
        featureimportance /= repeats
    return score_rf,pred_all,test_all,featureimportance
def setup_evaluation(path,windowLength = 1500):
    # Pipeline front-end: read the raw CSVs, low-pass filter them, cut them
    # into half-overlapping windows and compute the base feature table.
    # Returns (windows, feature DataFrame, label DataFrame) with one feature
    # row and one label row per window.
    #Read files
    files = read_csvs(path)
    #Filter data
    filt_list = filtdata(files)
    #Cut windows
    windowtotal = []
    df_label = pd.DataFrame()
    for i in range(len(filt_list)):
        temp = cutWindow(filt_list[i],windowLength,int(windowLength/2))
        for t in temp:
            windowtotal.append(t)
            # One label row per window, copied from the file's first sample.
            # NOTE(review): assumes Subject/Activity are constant per file.
            df_label = pd.concat([df_label,files[i][:1][['Subject','Activity']]],ignore_index = True)
    print(df_label)
    featureNames, to_recomp = init_feature_names()
    feature_all = comp_base_features(windowtotal)
    df_feature = pd.DataFrame(feature_all,columns=featureNames)
    #df_label = pd.DataFrame(label_all,columns = ('Subject','Activity')) #Label columns
    return windowtotal, df_feature, df_label
def remove_features(df_sample, featureimportance, differencePerRun = 20):
    """Drop the *differencePerRun* least important feature columns.

    Keeps the (len - differencePerRun) highest-importance columns, prints
    their names, and re-attaches the 'Subject'/'Activity' label columns.
    """
    importances = featureimportance.tolist()
    n_keep = len(importances) - differencePerRun
    keep_idx = map(importances.index, heapq.nlargest(n_keep, importances))
    kept = df_sample.iloc[:, list(keep_idx)]
    print(kept.columns)  # names of the most important features retained
    return pd.concat([kept, df_sample[['Subject', 'Activity']]], axis=1, sort=False)
def plot_confusion_matrix(y_true, y_pred, classes,
                          normalize=False,
                          title=None,
                          cmap=plt.cm.Blues):
    ## This function prints and plots the confusion matrix.
    ## Normalization can be applied by setting `normalize=True`.
    #sns.set(rc={'figure.figsize':(11.7,8.27)})
    font = {'weight':'normal', 'size': 18,}
    ## Adapted from the original code of sklearn and Dian
    fig, ax = plt.subplots()
    fig.set_size_inches(8, 8)
    # NOTE(review): *title* is computed here but never applied to the axes
    # (no set_title call below) -- confirm whether that is intentional.
    if not title:
        if normalize:
            title = 'Normalized confusion matrix'
        else:
            title = 'Confusion matrix, without normalization'
    # Compute confusion matrix
    cm = confusion_matrix(y_true, y_pred)
    # Only use the labels that appear in the data
    # NOTE(review): this overwrites the *classes* argument passed by callers.
    classes = unique_labels(y_true, y_pred)
    if normalize:
        # Row-normalize so each true class sums to 1.
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    #print(cm)
    ax.set(xticks=np.arange(cm.shape[1]),
           yticks=np.arange(cm.shape[0]),
           xticklabels=classes, yticklabels=classes)
    #ylabel='True label',
    #xlabel='Predicted label')
    plt.xlabel('Predicted label',fontsize = 18)
    plt.ylabel('True label',fontsize = 18)
    im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
    #ax.figure.colorbar(im,ax=ax)
    ax.set_aspect ('equal')
    # Rotate the tick labels and set their alignment.
    plt.setp(ax.get_xticklabels(), rotation=30, ha="right",
             rotation_mode="anchor", fontsize = 18)
    plt.setp(ax.get_yticklabels(), rotation=0, ha="right",
             rotation_mode="anchor", fontsize = 18)
    # Loop over data dimensions and create text annotations.
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            ax.text(j, i, format(cm[i, j], fmt),
                    ha="center", va="center",
                    color="white" if cm[i, j] > thresh else "black",fontdict = font)
    fig.tight_layout()
    return ax
def maxfeature_run(availableSensors,training_subjects_assigments, windowtotal,df_feature,df_label, output_folder = '', nTrees = 100):
    # Single evaluation run using *all* available features for one sensor
    # configuration: recompute the config-dependent features, train/test the
    # random forest, then save scores, feature importances, a confusion-
    # matrix heatmap and the mean normalized confusion matrix to files.
    featureNames = list(df_feature.columns)
    configName = " ".join(str(x) for x in availableSensors)
    available_features = get_available_features(availableSensors,featureNames)
    #print(available_features)
    df_temp = recomp(windowtotal,availableSensors,available_features)
    #print(df_temp)
    sample = pd.concat([df_feature[available_features], df_temp, df_label], axis=1, sort=False)
    sample = sample.fillna(0)
    totalfeatures = len(sample.columns) - len(df_label.columns)
    scores,preds,reals,featureimportance = base_train_evaluate(sample,training_subjects_assigments, nRepeats = 20, nTrees = nTrees)
    df_score = pd.DataFrame(scores,columns = [totalfeatures])
    df_score.to_csv(output_folder+configName+'_'+str(totalfeatures)+'.csv')
    #print(featureimportance)
    #print(sample.columns.to_numpy()[:-len(df_label.columns)])
    df_featureimportance = pd.DataFrame([featureimportance],columns = sample.columns.to_numpy()[:-len(df_label.columns)])
    df_featureimportance.to_csv(output_folder+configName+'_'+str(totalfeatures)+'features.csv')
    # Aggregate predictions over all train/test assignments.
    overall_true_categories = []
    overall_predictions = []
    cmnmlzd = []
    for i in range(len(reals)):
        overall_true_categories.extend(list(reals[i]))
        overall_predictions.extend(list(preds[i]))
        cm = confusion_matrix(reals[i], preds[i])
        cmnmlzd.append(cm.astype('float') / cm.sum(axis=1)[:, np.newaxis])
    #print(availableSensors,(totalfeatures-nFeatures),"acc:",accuracy_score(overall_true_categories, overall_predictions)) #average accurate
    meancm = sum(cmnmlzd)/len(cmnmlzd)#average accurate
    with sns.axes_style("whitegrid",{'axes.grid': False}):
        fig = plot_confusion_matrix(overall_true_categories,overall_predictions,classes=['upstairs', 'downstairs', 'housework', 'run', 'sit', 'stand', 'walk', 'upslope', 'cycling','walk'], normalize=True,title='Normalized confusion matrix')
        plt.savefig(fname = output_folder+'heatmap_'+configName+'_'+str(totalfeatures)+'.pdf',format="pdf")
        plt.close()
    np.savetxt(output_folder+'overall_true_pred_'+configName+'_'+str(totalfeatures)+'.dat',meancm,delimiter=',')
    #with open('overall_true_pred_'+configName+'_'+str(totalfeatures-nFeatures)+'.dat','w') as savefile:
    #    savefile.write(str(overall_true_categories))
    #    savefile.write("\n")
    #    savefile.write(str(overall_predictions))
def full_run(availableSensors,training_subjects_assigments, windowtotal,df_feature,df_label, output_folder = '', nTrees = 100):
    # Recursive-feature-elimination style run: evaluate with all features,
    # then repeatedly drop the least important feature (one per iteration)
    # and re-evaluate, saving outputs at every feature count.
    featureNames = list(df_feature.columns)
    configName = " ".join(str(x) for x in availableSensors)
    available_features = get_available_features(availableSensors,featureNames)
    #print(available_features)
    df_temp = recomp(windowtotal,availableSensors,available_features)
    #print(df_temp)
    sample = pd.concat([df_feature[available_features], df_temp, df_label], axis=1, sort=False)
    sample = sample.fillna(0)
    totalfeatures = len(sample.columns) - len(df_label.columns)
    for nFeatures in range(totalfeatures):
        scores,preds,reals,featureimportance = base_train_evaluate(sample,training_subjects_assigments, nRepeats = 20, nTrees = nTrees)
        df_score = pd.DataFrame(scores,columns = [totalfeatures-nFeatures])
        df_score.to_csv(output_folder+configName+'_'+str(totalfeatures-nFeatures)+'.csv')
        #print(featureimportance)
        #print(sample.columns.to_numpy()[:-len(df_label.columns)])
        df_featureimportance = pd.DataFrame([featureimportance],columns = sample.columns.to_numpy()[:-len(df_label.columns)])
        df_featureimportance.to_csv(output_folder+configName+'_'+str(totalfeatures-nFeatures)+'features.csv')
        # Aggregate predictions over all train/test assignments.
        overall_true_categories = []
        overall_predictions = []
        cmnmlzd = []
        for i in range(len(reals)):
            overall_true_categories.extend(list(reals[i]))
            overall_predictions.extend(list(preds[i]))
            cm = confusion_matrix(reals[i], preds[i])
            cmnmlzd.append(cm.astype('float') / cm.sum(axis=1)[:, np.newaxis])
        print(availableSensors,(totalfeatures-nFeatures),"acc:",accuracy_score(overall_true_categories, overall_predictions))
        meancm = sum(cmnmlzd)/len(cmnmlzd)
        with sns.axes_style("whitegrid",{'axes.grid': False}):
            fig = plot_confusion_matrix(overall_true_categories,overall_predictions,classes=['upstairs', 'downstairs', 'housework', 'run', 'sit', 'stand', 'walk', 'upslope', 'cycling','walk'], normalize=True,title='Normalized confusion matrix')
            plt.savefig(fname = output_folder+'heatmap_'+configName+'_'+str(totalfeatures-nFeatures)+'.pdf',format="pdf")
            plt.close()
        np.savetxt(output_folder+'overall_true_pred_'+configName+'_'+str(totalfeatures-nFeatures)+'.dat',meancm,delimiter=',')
        #with open('overall_true_pred_'+configName+'_'+str(totalfeatures-nFeatures)+'.dat','w') as savefile:
        #    savefile.write(str(overall_true_categories))
        #    savefile.write("\n")
        #    savefile.write(str(overall_predictions))
        # Drop the least important feature before the next iteration.
        if nFeatures < totalfeatures - 1:
            sample = remove_features(sample,featureimportance,differencePerRun = 1)
if __name__ == '__main__':
    # Command line: script.py basePath [output_folder] [windowLength] [configNumber]
    start_time = time.time()
    output_folder = 'output/'
    nTrees = 100
    windowLength = [100,500,1000,1500,2000,2500,3000,3500,4000,4500,5000,5500,6000]
    # Hand-picked sensor configurations (left index i pairs with right i+7).
    interestConfig67 = [np.array([i,i+7]) for i in range(7)] #all single sensors (pairs)
    interestConfig67 += [np.array([6,6-i,13,13-i]) for i in range(1,6)] #all pair of sensors including the heel
    interestConfig67 += [np.array([2,4,9,11]),np.array([0,2,7,9]),np.array([0,4,7,11]),np.array([0,3,7,10])] #horizontal pairs
    interestConfig67 += [np.array([0,1,7,8]),np.array([0,5,7,12]),np.array([0,6,7,13])]
    interestConfig67 += [np.array([0,2,6,7,9,13]),np.array([0,4,6,7,11,13]),np.array([2,4,6,9,11,13]),np.array([2,3,4,9,10,11])] # 3 sensors
    interestConfig67 += [np.array([0,1,6,7,8,13]),np.array([0,1,3,7,8,10]),np.array([0,5,6,7,12,13]),np.array([0,1,2,7,8,9])] # 3 sensors
    interestConfig67 += [np.array([0,4,5,7,11,12]),np.array([2,4,5,9,11,12]),np.array([0,2,5,7,9,12]),np.array([1,5,6,8,12,13]), np.array([0,1,5,7,8,12])] # 3 sensors
    interestConfig67 += [np.array([2,3,4,6,9,10,11,13]),np.array([0,2,3,6,7,9,10,13]),np.array([0,3,4,6,7,10,11,13]),np.array([0,2,4,6,7,9,11,13]),np.array([0,1,3,6,7,8,10,13])] # 4 sensors
    interestConfig67 += [np.array([0,1,2,3,7,8,9,10]),np.array([0,1,3,4,7,8,10,11]),np.array([1,2,3,4,8,9,10,11]),np.array([2,3,4,5,9,10,11,12])] # 4 sensors
    interestConfig67 += [np.array([0,2,3,5,7,9,10,12]),np.array([0,3,4,5,7,10,11,12]),np.array([0,2,4,5,7,9,11,12]),np.array([0,1,5,6,7,8,12,13])] # 4 sensors
    interestConfig67 += [np.array([1,2,5,6,8,9,12,13]),np.array([1,4,5,6,8,11,12,13]),np.array([3,4,5,6,10,11,12,13]),np.array([0,4,5,6,7,11,12,13]), np.array([1,2,3,6,8,9,10,13])] # 4 sensors
    interestConfig67 += [np.array([0,2,3,4,6,7,9,10,11,13]),np.array([0,2,4,5,6,7,9,11,12,13]),np.array([0,1,4,5,6,7,8,11,12,13]),np.array([0,1,2,5,6,7,8,9,12,13])] # 5 sensors
    interestConfig67 += [np.array([1,3,4,5,6,8,10,11,12,13]),np.array([2,3,4,5,6,9,10,11,12,13]),np.array([0,2,3,4,5,7,9,10,11,12]),np.array([1,2,3,4,5,8,9,10,11,12]),np.array([0,1,2,3,4,7,8,9,10,11])] # 5 sensors
    interestConfig67 += [np.delete(np.array([i for i in range(14)]),[j,j+7]) for j in range(7)]
    interestConfig67 += [np.array([0,1,2,3,4,5,6,7,8,9,10,11,12,13])] # sensors
    interestConfig = interestConfig67
    def changeConfigsto127():
        # All 127 non-empty symmetric left/right sensor subsets.
        from itertools import combinations
        interestConfig127 = []
        for combi in [combinations([0, 1, 2, 3, 4, 5, 6], i) for i in range(1,8)]:
            for Config in combi:
                interestConfig127 += [np.concatenate((np.array(Config),np.array(Config)+7),axis=None)]
        return interestConfig127
    def changeConfigsto25():
        # The 25 configurations reported in the paper.
        interestConfig25 = [np.array([6,13]), np.array([0,7]), np.array([4,6,11,13]), np.array([0,6,7,13]), np.array([2,4,9,11]), np.array([0,3,7,10]),
                            np.array([0,4,6,7,11,13]), np.array([0,5,6,7,12,13]), np.array([2,3,4,9,10,11]), np.array([0,1,3,7,8,10]),
                            np.array([3,4,5,6,10,11,12,13]), np.array([0,4,5,6,7,11,12,13]), np.array([2,3,4,6,9,10,11,13]), np.array([1,2,3,4,8,9,10,11]),
                            np.array([0,1,4,5,6,7,8,11,12,13]), np.array([0,2,3,4,6,7,9,10,11,13]), np.array([0,2,4,5,6,7,9,11,12,13]), np.array([2,3,4,5,6,9,10,11,12,13]),
                            np.array([0,1,2,3,4,7,8,9,10,11]), np.array([0,2,3,4,5,7,9,10,11,12]),
                            np.array([0,1,3,4,5,6,7,8,10,11,12,13]), np.array([0,2,3,4,5,6,7,9,10,11,12,13]), np.array([0,1,2,4,5,6,7,8,9,11,12,13]), np.array([0,1,2,3,4,5,7,8,9,10,11,12]),
                            np.array([0,1,2,3,4,5,6,7,8,9,10,11,12,13])]
        return interestConfig25
    availableThreads = psutil.cpu_count()
    basePath = sys.argv[1] if len(sys.argv) > 1 else None
    if basePath == None:
        print("Error: basePath not set. Please set path.")
        exit()
    if len(sys.argv) > 2:
        output_folder = sys.argv[2]
    if len(sys.argv) > 3:
        windowLength = [int(sys.argv[3])]
    if len(sys.argv) > 4:
        if int(sys.argv[4]) == 127:
            interestConfig = changeConfigsto127()
        elif int(sys.argv[4]) == 67:
            pass
        elif int(sys.argv[4]) == 25:
            interestConfig = changeConfigsto25()
        else:
            print("Error: Configuration number is not available, please replace Configuration_number with 127 for result of all sensor configurations or 67 for original analysing results or 25 for configurations in the paper")
            exit()
    # One (windows, features, labels) triple per requested window length.
    totalFeatures = [setup_evaluation(basePath,windowLength=winLength) for winLength in windowLength]
    #windowtotal, df_feature, df_label = setup_evaluation(basePath,windowLength=windowLength)
    # NOTE(review): index 2 is the *label* frame (Subject/Activity), not the
    # feature frame; featureNames is unused below, so this is harmless.
    featureNames = list(totalFeatures[0][2].columns)
    print("Tried windowsize:", windowLength)
    #print("Tried configurations:", interestConfig)
    #print("Configuration number:", len(interestConfig))
    #training_subjects_assigments = [np.random.choice([4,5,7,8,9,10,11,13,15,18,30],6,replace=False) for _ in range(5)]
    training_subjects_assigments = [[4,5,7,8,9,10],[7,8,11,13,15,30],[4,7,9,10,15,18],[4,5,11,13,18,30],[7,8,10,15,18,30]]
    print("Training assignments:",training_subjects_assigments)
    with open(output_folder+'assignments.txt','w') as f:
        f.write(str(training_subjects_assigments))
        f.flush()
    # Merge equivalent activity labels before training.
    for elem in totalFeatures:
        elem[2].loc[elem[2].Activity == 'walkF','Activity'] = 'walk'
        elem[2].loc[elem[2].Activity == 'walkN','Activity'] = 'walk'
        elem[2].loc[elem[2].Activity == 'jog','Activity'] = 'run'
        elem[2].loc[elem[2].Activity == 'nonlocal','Activity'] = 'housework'
        elem[2].loc[elem[2].Activity == 'sit','Activity'] = 'sitting'
    labels = ['upstairs', 'downstairs', 'housework', 'run', 'sitting', 'standing', 'upslope', 'cycling','walk']
    def temp_run(availableSensors):
        # Worker: full RFE run for one sensor configuration (first window size).
        full_run(availableSensors,training_subjects_assigments, totalFeatures[0][0],totalFeatures[0][1],totalFeatures[0][2],output_folder = output_folder,nTrees = nTrees)
    def temp_run_fullfeatures(elem):
        # Worker: all-features run for one window size with every sensor.
        maxfeature_run(interestConfig[-1],training_subjects_assigments, elem[0],elem[1],elem[2],output_folder = output_folder+str(len(elem[0][0]))+"_",nTrees = nTrees)
    with Pool(availableThreads) as pool:
        if len(windowLength) > 1:
            # Multiple window sizes: sweep window length with all sensors.
            pool.map(temp_run_fullfeatures,totalFeatures)
        else:
            # Single window size: sweep the sensor configurations.
            pool.map(temp_run,interestConfig)
    #
    # BUG FIX: log message typo ('Strart time' -> 'Start time').
    print('Start time',start_time,'end time',time.time())
|
import ssl
import pytest
import aiohttp
from hailtop.auth import service_auth_headers
from hailtop.config import get_deploy_config
from hailtop.tls import _get_ssl_config
from hailtop.utils import retry_transient_errors
deploy_config = get_deploy_config()
@pytest.mark.asyncio
async def test_connect_to_address_on_pod_ip():
    """Verify a mutually-authenticated TLS request to the address service
    succeeds when connecting directly by pod IP.

    Hostname checking is disabled because the pod IP will not match the
    certificate's hostname; the server is still verified via the outgoing
    trust CA, and the client presents its own certificate chain.
    """
    ssl_config = _get_ssl_config()
    client_ssl_context = ssl.create_default_context(
        purpose=ssl.Purpose.SERVER_AUTH, cafile=ssl_config['outgoing_trust']
    )
    client_ssl_context.load_default_certs()
    client_ssl_context.load_cert_chain(ssl_config['cert'], keyfile=ssl_config['key'], password=None)
    client_ssl_context.verify_mode = ssl.CERT_REQUIRED
    # Connecting by IP, so certificate hostname verification must be off.
    client_ssl_context.check_hostname = False
    async with aiohttp.ClientSession(
        connector=aiohttp.TCPConnector(ssl=client_ssl_context),
        raise_for_status=True,  # non-2xx responses raise and fail the test
        timeout=aiohttp.ClientTimeout(total=5),
        headers=service_auth_headers(deploy_config, 'address'),
    ) as session:
        async def get():
            address, port = await deploy_config.address('address')
            # BUG FIX: the original created the request coroutine without
            # awaiting it, so no HTTP call was ever made and the test passed
            # vacuously. Await the request and close the response.
            async with session.get(
                f'https://{address}:{port}{deploy_config.base_path("address")}/api/address'
            ):
                pass
        await retry_transient_errors(get)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.